xref: /freebsd/sys/dev/isp/isp_pci.c (revision a704009d8ae8531057cef91face602cab4d1941f)
1 /* $FreeBSD$ */
2 /*
3  * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
4  * FreeBSD Version.
5  *
6  *---------------------------------------
7  * Copyright (c) 1997, 1998, 1999 by Matthew Jacob
8  * NASA/Ames Research Center
9  * All rights reserved.
10  *---------------------------------------
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice immediately at the beginning of the file, without modification,
17  *    this list of conditions, and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. The name of the author may not be used to endorse or promote products
22  *    derived from this software without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
28  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  */
36 #include <dev/isp/isp_freebsd.h>
37 #include <dev/isp/asm_pci.h>
38 #include <sys/malloc.h>
39 #include <vm/vm.h>
40 #include <vm/pmap.h>
41 
42 
43 #include <pci/pcireg.h>
44 #include <pci/pcivar.h>
45 
46 #include <machine/bus_memio.h>
47 #include <machine/bus_pio.h>
48 #include <machine/bus.h>
49 #include <machine/md_var.h>
50 
51 static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
52 static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
53 #ifndef ISP_DISABLE_1080_SUPPORT
54 static u_int16_t isp_pci_rd_reg_1080 __P((struct ispsoftc *, int));
55 static void isp_pci_wr_reg_1080 __P((struct ispsoftc *, int, u_int16_t));
56 #endif
57 static int isp_pci_mbxdma __P((struct ispsoftc *));
58 static int isp_pci_dmasetup __P((struct ispsoftc *, ISP_SCSI_XFER_T *,
59 	ispreq_t *, u_int16_t *, u_int16_t));
60 static void
61 isp_pci_dmateardown __P((struct ispsoftc *, ISP_SCSI_XFER_T *, u_int32_t));
62 
63 static void isp_pci_reset1 __P((struct ispsoftc *));
64 static void isp_pci_dumpregs __P((struct ispsoftc *));
65 
66 #ifndef	ISP_CODE_ORG
67 #define	ISP_CODE_ORG		0x1000
68 #endif
69 #ifndef	ISP_1040_RISC_CODE
70 #define	ISP_1040_RISC_CODE	NULL
71 #endif
72 #ifndef	ISP_1080_RISC_CODE
73 #define	ISP_1080_RISC_CODE	NULL
74 #endif
75 #ifndef	ISP_2100_RISC_CODE
76 #define	ISP_2100_RISC_CODE	NULL
77 #endif
78 #ifndef	ISP_2200_RISC_CODE
79 #define	ISP_2200_RISC_CODE	NULL
80 #endif
81 
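/*
 * Bus-specific dispatch vectors. Each ispmdvec hands the core driver the
 * PCI register read/write routines, mailbox/command DMA setup and teardown,
 * post-reset fixup and register dump entry points for one chip family,
 * followed by the firmware image to load, its load origin (ISP_CODE_ORG)
 * and the default BIU configuration bits. The initializers are positional
 * and must match the ispmdvec layout in the core driver headers.
 */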
82 #ifndef ISP_DISABLE_1020_SUPPORT
83 static struct ispmdvec mdvec = {
84 	isp_pci_rd_reg,
85 	isp_pci_wr_reg,
86 	isp_pci_mbxdma,
87 	isp_pci_dmasetup,
88 	isp_pci_dmateardown,
89 	NULL,
90 	isp_pci_reset1,
91 	isp_pci_dumpregs,
92 	ISP_1040_RISC_CODE,
93 	0,
94 	ISP_CODE_ORG,
95 	0,
96 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
97 	0
98 };
99 #endif
100 
101 #ifndef ISP_DISABLE_1080_SUPPORT
102 static struct ispmdvec mdvec_1080 = {
103 	isp_pci_rd_reg_1080,
104 	isp_pci_wr_reg_1080,
105 	isp_pci_mbxdma,
106 	isp_pci_dmasetup,
107 	isp_pci_dmateardown,
108 	NULL,
109 	isp_pci_reset1,
110 	isp_pci_dumpregs,
111 	ISP_1080_RISC_CODE,
112 	0,
113 	ISP_CODE_ORG,
114 	0,
115 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
116 	0
117 };
118 #endif
119 
120 #ifndef ISP_DISABLE_2100_SUPPORT
121 static struct ispmdvec mdvec_2100 = {
122 	isp_pci_rd_reg,
123 	isp_pci_wr_reg,
124 	isp_pci_mbxdma,
125 	isp_pci_dmasetup,
126 	isp_pci_dmateardown,
127 	NULL,
128 	isp_pci_reset1,
129 	isp_pci_dumpregs,
130 	ISP_2100_RISC_CODE,
131 	0,
132 	ISP_CODE_ORG,
133 	0,
134 	0,
135 	0
136 };
137 #endif
138 
139 #ifndef	ISP_DISABLE_2200_SUPPORT
140 static struct ispmdvec mdvec_2200 = {
141 	isp_pci_rd_reg,
142 	isp_pci_wr_reg,
143 	isp_pci_mbxdma,
144 	isp_pci_dmasetup,
145 	isp_pci_dmateardown,
146 	NULL,
147 	isp_pci_reset1,
148 	isp_pci_dumpregs,
149 	ISP_2200_RISC_CODE,
150 	0,
151 	ISP_CODE_ORG,
152 	0,
153 	0,
154 	0
155 };
156 #endif
157 
158 #ifndef	SCSI_ISP_PREFER_MEM_MAP
159 #define	SCSI_ISP_PREFER_MEM_MAP	0
160 #endif
161 
162 #ifndef	PCIM_CMD_INVEN
163 #define	PCIM_CMD_INVEN			0x10
164 #endif
165 #ifndef	PCIM_CMD_BUSMASTEREN
166 #define	PCIM_CMD_BUSMASTEREN		0x0004
167 #endif
168 #ifndef	PCIM_CMD_PERRESPEN
169 #define	PCIM_CMD_PERRESPEN		0x0040
170 #endif
171 #ifndef	PCIM_CMD_SEREN
172 #define	PCIM_CMD_SEREN			0x0100
173 #endif
174 
175 #ifndef	PCIR_COMMAND
176 #define	PCIR_COMMAND			0x04
177 #endif
178 
179 #ifndef	PCIR_CACHELNSZ
180 #define	PCIR_CACHELNSZ			0x0c
181 #endif
182 
183 #ifndef	PCIR_LATTIMER
184 #define	PCIR_LATTIMER			0x0d
185 #endif
186 
187 #ifndef	PCIR_ROMADDR
188 #define	PCIR_ROMADDR			0x30
189 #endif
190 
191 #ifndef	PCI_VENDOR_QLOGIC
192 #define	PCI_VENDOR_QLOGIC	0x1077
193 #endif
194 
195 #ifndef	PCI_PRODUCT_QLOGIC_ISP1020
196 #define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
197 #endif
198 
199 #ifndef	PCI_PRODUCT_QLOGIC_ISP1080
200 #define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
201 #endif
202 
203 #ifndef	PCI_PRODUCT_QLOGIC_ISP1240
204 #define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
205 #endif
206 
207 #ifndef	PCI_PRODUCT_QLOGIC_ISP2100
208 #define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
209 #endif
210 
211 #ifndef	PCI_PRODUCT_QLOGIC_ISP2200
212 #define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
213 #endif
214 
215 #define	PCI_QLOGIC_ISP	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)
216 
217 #define	PCI_QLOGIC_ISP1080	\
218 	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)
219 
220 #define	PCI_QLOGIC_ISP1240	\
221 	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)
222 
223 #define	PCI_QLOGIC_ISP2100	\
224 	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)
225 
226 #define	PCI_QLOGIC_ISP2200	\
227 	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)
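/*
 * The PCI_QLOGIC_* values above pack the device ID into the upper 16 bits
 * and the vendor ID into the lower 16 bits, which is the layout returned
 * by pci_conf_read(PCI_ID_REG), so a single compare suffices in the probe
 * and attach routines below.
 */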
228 
229 #define	IO_MAP_REG	0x10
230 #define	MEM_MAP_REG	0x14
231 
232 #define	PCI_DFLT_LTNCY	0x40
233 #define	PCI_DFLT_LNSZ	0x10
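/*
 * Default PCI latency timer (0x40, i.e. 64 PCI clocks) and cache line size
 * (0x10 dwords, i.e. 64 bytes) written back into config space when the
 * values found there look unreasonable.
 */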
234 
235 static const char *isp_pci_probe __P((pcici_t tag, pcidi_t type));
236 static void isp_pci_attach __P((pcici_t config_d, int unit));
237 
238 /* This distinguishing define is not right, but it does work */
239 #ifdef __alpha__
240 #define IO_SPACE_MAPPING	ALPHA_BUS_SPACE_IO
241 #define MEM_SPACE_MAPPING	ALPHA_BUS_SPACE_MEM
242 #else
243 #define IO_SPACE_MAPPING	I386_BUS_SPACE_IO
244 #define MEM_SPACE_MAPPING	I386_BUS_SPACE_MEM
245 #endif
246 
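/*
 * Per-instance softc: the generic ispsoftc must come first so the core
 * driver and this file can cast between the two. The rest is PCI glue:
 * the config handle, bus space tag/handle for register access, per-block
 * register offsets, and the busdma tags/maps used for the control
 * structures and the per-command S/G lists.
 */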
247 struct isp_pcisoftc {
248 	struct ispsoftc			pci_isp;
249 	pcici_t				pci_id;
250 	bus_space_tag_t			pci_st;
251 	bus_space_handle_t		pci_sh;
252 	int16_t				pci_poff[_NREG_BLKS];
253 	bus_dma_tag_t			parent_dmat;
254 	bus_dma_tag_t			cntrol_dmat;
255 	bus_dmamap_t			cntrol_dmap;
256 	bus_dmamap_t			*dmaps;
257 };
258 
259 static u_long ispunit;
260 
261 static struct pci_device isp_pci_driver = {
262 	"isp",
263 	isp_pci_probe,
264 	isp_pci_attach,
265 	&ispunit,
266 	NULL
267 };
268 COMPAT_PCI_DRIVER (isp_pci, isp_pci_driver);
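/*
 * Registration with the old-style PCI probe/attach interface:
 * isp_pci_probe() returns a description string for recognized device IDs
 * (or NULL), after which isp_pci_attach() is called once per matching unit.
 */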
269 
270 
271 static const char *
272 isp_pci_probe(pcici_t tag, pcidi_t type)
273 {
274 	static int oneshot = 1;
275 	char *x;
276 
277 	switch (type) {
278 #ifndef	ISP_DISABLE_1020_SUPPORT
279 	case PCI_QLOGIC_ISP:
280 		x = "Qlogic ISP 1020/1040 PCI SCSI Adapter";
281 		break;
282 #endif
283 #ifndef	ISP_DISABLE_1080_SUPPORT
284 	case PCI_QLOGIC_ISP1080:
285 		x = "Qlogic ISP 1080 PCI SCSI Adapter";
286 		break;
287 	case PCI_QLOGIC_ISP1240:
288 		x = "Qlogic ISP 1240 PCI SCSI Adapter";
289 		break;
290 #endif
291 #ifndef	ISP_DISABLE_2100_SUPPORT
292 	case PCI_QLOGIC_ISP2100:
293 		x = "Qlogic ISP 2100 PCI FC-AL Adapter";
294 		break;
295 #endif
296 #ifndef	ISP_DISABLE_2200_SUPPORT
297 	case PCI_QLOGIC_ISP2200:
298 		x = "Qlogic ISP 2200 PCI FC-AL Adapter";
299 		break;
300 #endif
301 	default:
302 		return (NULL);
303 	}
304 	if (oneshot) {
305 		oneshot = 0;
306 		CFGPRINTF("Qlogic ISP Driver, FreeBSD Version %d.%d, "
307 		    "Core Version %d.%d\n",
308 		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
309 		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
310 	}
311 	return (x);
312 }
313 
314 static void
315 isp_pci_attach(pcici_t cfid, int unit)
316 {
317 #ifdef	SCSI_ISP_WWN
318 	const char *name = SCSI_ISP_WWN;
319 	char *vtp = NULL;
320 #endif
321 	int mapped, prefer_mem_map, bitmap;
322 	pci_port_t io_port;
323 	u_int32_t data, rev, linesz, psize, basetype;
324 	struct isp_pcisoftc *pcs;
325 	struct ispsoftc *isp;
326 	vm_offset_t vaddr, paddr;
327 	struct ispmdvec *mdvp;
328 	bus_size_t lim;
329 	ISP_LOCKVAL_DECL;
330 
331 	pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT);
332 	if (pcs == NULL) {
333 		printf("isp%d: cannot allocate softc\n", unit);
334 		return;
335 	}
336 	bzero(pcs, sizeof (struct isp_pcisoftc));
337 
338 	/*
339 	 * Figure out if we're supposed to skip this one.
340 	 */
341 	if (getenv_int("isp_disable", &bitmap)) {
342 		if (bitmap & (1 << unit)) {
343 			printf("isp%d: not configuring\n", unit);
			free(pcs, M_DEVBUF);
344 			return;
345 		}
346 	}
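	/*
	 * The isp_* variables consulted here and below (isp_disable,
	 * isp_mem_map, isp_io_map, isp_no_fwload, isp_no_nvram,
	 * isp_fcduplex, ...) are per-unit bitmasks read from the kernel
	 * environment; bit N applies to ispN. For example (value is
	 * illustrative only), setting isp_disable=0x2 at the loader
	 * would skip isp1.
	 */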
347 
348 	/*
349 	 * Figure out which to try first: memory mapping or I/O mapping.
350 	 */
351 #if	SCSI_ISP_PREFER_MEM_MAP == 1
352 	prefer_mem_map = 1;
353 #else
354 	prefer_mem_map = 0;
355 #endif
356 	bitmap = 0;
357 	if (getenv_int("isp_mem_map", &bitmap)) {
358 		if (bitmap & (1 << unit))
359 			prefer_mem_map = 1;
360 	}
361 	bitmap = 0;
362 	if (getenv_int("isp_io_map", &bitmap)) {
363 		if (bitmap & (1 << unit))
364 			prefer_mem_map = 0;
365 	}
366 
367 	vaddr = paddr = 0;
368 	mapped = 0;
369 	linesz = PCI_DFLT_LNSZ;
370 	/*
371 	 * Note that pci_conf_read() reads aligned 32-bit words only.
372 	 */
373 	data = pci_conf_read(cfid, PCIR_COMMAND);
374 	if (prefer_mem_map) {
375 		if (data & PCI_COMMAND_MEM_ENABLE) {
376 			if (pci_map_mem(cfid, MEM_MAP_REG, &vaddr, &paddr)) {
377 				pcs->pci_st = MEM_SPACE_MAPPING;
378 				pcs->pci_sh = vaddr;
379 				mapped++;
380 			}
381 		}
382 		if (mapped == 0 && (data & PCI_COMMAND_IO_ENABLE)) {
383 			if (pci_map_port(cfid, PCI_MAP_REG_START, &io_port)) {
384 				pcs->pci_st = IO_SPACE_MAPPING;
385 				pcs->pci_sh = io_port;
386 				mapped++;
387 			}
388 		}
389 	} else {
390 		if (data & PCI_COMMAND_IO_ENABLE) {
391 			if (pci_map_port(cfid, PCI_MAP_REG_START, &io_port)) {
392 				pcs->pci_st = IO_SPACE_MAPPING;
393 				pcs->pci_sh = io_port;
394 				mapped++;
395 			}
396 		}
397 		if (mapped == 0 && (data & PCI_COMMAND_MEM_ENABLE)) {
398 			if (pci_map_mem(cfid, MEM_MAP_REG, &vaddr, &paddr)) {
399 				pcs->pci_st = MEM_SPACE_MAPPING;
400 				pcs->pci_sh = vaddr;
401 				mapped++;
402 			}
403 		}
404 	}
405 	if (mapped == 0) {
406 		printf("isp%d: unable to map any ports!\n", unit);
407 		free(pcs, M_DEVBUF);
408 		return;
409 	}
410 	if (bootverbose)
411 		printf("isp%d: using %s space register mapping\n", unit,
412 		    pcs->pci_st == IO_SPACE_MAPPING? "I/O" : "Memory");
413 
414 	data = pci_conf_read(cfid, PCI_ID_REG);
415 	rev = pci_conf_read(cfid, PCI_CLASS_REG) & 0xff;	/* revision */
416 	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
417 	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
418 	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
419 	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
420 	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
421 	/*
422 	 * Defaults (these also quiet GCC uninitialized-variable warnings).
423 	 */
424 	mdvp = &mdvec;
425 	basetype = ISP_HA_SCSI_UNKNOWN;
426 	psize = sizeof (sdparam);
427 	lim = BUS_SPACE_MAXSIZE_32BIT;
428 #ifndef	ISP_DISABLE_1020_SUPPORT
429 	if (data == PCI_QLOGIC_ISP) {
430 		mdvp = &mdvec;
431 		basetype = ISP_HA_SCSI_UNKNOWN;
432 		psize = sizeof (sdparam);
433 		lim = BUS_SPACE_MAXSIZE_24BIT;
434 	}
435 #endif
436 #ifndef	ISP_DISABLE_1080_SUPPORT
437 	if (data == PCI_QLOGIC_ISP1080) {
438 		mdvp = &mdvec_1080;
439 		basetype = ISP_HA_SCSI_1080;
440 		psize = sizeof (sdparam);
441 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
442 		    ISP1080_DMA_REGS_OFF;
443 	}
444 	if (data == PCI_QLOGIC_ISP1240) {
445 		mdvp = &mdvec_1080;
446 		basetype = ISP_HA_SCSI_12X0;
447 		psize = 2 * sizeof (sdparam);
448 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
449 		    ISP1080_DMA_REGS_OFF;
450 	}
451 #endif
452 #ifndef	ISP_DISABLE_2100_SUPPORT
453 	if (data == PCI_QLOGIC_ISP2100) {
454 		mdvp = &mdvec_2100;
455 		basetype = ISP_HA_FC_2100;
456 		psize = sizeof (fcparam);
457 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
458 		    PCI_MBOX_REGS2100_OFF;
459 		if (rev < 3) {
460 			/*
461 			 * XXX: Need to get the actual revision
462 			 * XXX: number of the 2100 FB. At any rate,
463 			 * XXX: lower the cache line size for early revision
464 			 * XXX: boards.
465 			 */
466 			linesz = 1;
467 		}
468 	}
469 #endif
470 #ifndef	ISP_DISABLE_2200_SUPPORT
471 	if (data == PCI_QLOGIC_ISP2200) {
472 		mdvp = &mdvec_2200;
473 		basetype = ISP_HA_FC_2200;
474 		psize = sizeof (fcparam);
475 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
476 		    PCI_MBOX_REGS2100_OFF;
477 	}
478 #endif
479 	isp = &pcs->pci_isp;
480 	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT);
481 	if (isp->isp_param == NULL) {
482 		printf("isp%d: cannot allocate parameter data\n", unit);
		free(pcs, M_DEVBUF);
483 		return;
484 	}
485 	bzero(isp->isp_param, psize);
486 	isp->isp_mdvec = mdvp;
487 	isp->isp_type = basetype;
488 	isp->isp_revision = rev;
489 	(void) snprintf(isp->isp_name, sizeof (isp->isp_name), "isp%d", unit);
490 	isp->isp_osinfo.unit = unit;
491 
492 	ISP_LOCK(isp);
493 
494 	/*
495 	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
496 	 * are set.
497 	 */
498 	data = pci_cfgread(cfid, PCIR_COMMAND, 2);
499 	data |=	PCIM_CMD_SEREN		|
500 		PCIM_CMD_PERRESPEN	|
501 		PCIM_CMD_BUSMASTEREN	|
502 		PCIM_CMD_INVEN;
503 	pci_cfgwrite(cfid, PCIR_COMMAND, data, 2);
504 
505 	/*
506 	 * Make sure the Cache Line Size register is set sensibly.
507 	 */
508 	data = pci_cfgread(cfid, PCIR_CACHELNSZ, 1);
509 	if (data != linesz) {
510 		data = PCI_DFLT_LNSZ;
511 		CFGPRINTF("%s: set PCI line size to %d\n", isp->isp_name, data);
512 		pci_cfgwrite(cfid, PCIR_CACHELNSZ, data, 1);
513 	}
514 
515 	/*
516 	 * Make sure the Latency Timer is sane.
517 	 */
518 	data = pci_cfgread(cfid, PCIR_LATTIMER, 1);
519 	if (data < PCI_DFLT_LTNCY) {
520 		data = PCI_DFLT_LTNCY;
521 		CFGPRINTF("%s: set PCI latency to %d\n", isp->isp_name, data);
522 		pci_cfgwrite(cfid, PCIR_LATTIMER, data, 1);
523 	}
524 
525 	/*
526 	 * Make sure we've disabled the ROM.
527 	 */
528 	data = pci_cfgread(cfid, PCIR_ROMADDR, 4);
529 	data &= ~1;
530 	pci_cfgwrite(cfid, PCIR_ROMADDR, data, 4);
531 	ISP_UNLOCK(isp);
532 
533 	if (bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
534 	    BUS_SPACE_MAXADDR, NULL, NULL, lim + 1,
535 	    255, lim, 0, &pcs->parent_dmat) != 0) {
536 		printf("%s: could not create master dma tag\n", isp->isp_name);
537 		free(pcs, M_DEVBUF);
538 		return;
539 	}
540 	if (pci_map_int(cfid, (void (*)(void *))isp_intr,
541 	    (void *)isp, &IMASK) == 0) {
542 		printf("%s: could not map interrupt\n", isp->isp_name);
543 		free(pcs, M_DEVBUF);
544 		return;
545 	}
546 
547 	pcs->pci_id = cfid;
548 #ifdef	SCSI_ISP_NO_FWLOAD_MASK
549 	if (SCSI_ISP_NO_FWLOAD_MASK && (SCSI_ISP_NO_FWLOAD_MASK & (1 << unit)))
550 		isp->isp_confopts |= ISP_CFG_NORELOAD;
551 #endif
552 	if (getenv_int("isp_no_fwload", &bitmap)) {
553 		if (bitmap & (1 << unit))
554 			isp->isp_confopts |= ISP_CFG_NORELOAD;
555 	}
556 	if (getenv_int("isp_fwload", &bitmap)) {
557 		if (bitmap & (1 << unit))
558 			isp->isp_confopts &= ~ISP_CFG_NORELOAD;
559 	}
560 
561 #ifdef	SCSI_ISP_NO_NVRAM_MASK
562 	if (SCSI_ISP_NO_NVRAM_MASK && (SCSI_ISP_NO_NVRAM_MASK & (1 << unit))) {
563 		printf("%s: ignoring NVRAM\n", isp->isp_name);
564 		isp->isp_confopts |= ISP_CFG_NONVRAM;
565 	}
566 #endif
567 	if (getenv_int("isp_no_nvram", &bitmap)) {
568 		if (bitmap & (1 << unit))
569 			isp->isp_confopts |= ISP_CFG_NONVRAM;
570 	}
571 	if (getenv_int("isp_nvram", &bitmap)) {
572 		if (bitmap & (1 << unit))
573 			isp->isp_confopts &= ~ISP_CFG_NONVRAM;
574 	}
575 
576 #ifdef	SCSI_ISP_FCDUPLEX
577 	if (IS_FC(isp)) {
578 		if (SCSI_ISP_FCDUPLEX && (SCSI_ISP_FCDUPLEX & (1 << unit))) {
579 			isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
580 		}
581 	}
582 #endif
583 	if (getenv_int("isp_fcduplex", &bitmap)) {
584 		if (bitmap & (1 << unit))
585 			isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
586 	}
587 	if (getenv_int("isp_no_fcduplex", &bitmap)) {
588 		if (bitmap & (1 << unit))
589 			isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX;
590 	}
591 	/*
592 	 * Look for overriding WWN. This is a Node WWN so it binds to
593 	 * all FC instances. A Port WWN will be constructed from it
594 	 * as appropriate.
595 	 */
596 #ifdef	SCSI_ISP_WWN
597 	isp->isp_osinfo.default_wwn = strtoq(name, &vtp, 16);
598 	if (vtp != name && *vtp == 0) {
599 		isp->isp_confopts |= ISP_CFG_OWNWWN;
600 	} else
601 #endif
602 	if (!getenv_quad("isp_wwn", (quad_t *) &isp->isp_osinfo.default_wwn)) {
603 		int i;
604 		u_int64_t seed = (u_int64_t) (intptr_t) isp;
605 
606 		seed <<= 16;
607 		seed &= ((1LL << 48) - 1LL);
608 		/*
609 		 * This isn't very random, but it's the best we can do for
610 		 * the real edge case of cards that don't have WWNs. If
611 		 * you recompile a new vers.c, you'll get a different WWN.
612 		 */
613 		for (i = 0; version[i] != 0; i++) {
614 			seed += version[i];
615 		}
616 		/*
617 		 * Make sure the top nibble has something vaguely sensible.
618 		 */
619 		isp->isp_osinfo.default_wwn |= (4LL << 60) | seed;
620 	} else {
621 		isp->isp_confopts |= ISP_CFG_OWNWWN;
622 	}
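	/*
	 * Note: a node WWN may also be supplied via the "isp_wwn" kernel
	 * environment variable (read with getenv_quad() above), e.g.
	 * something like isp_wwn=0x2000000012345678; the value shown is
	 * purely illustrative, not a recommended WWN.
	 */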
623 	(void) getenv_int("isp_debug", &isp_debug);
624 	ISP_LOCK(isp);
625 	isp_reset(isp);
626 	if (isp->isp_state != ISP_RESETSTATE) {
627 		(void) pci_unmap_int(cfid);
628 		ISP_UNLOCK(isp);
629 		free(pcs, M_DEVBUF);
630 		return;
631 	}
632 	isp_init(isp);
633 	if (isp->isp_state != ISP_INITSTATE) {
634 		/* If we're a Fibre Channel Card, we allow deferred attach */
635 		if (IS_SCSI(isp)) {
636 			isp_uninit(isp);
637 			(void) pci_unmap_int(cfid); /* Does nothing */
638 			ISP_UNLOCK(isp);
639 			free(pcs, M_DEVBUF);
640 			return;
641 		}
642 	}
643 	isp_attach(isp);
644 	if (isp->isp_state != ISP_RUNSTATE) {
645 		/* If we're a Fibre Channel Card, we allow deferred attach */
646 		if (IS_SCSI(isp)) {
647 			isp_uninit(isp);
648 			(void) pci_unmap_int(cfid); /* Does nothing */
649 			ISP_UNLOCK(isp);
650 			free(pcs, M_DEVBUF);
651 			return;
652 		}
653 	}
654 	ISP_UNLOCK(isp);
655 }
656 
657 static u_int16_t
658 isp_pci_rd_reg(isp, regoff)
659 	struct ispsoftc *isp;
660 	int regoff;
661 {
662 	u_int16_t rv;
663 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
664 	int offset, oldconf = 0;
665 
666 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
667 		/*
668 		 * We will assume that someone has paused the RISC processor.
669 		 */
670 		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
671 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
672 	}
673 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
674 	offset += (regoff & 0xff);
675 	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
676 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
677 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
678 	}
679 	return (rv);
680 }
681 
682 static void
683 isp_pci_wr_reg(isp, regoff, val)
684 	struct ispsoftc *isp;
685 	int regoff;
686 	u_int16_t val;
687 {
688 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
689 	int offset, oldconf = 0;
690 
691 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
692 		/*
693 		 * We will assume that someone has paused the RISC processor.
694 		 */
695 		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
696 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
697 	}
698 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
699 	offset += (regoff & 0xff);
700 	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
701 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
702 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
703 	}
704 }
705 
706 #ifndef	ISP_DISABLE_1080_SUPPORT
707 static u_int16_t
708 isp_pci_rd_reg_1080(isp, regoff)
709 	struct ispsoftc *isp;
710 	int regoff;
711 {
712 	u_int16_t rv;
713 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
714 	int offset, oc = 0;
715 
716 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
717 		/*
718 		 * We will assume that someone has paused the RISC processor.
719 		 */
720 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
721 		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_SXP);
722 	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
723 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
724 		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
725 	}
726 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
727 	offset += (regoff & 0xff);
728 	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
729 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
730 	    ((regoff & _BLK_REG_MASK) == DMA_BLOCK)) {
731 		isp_pci_wr_reg(isp, BIU_CONF1, oc);
732 	}
733 	return (rv);
734 }
735 
736 static void
737 isp_pci_wr_reg_1080(isp, regoff, val)
738 	struct ispsoftc *isp;
739 	int regoff;
740 	u_int16_t val;
741 {
742 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
743 	int offset, oc = 0;
744 
745 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
746 		/*
747 		 * We will assume that someone has paused the RISC processor.
748 		 */
749 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
750 		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_SXP);
751 	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
752 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
753 		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
754 	}
755 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
756 	offset += (regoff & 0xff);
757 	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
758 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
759 	    ((regoff & _BLK_REG_MASK) == DMA_BLOCK)) {
760 		isp_pci_wr_reg(isp, BIU_CONF1, oc);
761 	}
762 }
763 #endif
764 
765 
766 static void isp_map_rquest __P((void *, bus_dma_segment_t *, int, int));
767 static void isp_map_result __P((void *, bus_dma_segment_t *, int, int));
768 static void isp_map_fcscrt __P((void *, bus_dma_segment_t *, int, int));
769 
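/*
 * bus_dmamap_load() reports the bus addresses of a mapping via a callback
 * rather than a return value, so the imush structure below carries the
 * softc pointer and an error flag into the isp_map_* callbacks, which
 * record where the request queue, result queue and (for FC) scratch area
 * ended up.
 */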
770 struct imush {
771 	struct ispsoftc *isp;
772 	int error;
773 };
774 
775 static void
776 isp_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
777 {
778 	struct imush *imushp = (struct imush *) arg;
779 	if (error) {
780 		imushp->error = error;
781 	} else {
782 		imushp->isp->isp_rquest_dma = segs->ds_addr;
783 	}
784 }
785 
786 static void
787 isp_map_result(void *arg, bus_dma_segment_t *segs, int nseg, int error)
788 {
789 	struct imush *imushp = (struct imush *) arg;
790 	if (error) {
791 		imushp->error = error;
792 	} else {
793 		imushp->isp->isp_result_dma = segs->ds_addr;
794 	}
795 }
796 
797 static void
798 isp_map_fcscrt(void *arg, bus_dma_segment_t *segs, int nseg, int error)
799 {
800 	struct imush *imushp = (struct imush *) arg;
801 	if (error) {
802 		imushp->error = error;
803 	} else {
804 		fcparam *fcp = imushp->isp->isp_param;
805 		fcp->isp_scdma = segs->ds_addr;
806 	}
807 }
808 
809 static int
810 isp_pci_mbxdma(struct ispsoftc *isp)
811 {
812 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
813 	caddr_t base;
814 	u_int32_t len;
815 	int i, error;
816 	bus_size_t lim;
817 	struct imush im;
818 
819 
820 	/*
821 	 * Already been here? If so, leave...
822 	 */
823 	if (isp->isp_rquest) {
824 		return (0);
825 	}
826 
827 	len = sizeof (ISP_SCSI_XFER_T **) * isp->isp_maxcmds;
828 	isp->isp_xflist = (ISP_SCSI_XFER_T **) malloc(len, M_DEVBUF, M_WAITOK);
829 	if (isp->isp_xflist == NULL) {
830 		printf("%s: can't alloc xflist array\n", isp->isp_name);
831 		return (1);
832 	}
833 	bzero(isp->isp_xflist, len);
834 	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
835 	pci->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF,  M_WAITOK);
836 	if (pci->dmaps == NULL) {
837 		printf("%s: can't alloc dma maps\n", isp->isp_name);
838 		free(isp->isp_xflist, M_DEVBUF);
839 		return (1);
840 	}
841 
842 	if (IS_FC(isp) || IS_1080(isp) || IS_12X0(isp))
843 		lim = BUS_SPACE_MAXADDR + 1;
844 	else
845 		lim = BUS_SPACE_MAXADDR_24BIT + 1;
846 
847 	/*
848 	 * Allocate and map the request, result queues, plus FC scratch area.
849 	 */
850 	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
851 	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
852 	if (IS_FC(isp)) {
853 		len += ISP2100_SCRLEN;
854 	}
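	/*
	 * All of the control structures live in one contiguous allocation:
	 * the request queue first, then the result queue, then (FC only)
	 * the ISP2100_SCRLEN scratch area; the offsets used when loading
	 * the maps below follow that layout.
	 */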
855 	if (bus_dma_tag_create(pci->parent_dmat, PAGE_SIZE, lim,
856 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
857 	    BUS_SPACE_MAXSIZE_32BIT, 0, &pci->cntrol_dmat) != 0) {
858 		printf("%s: cannot create a dma tag for control spaces\n",
859 		    isp->isp_name);
860 		free(isp->isp_xflist, M_DEVBUF);
861 		free(pci->dmaps, M_DEVBUF);
862 		return (1);
863 	}
864 	if (bus_dmamem_alloc(pci->cntrol_dmat, (void **)&base,
865 	    BUS_DMA_NOWAIT, &pci->cntrol_dmap) != 0) {
866 		printf("%s: cannot allocate %d bytes of CCB memory\n",
867 		    isp->isp_name, len);
868 		free(isp->isp_xflist, M_DEVBUF);
869 		free(pci->dmaps, M_DEVBUF);
870 		return (1);
871 	}
872 
873 	isp->isp_rquest = base;
874 	im.isp = isp;
875 	im.error = 0;
876 	bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_rquest,
877 	    ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN), isp_map_rquest, &im, 0);
878 	if (im.error) {
879 		printf("%s: error %d loading dma map for DMA request queue\n",
880 		    isp->isp_name, im.error);
881 		free(isp->isp_xflist, M_DEVBUF);
882 		free(pci->dmaps, M_DEVBUF);
883 		isp->isp_rquest = NULL;
884 		return (1);
885 	}
886 	isp->isp_result = base + ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
887 	im.error = 0;
888 	bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_result,
889 	    ISP_QUEUE_SIZE(RESULT_QUEUE_LEN), isp_map_result, &im, 0);
890 	if (im.error) {
891 		printf("%s: error %d loading dma map for DMA result queue\n",
892 		    isp->isp_name, im.error);
893 		free(isp->isp_xflist, M_DEVBUF);
894 		free(pci->dmaps, M_DEVBUF);
895 		isp->isp_rquest = NULL;
896 		return (1);
897 	}
898 
899 	for (i = 0; i < isp->isp_maxcmds; i++) {
900 		error = bus_dmamap_create(pci->parent_dmat, 0, &pci->dmaps[i]);
901 		if (error) {
902 			printf("%s: error %d creating per-cmd DMA maps\n",
903 			    isp->isp_name, error);
904 			free(isp->isp_xflist, M_DEVBUF);
905 			free(pci->dmaps, M_DEVBUF);
906 			isp->isp_rquest = NULL;
907 			return (1);
908 		}
909 	}
910 
911 	if (IS_FC(isp)) {
912 		fcparam *fcp = (fcparam *) isp->isp_param;
913 		fcp->isp_scratch = base +
914 			ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN) +
915 			ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
916 		im.error = 0;
917 		bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap,
918 		    fcp->isp_scratch, ISP2100_SCRLEN, isp_map_fcscrt, &im, 0);
919 		if (im.error) {
920 			printf("%s: error %d loading FC scratch area\n",
921 			    isp->isp_name, im.error);
922 			free(isp->isp_xflist, M_DEVBUF);
923 			free(pci->dmaps, M_DEVBUF);
924 			isp->isp_rquest = NULL;
925 			return (1);
926 		}
927 	}
928 	return (0);
929 }
930 
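/*
 * dma2() is the busdma callback for command data: it copies as many S/G
 * segments as fit into the request entry itself (ISP_RQDSEG, or
 * ISP_RQDSEG_T2 for FC), then chains RQSTYPE_DATASEG continuation entries
 * for the remainder, bumping rqs_entry_count as it goes. If the request
 * queue would wrap into the consumer pointer it gives up with
 * MUSHERR_NOQENTRIES so the command can be retried later.
 */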
931 static void dma2 __P((void *, bus_dma_segment_t *, int, int));
932 typedef struct {
933 	struct ispsoftc *isp;
934 	ISP_SCSI_XFER_T *ccb;
935 	ispreq_t *rq;
936 	u_int16_t *iptrp;
937 	u_int16_t optr;
938 	u_int error;
939 } mush_t;
940 
941 #define	MUSHERR_NOQENTRIES	-2
942 
943 static void
944 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
945 {
946 	mush_t *mp;
947 	ISP_SCSI_XFER_T *ccb;
948 	struct ispsoftc *isp;
949 	struct isp_pcisoftc *pci;
950 	bus_dmamap_t *dp;
951 	bus_dma_segment_t *eseg;
952 	ispreq_t *rq;
953 	u_int16_t *iptrp;
954 	u_int16_t optr;
955 	ispcontreq_t *crq;
956 	int drq, seglim, datalen;
957 
958 	mp = (mush_t *) arg;
959 	if (error) {
960 		mp->error = error;
961 		return;
962 	}
963 
964 	isp = mp->isp;
965 	if (nseg < 1) {
966 		printf("%s: zero or negative segment count\n", isp->isp_name);
967 		mp->error = EFAULT;
968 		return;
969 	}
970 	ccb = mp->ccb;
971 	rq = mp->rq;
972 	iptrp = mp->iptrp;
973 	optr = mp->optr;
974 	pci = (struct isp_pcisoftc *)isp;
975 	dp = &pci->dmaps[rq->req_handle - 1];
976 
977 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
978 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
979 		drq = REQFLAG_DATA_IN;
980 	} else {
981 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
982 		drq = REQFLAG_DATA_OUT;
983 	}
984 
985 	datalen = XS_XFRLEN(ccb);
986 	if (IS_FC(isp)) {
987 		seglim = ISP_RQDSEG_T2;
988 		((ispreqt2_t *)rq)->req_totalcnt = datalen;
989 		((ispreqt2_t *)rq)->req_flags |= drq;
990 	} else {
991 		seglim = ISP_RQDSEG;
992 		rq->req_flags |= drq;
993 	}
994 
995 	eseg = dm_segs + nseg;
996 
997 	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
998 		if (IS_FC(isp)) {
999 			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
1000 			rq2->req_dataseg[rq2->req_seg_count].ds_base =
1001 			    dm_segs->ds_addr;
1002 			rq2->req_dataseg[rq2->req_seg_count].ds_count =
1003 			    dm_segs->ds_len;
1004 		} else {
1005 			rq->req_dataseg[rq->req_seg_count].ds_base =
1006 				dm_segs->ds_addr;
1007 			rq->req_dataseg[rq->req_seg_count].ds_count =
1008 				dm_segs->ds_len;
1009 		}
1010 		datalen -= dm_segs->ds_len;
1011 #if	0
1012 		if (IS_FC(isp)) {
1013 			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
1014 			printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
1015 			    isp->isp_name, rq->req_seg_count,
1016 			    rq2->req_dataseg[rq2->req_seg_count].ds_count,
1017 			    rq2->req_dataseg[rq2->req_seg_count].ds_base);
1018 		} else {
1019 			printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
1020 			    isp->isp_name, rq->req_seg_count,
1021 			    rq->req_dataseg[rq->req_seg_count].ds_count,
1022 			    rq->req_dataseg[rq->req_seg_count].ds_base);
1023 		}
1024 #endif
1025 		rq->req_seg_count++;
1026 		dm_segs++;
1027 	}
1028 
1029 	while (datalen > 0 && dm_segs != eseg) {
1030 		crq = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
1031 		*iptrp = ISP_NXT_QENTRY(*iptrp, RQUEST_QUEUE_LEN);
1032 		if (*iptrp == optr) {
1033 #if	0
1034 			printf("%s: Request Queue Overflow++\n", isp->isp_name);
1035 #endif
1036 			mp->error = MUSHERR_NOQENTRIES;
1037 			return;
1038 		}
1039 		rq->req_header.rqs_entry_count++;
1040 		bzero((void *)crq, sizeof (*crq));
1041 		crq->req_header.rqs_entry_count = 1;
1042 		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
1043 
1044 		seglim = 0;
1045 		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
1046 			crq->req_dataseg[seglim].ds_base =
1047 			    dm_segs->ds_addr;
1048 			crq->req_dataseg[seglim].ds_count =
1049 			    dm_segs->ds_len;
1050 #if	0
1051 			printf("%s: seg%d[%d] cnt 0x%x paddr 0x%08x\n",
1052 			    isp->isp_name, rq->req_header.rqs_entry_count-1,
1053 			    seglim, crq->req_dataseg[seglim].ds_count,
1054 			    crq->req_dataseg[seglim].ds_base);
1055 #endif
1056 			rq->req_seg_count++;
1057 			datalen -= dm_segs->ds_len;
1058 			dm_segs++;
1059 			seglim++;
1060 		}
1061 	}
1062 }
1063 
1064 static int
1065 isp_pci_dmasetup(struct ispsoftc *isp, ISP_SCSI_XFER_T *ccb, ispreq_t *rq,
1066 	u_int16_t *iptrp, u_int16_t optr)
1067 {
1068 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
1069 	struct ccb_hdr *ccb_h;
1070 	struct ccb_scsiio *csio;
1071 	bus_dmamap_t *dp = NULL;
1072 	mush_t mush, *mp;
1073 
1074 	csio = (struct ccb_scsiio *) ccb;
1075 	ccb_h = &csio->ccb_h;
1076 
1077 	if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_NONE) {
1078 		rq->req_seg_count = 1;
1079 		return (CMD_QUEUED);
1080 	}
1081 
1082 	/*
1083 	 * Stash the state that the dma2() callback will need, since
1084 	 * bus_dmamap_load() hands back its segment list via a callback.
1085 	 */
1086 	mp = &mush;
1087 	mp->isp = isp;
1088 	mp->ccb = ccb;
1089 	mp->rq = rq;
1090 	mp->iptrp = iptrp;
1091 	mp->optr = optr;
1092 	mp->error = 0;
1093 
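	/*
	 * Three cases below: a virtual buffer is loaded through busdma
	 * (dma2() runs synchronously; deferred allocation is rejected),
	 * a physical buffer is wrapped in a single fake segment, and a
	 * physical S/G list is handed to dma2() as-is.
	 */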
1094 	if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
1095 		if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
1096 			int error, s;
1097 			dp = &pci->dmaps[rq->req_handle - 1];
1098 			s = splsoftvm();
1099 			error = bus_dmamap_load(pci->parent_dmat, *dp,
1100 			    csio->data_ptr, csio->dxfer_len, dma2, mp, 0);
1101 			if (error == EINPROGRESS) {
1102 				bus_dmamap_unload(pci->parent_dmat, *dp);
1103 				mp->error = EINVAL;
1104 				printf("%s: deferred dma allocation not "
1105 				    "supported\n", isp->isp_name);
1106 			} else if (error && mp->error == 0) {
1107 #ifdef	DIAGNOSTIC
1108 				printf("%s: error %d in dma mapping code\n",
1109 				    isp->isp_name, error);
1110 #endif
1111 				mp->error = error;
1112 			}
1113 			splx(s);
1114 		} else {
1115 			/* Pointer to physical buffer */
1116 			struct bus_dma_segment seg;
1117 			seg.ds_addr = (bus_addr_t)csio->data_ptr;
1118 			seg.ds_len = csio->dxfer_len;
1119 			dma2(mp, &seg, 1, 0);
1120 		}
1121 	} else {
1122 		struct bus_dma_segment *segs;
1123 
1124 		if ((ccb_h->flags & CAM_DATA_PHYS) != 0) {
1125 			printf("%s: Physical segment pointers unsupported\n",
1126 				isp->isp_name);
1127 			mp->error = EINVAL;
1128 		} else if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0) {
1129 			printf("%s: Virtual segment addresses unsupported\n",
1130 				isp->isp_name);
1131 			mp->error = EINVAL;
1132 		} else {
1133 			/* Just use the segments provided */
1134 			segs = (struct bus_dma_segment *) csio->data_ptr;
1135 			dma2(mp, segs, csio->sglist_cnt, 0);
1136 		}
1137 	}
1138 	if (mp->error) {
1139 		int retval = CMD_COMPLETE;
1140 		if (mp->error == MUSHERR_NOQENTRIES) {
1141 			retval = CMD_EAGAIN;
1142 		} else if (mp->error == EFBIG) {
1143 			XS_SETERR(csio, CAM_REQ_TOO_BIG);
1144 		} else if (mp->error == EINVAL) {
1145 			XS_SETERR(csio, CAM_REQ_INVALID);
1146 		} else {
1147 			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
1148 		}
1149 		return (retval);
1150 	} else {
1151 		/*
1152 		 * Check to see if we weren't cancelled while sleeping on
1153 		 * getting DMA resources...
1154 		 */
1155 		if ((ccb_h->status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1156 			if (dp) {
1157 				bus_dmamap_unload(pci->parent_dmat, *dp);
1158 			}
1159 			return (CMD_COMPLETE);
1160 		}
1161 		return (CMD_QUEUED);
1162 	}
1163 }
1164 
1165 static void
1166 isp_pci_dmateardown(struct ispsoftc *isp, ISP_SCSI_XFER_T *xs, u_int32_t handle)
1167 {
1168 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
1169 	bus_dmamap_t *dp = &pci->dmaps[handle - 1];
1170 	KASSERT((handle > 0 && handle <= isp->isp_maxcmds),
1171 	    ("bad handle in isp_pci_dmateardown"));
1172 	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1173 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTREAD);
1174 	} else {
1175 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTWRITE);
1176 	}
1177 	bus_dmamap_unload(pci->parent_dmat, *dp);
1178 }
1179 
1180 
1181 static void
1182 isp_pci_reset1(struct ispsoftc *isp)
1183 {
1184 	/* Make sure the BIOS is disabled */
1185 	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
1186 }
1187 
1188 static void
1189 isp_pci_dumpregs(struct ispsoftc *isp)
1190 {
1191 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
1192 	printf("%s: PCI Status Command/Status=%lx\n", pci->pci_isp.isp_name,
1193 	    pci_conf_read(pci->pci_id, PCIR_COMMAND));
1194 }
1195