xref: /titanic_51/usr/src/uts/i86pc/io/rootnex.c (revision 8eea8e29cc4374d1ee24c25a07f45af132db3499)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * Intel PC root nexus driver
31  *	based on sun4c root nexus driver 1.30
32  */
33 
34 #include <sys/sysmacros.h>
35 #include <sys/conf.h>
36 #include <sys/autoconf.h>
38 #include <sys/debug.h>
39 #include <sys/psw.h>
40 #include <sys/ddidmareq.h>
41 #include <sys/promif.h>
42 #include <sys/devops.h>
43 #include <sys/kmem.h>
44 #include <sys/cmn_err.h>
45 #include <vm/seg.h>
46 #include <vm/seg_kmem.h>
47 #include <vm/seg_dev.h>
48 #include <sys/vmem.h>
49 #include <sys/mman.h>
50 #include <vm/hat.h>
51 #include <vm/as.h>
52 #include <vm/page.h>
53 #include <sys/avintr.h>
54 #include <sys/errno.h>
55 #include <sys/modctl.h>
56 #include <sys/ddi_impldefs.h>
57 #include <sys/sunddi.h>
58 #include <sys/sunndi.h>
59 #include <sys/psm.h>
60 #include <sys/ontrap.h>
61 
62 #define	ptob64(x)		(((uint64_t)(x)) << PAGESHIFT)
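/*
 * ptob64() widens its argument to 64 bits before shifting, so a page
 * count whose byte total exceeds 32 bits (e.g. 0x100000 pages == 4GB)
 * does not overflow the way the ulong_t-based ptob() can on a 32-bit
 * kernel.
 */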
63 
64 extern void	i86_pp_map(page_t *, caddr_t);
65 extern void	i86_va_map(caddr_t, struct as *, caddr_t);
66 extern int	(*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *,
67 		    psm_intr_op_t, int *);
68 extern int	isa_resource_setup(void);
69 
70 /* Semi-temporary patchables to phase in bug fixes */
71 int rootnex_bind_fail = 1;
72 int rootnex_bind_warn = 1;
73 uint8_t *rootnex_warn_list;
74 
75 /* bitmasks for rootnex_warn_list. Up to 8 different warnings with uint8_t */
76 #define	ROOTNEX_BIND_WARNING	(0x1 << 0)
77 
78 /*
79  * DMA related static data
80  */
81 static uintptr_t dvma_call_list_id = 0;
82 
83 /*
84  * Use the device arena for device control register mappings.
85  * Various kernel memory walkers (debugger, dtrace) need to know about
86  * this range so they can avoid it and prevent undesired device activity.
87  */
88 extern void *device_arena_alloc(size_t size, int vm_flag);
89 extern void device_arena_free(void * vaddr, size_t size);
90 
91 
92 /*
93  * Hack to handle poke faults on Calvin-class machines
94  */
95 extern int pokefault;
96 static kmutex_t pokefault_mutex;
97 
98 
99 /*
100  * Internal functions
101  */
102 static int
103 rootnex_ctl_children(dev_info_t *dip, dev_info_t *rdip,
104     ddi_ctl_enum_t ctlop, dev_info_t *child);
105 
106 static int
107 rootnex_ctlops_poke(peekpoke_ctlops_t *in_args);
108 
109 static int
110 rootnex_ctlops_peek(peekpoke_ctlops_t *in_args, void *result);
111 
112 /*
113  * config information
114  */
115 
116 static int
117 rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
118     off_t offset, off_t len, caddr_t *vaddrp);
119 
120 static int
121 rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip,
122     struct hat *hat, struct seg *seg, caddr_t addr,
123     struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock);
124 
125 static int
126 rootnex_dma_allochdl(dev_info_t *, dev_info_t *, ddi_dma_attr_t *,
127     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *);
128 
129 static int
130 rootnex_dma_freehdl(dev_info_t *, dev_info_t *, ddi_dma_handle_t);
131 
132 static int
133 rootnex_dma_bindhdl(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
134     struct ddi_dma_req *, ddi_dma_cookie_t *, uint_t *);
135 
136 static int
137 rootnex_dma_unbindhdl(dev_info_t *, dev_info_t *, ddi_dma_handle_t);
138 
139 static int
140 rootnex_dma_flush(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
141     off_t, size_t, uint_t);
142 
143 static int
144 rootnex_dma_win(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
145     uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);
146 
147 static int
148 rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip,
149     struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep);
150 
151 static int
152 rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
153     ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
154     off_t *offp, size_t *lenp, caddr_t *objp, uint_t cache_flags);
155 
156 static int
157 rootnex_ctlops(dev_info_t *, dev_info_t *, ddi_ctl_enum_t, void *, void *);
158 
159 static struct intrspec *
160 rootnex_get_ispec(dev_info_t *rdip, int inum);
161 
162 static int
163 rootnex_intr_ops(dev_info_t *, dev_info_t *, ddi_intr_op_t,
164     ddi_intr_handle_impl_t *, void *);
165 
166 static struct bus_ops rootnex_bus_ops = {
167 	BUSO_REV,
168 	rootnex_map,
169 	NULL,
170 	NULL,
171 	NULL,
172 	rootnex_map_fault,
173 	rootnex_dma_map,
174 	rootnex_dma_allochdl,
175 	rootnex_dma_freehdl,
176 	rootnex_dma_bindhdl,
177 	rootnex_dma_unbindhdl,
178 	rootnex_dma_flush,
179 	rootnex_dma_win,
180 	rootnex_dma_mctl,
181 	rootnex_ctlops,
182 	ddi_bus_prop_op,
183 	i_ddi_rootnex_get_eventcookie,
184 	i_ddi_rootnex_add_eventcall,
185 	i_ddi_rootnex_remove_eventcall,
186 	i_ddi_rootnex_post_event,
187 	0,			/* bus_intr_ctl */
188 	0,			/* bus_config */
189 	0,			/* bus_unconfig */
190 	NULL,			/* bus_fm_init */
191 	NULL,			/* bus_fm_fini */
192 	NULL,			/* bus_fm_access_enter */
193 	NULL,			/* bus_fm_access_exit */
194 	NULL,			/* bus_power */
195 	rootnex_intr_ops	/* bus_intr_op */
196 };
197 
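/*
 * Scratch state carried between successive rootnex_get_phyaddr() calls
 * while walking a DMA object page by page: the current virtual address,
 * the page or address space it belongs to, the mapping type (ph_mapinfo)
 * and the physical address of the current page.
 */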
198 struct priv_handle {
199 	caddr_t	ph_vaddr;
200 	union {
201 		page_t *pp;
202 		struct as *asp;
203 	} ph_u;
204 	uint_t  ph_mapinfo;
205 	uint64_t ph_padr;
206 };
207 static uint64_t rootnex_get_phyaddr();
208 static int rootnex_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
209 static int rootnex_io_rdsync(ddi_dma_impl_t *hp);
210 static int rootnex_io_wtsync(ddi_dma_impl_t *hp, int);
211 static int rootnex_io_brkup_attr(dev_info_t *dip, dev_info_t *rdip,
212     struct ddi_dma_req *dmareq, ddi_dma_handle_t handle,
213     struct priv_handle *php);
214 static int rootnex_io_brkup_lim(dev_info_t *dip, dev_info_t *rdip,
215     struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep,
216     ddi_dma_lim_t *dma_lim, struct priv_handle *php);
217 
218 static struct dev_ops rootnex_ops = {
219 	DEVO_REV,
220 	0,		/* refcnt */
221 	ddi_no_info,	/* info */
222 	nulldev,
223 	nulldev,	/* probe */
224 	rootnex_attach,
225 	nulldev,	/* detach */
226 	nulldev,	/* reset */
227 	0,		/* cb_ops */
228 	&rootnex_bus_ops
229 };
230 
231 /*
232  * Module linkage information for the kernel.
233  */
234 
235 static struct modldrv modldrv = {
236 	&mod_driverops, /* Type of module.  This one is a nexus driver */
237 	"i86pc root nexus %I%",
238 	&rootnex_ops,	/* Driver ops */
239 };
240 
241 static struct modlinkage modlinkage = {
242 	MODREV_1, (void *)&modldrv, NULL
243 };
244 
245 
246 int
247 _init(void)
248 {
249 	return (mod_install(&modlinkage));
250 }
251 
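/*
 * The root nexus can never be unloaded; _fini always refuses with EBUSY.
 */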
252 int
253 _fini(void)
254 {
255 	return (EBUSY);
256 }
257 
258 int
259 _info(struct modinfo *modinfop)
260 {
261 	return (mod_info(&modlinkage, modinfop));
262 }
263 
264 /*
265  * rootnex_attach:
266  *
267  *	attach the root nexus.
268  */
269 
270 static void add_root_props(dev_info_t *);
271 
272 /*ARGSUSED*/
273 static int
274 rootnex_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
275 {
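	/*
	 * Poke fault handling can run at high-level interrupt context, so
	 * this must be a spin mutex raised to the highest priority level.
	 */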
276 	mutex_init(&pokefault_mutex, NULL, MUTEX_SPIN, (void *)ipltospl(15));
277 
278 	add_root_props(devi);
279 
280 	cmn_err(CE_CONT, "?root nexus = %s\n", ddi_get_name(devi));
281 
282 	i_ddi_rootnex_init_events(devi);
283 
284 	/*
285 	 * allocate array to track which major numbers we have printed warnings
286 	 * for.
287 	 */
288 	rootnex_warn_list = kmem_zalloc(devcnt * sizeof (*rootnex_warn_list),
289 	    KM_SLEEP);
290 
291 	return (DDI_SUCCESS);
292 }
293 
294 
295 /*
296  * Add statically defined root properties to this list...
297  */
298 
299 static const int pagesize = PAGESIZE;
300 static const int mmu_pagesize = MMU_PAGESIZE;
301 static const int mmu_pageoffset = MMU_PAGEOFFSET;
302 
303 struct prop_def {
304 	char *prop_name;
305 	caddr_t prop_value;
306 };
307 
308 static struct prop_def root_props[] = {
309 	{ "PAGESIZE",		(caddr_t)&pagesize },
310 	{ "MMU_PAGESIZE",	(caddr_t)&mmu_pagesize},
311 	{ "MMU_PAGEOFFSET",	(caddr_t)&mmu_pageoffset},
312 };
313 
314 #define	NROOT_PROPS	(sizeof (root_props) / sizeof (struct prop_def))
315 
316 static void
317 add_root_props(dev_info_t *devi)
318 {
319 	int i;
320 	struct prop_def *rpp;
321 
322 	/*
323 	 * Note this for loop works because all of the root_props
324 	 * properties are integers - if this changes, the for
325 	 * loop will have to change.
326 	 */
327 	for (i = 0, rpp = root_props; i < NROOT_PROPS; ++i, ++rpp) {
328 		(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, devi,
329 		    rpp->prop_name, *((int *)rpp->prop_value));
330 	}
331 
332 	/*
333 	 * Create the root node "boolean" property
334 	 * corresponding to addressing type supported in the root node:
335 	 *
336 	 * Choices are:
337 	 *	"relative-addressing" (OBP PROMS)
338 	 *	"generic-addressing"  (Sun4 -- pseudo OBP/DDI)
339 	 */
340 
341 	(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, devi,
342 	    DDI_RELATIVE_ADDRESSING, 1);
343 
344 }
345 
346 /*
347  * #define	DDI_MAP_DEBUG (c.f. ddi_impl.c)
348  */
349 #ifdef	DDI_MAP_DEBUG
350 extern int ddi_map_debug_flag;
351 #define	ddi_map_debug	if (ddi_map_debug_flag) prom_printf
352 #endif	/* DDI_MAP_DEBUG */
353 
354 
355 /*
356  * we don't support mapping of I/O cards above 4GB
357  */
358 static int
359 rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp)
360 {
361 	ulong_t base;
362 	void *cvaddr;
363 	uint_t npages, pgoffset;
364 	struct regspec *rp;
365 	ddi_acc_hdl_t *hp;
366 	ddi_acc_impl_t *ap;
367 	uint_t	hat_acc_flags;
368 
369 	rp = mp->map_obj.rp;
370 	hp = mp->map_handlep;
371 
372 #ifdef	DDI_MAP_DEBUG
373 	ddi_map_debug(
374 	    "rootnex_map_regspec: <0x%x 0x%x 0x%x> handle 0x%x\n",
375 	    rp->regspec_bustype, rp->regspec_addr,
376 	    rp->regspec_size, mp->map_handlep);
377 #endif	/* DDI_MAP_DEBUG */
378 
379 	/*
380 	 * I/O or memory mapping
381 	 *
382 	 *	<bustype=0, addr=x, len=x>: memory
383 	 *	<bustype=1, addr=x, len=x>: i/o
384 	 *	<bustype >1, addr=0, len=x>: x86-compatibility i/o
385 	 */
386 
387 	if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) {
388 		cmn_err(CE_WARN, "rootnex: invalid register spec"
389 		    " <0x%x, 0x%x, 0x%x>", rp->regspec_bustype,
390 		    rp->regspec_addr, rp->regspec_size);
391 		return (DDI_FAILURE);
392 	}
393 
394 	if (rp->regspec_bustype != 0) {
395 		/*
396 		 * I/O space - needs a handle.
397 		 */
398 		if (hp == NULL) {
399 			return (DDI_FAILURE);
400 		}
401 		ap = (ddi_acc_impl_t *)hp->ah_platform_private;
402 		ap->ahi_acc_attr |= DDI_ACCATTR_IO_SPACE;
403 		impl_acc_hdl_init(hp);
404 
405 		if (mp->map_flags & DDI_MF_DEVICE_MAPPING) {
406 #ifdef  DDI_MAP_DEBUG
407 			ddi_map_debug("rootnex_map_regspec: mmap() \
408 to I/O space is not supported.\n");
409 #endif  /* DDI_MAP_DEBUG */
410 			return (DDI_ME_INVAL);
411 		} else {
412 			/*
413 			 * 1275-compliant vs. compatibility i/o mapping
414 			 */
415 			*vaddrp =
416 			    (rp->regspec_bustype > 1 && rp->regspec_addr == 0) ?
417 				((caddr_t)(uintptr_t)rp->regspec_bustype) :
418 				((caddr_t)(uintptr_t)rp->regspec_addr);
419 		}
420 
421 #ifdef	DDI_MAP_DEBUG
422 		ddi_map_debug(
423 	    "rootnex_map_regspec: \"Mapping\" %d bytes I/O space at 0x%x\n",
424 		    rp->regspec_size, *vaddrp);
425 #endif	/* DDI_MAP_DEBUG */
426 		return (DDI_SUCCESS);
427 	}
428 
429 	/*
430 	 * Memory space
431 	 */
432 
433 	if (hp != NULL) {
434 		/*
435 		 * hat layer ignores
436 		 * hp->ah_acc.devacc_attr_endian_flags.
437 		 */
438 		switch (hp->ah_acc.devacc_attr_dataorder) {
439 		case DDI_STRICTORDER_ACC:
440 			hat_acc_flags = HAT_STRICTORDER;
441 			break;
442 		case DDI_UNORDERED_OK_ACC:
443 			hat_acc_flags = HAT_UNORDERED_OK;
444 			break;
445 		case DDI_MERGING_OK_ACC:
446 			hat_acc_flags = HAT_MERGING_OK;
447 			break;
448 		case DDI_LOADCACHING_OK_ACC:
449 			hat_acc_flags = HAT_LOADCACHING_OK;
450 			break;
451 		case DDI_STORECACHING_OK_ACC:
452 			hat_acc_flags = HAT_STORECACHING_OK;
453 			break;
454 		}
455 		ap = (ddi_acc_impl_t *)hp->ah_platform_private;
456 		ap->ahi_acc_attr |= DDI_ACCATTR_CPU_VADDR;
457 		impl_acc_hdl_init(hp);
458 		hp->ah_hat_flags = hat_acc_flags;
459 	} else {
460 		hat_acc_flags = HAT_STRICTORDER;
461 	}
462 
463 	base = (ulong_t)rp->regspec_addr & (~MMU_PAGEOFFSET); /* base addr */
464 	pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET; /* offset */
465 
466 	if (rp->regspec_size == 0) {
467 #ifdef  DDI_MAP_DEBUG
468 		ddi_map_debug("rootnex_map_regspec: zero regspec_size\n");
469 #endif  /* DDI_MAP_DEBUG */
470 		return (DDI_ME_INVAL);
471 	}
472 
473 	if (mp->map_flags & DDI_MF_DEVICE_MAPPING) {
474 		*vaddrp = (caddr_t)mmu_btop(base);
475 	} else {
476 		npages = mmu_btopr(rp->regspec_size + pgoffset);
477 
478 #ifdef	DDI_MAP_DEBUG
479 		ddi_map_debug("rootnex_map_regspec: Mapping %d pages \
480 physical %x ",
481 		    npages, base);
482 #endif	/* DDI_MAP_DEBUG */
483 
484 		cvaddr = device_arena_alloc(ptob(npages), VM_NOSLEEP);
485 		if (cvaddr == NULL)
486 			return (DDI_ME_NORESOURCES);
487 
488 		/*
489 		 * Now map in the pages we've allocated...
490 		 */
491 		hat_devload(kas.a_hat, cvaddr, mmu_ptob(npages), mmu_btop(base),
492 		    mp->map_prot | hat_acc_flags, HAT_LOAD_LOCK);
493 		*vaddrp = (caddr_t)cvaddr + pgoffset;
494 	}
495 
496 #ifdef	DDI_MAP_DEBUG
497 	ddi_map_debug("at virtual 0x%x\n", *vaddrp);
498 #endif	/* DDI_MAP_DEBUG */
499 	return (DDI_SUCCESS);
500 }
501 
502 static int
503 rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp)
504 {
505 	caddr_t addr = (caddr_t)*vaddrp;
506 	uint_t npages, pgoffset;
507 	struct regspec *rp;
508 
509 	if (mp->map_flags & DDI_MF_DEVICE_MAPPING)
510 		return (0);
511 
512 	rp = mp->map_obj.rp;
513 
514 	if (rp->regspec_size == 0) {
515 #ifdef  DDI_MAP_DEBUG
516 		ddi_map_debug("rootnex_unmap_regspec: zero regspec_size\n");
517 #endif  /* DDI_MAP_DEBUG */
518 		return (DDI_ME_INVAL);
519 	}
520 
521 	/*
522 	 * I/O or memory mapping:
523 	 *
524 	 *	<bustype=0, addr=x, len=x>: memory
525 	 *	<bustype=1, addr=x, len=x>: i/o
526 	 *	<bustype >1, addr=0, len=x>: x86-compatibility i/o
527 	 */
528 	if (rp->regspec_bustype != 0) {
529 		/*
530 		 * This is I/O space, which requires no particular
531 		 * processing on unmap since it isn't mapped in the
532 		 * first place.
533 		 */
534 		return (DDI_SUCCESS);
535 	}
536 
537 	/*
538 	 * Memory space
539 	 */
540 	pgoffset = (uintptr_t)addr & MMU_PAGEOFFSET;
541 	npages = mmu_btopr(rp->regspec_size + pgoffset);
542 	hat_unload(kas.a_hat, addr - pgoffset, ptob(npages), HAT_UNLOAD_UNLOCK);
543 	device_arena_free(addr - pgoffset, ptob(npages));
544 
545 	/*
546 	 * Destroy the pointer - the mapping has logically gone
547 	 */
548 	*vaddrp = NULL;
549 
550 	return (DDI_SUCCESS);
551 }
552 
553 static int
554 rootnex_map_handle(ddi_map_req_t *mp)
555 {
556 	ddi_acc_hdl_t *hp;
557 	ulong_t base;
558 	uint_t pgoffset;
559 	struct regspec *rp;
560 
561 	rp = mp->map_obj.rp;
562 
563 #ifdef	DDI_MAP_DEBUG
564 	ddi_map_debug(
565 	    "rootnex_map_handle: <0x%x 0x%x 0x%x> handle 0x%x\n",
566 	    rp->regspec_bustype, rp->regspec_addr,
567 	    rp->regspec_size, mp->map_handlep);
568 #endif	/* DDI_MAP_DEBUG */
569 
570 	/*
571 	 * I/O or memory mapping:
572 	 *
573 	 *	<bustype=0, addr=x, len=x>: memory
574 	 *	<bustype=1, addr=x, len=x>: i/o
575 	 *	<bustype >1, addr=0, len=x>: x86-compatibility i/o
576 	 */
577 	if (rp->regspec_bustype != 0) {
578 		/*
579 		 * This refers to I/O space, and we don't support "mapping"
580 		 * I/O space to a user.
581 		 */
582 		return (DDI_FAILURE);
583 	}
584 
585 	/*
586 	 * Set up the hat_flags for the mapping.
587 	 */
588 	hp = mp->map_handlep;
589 
590 	switch (hp->ah_acc.devacc_attr_endian_flags) {
591 	case DDI_NEVERSWAP_ACC:
592 		hp->ah_hat_flags = HAT_NEVERSWAP | HAT_STRICTORDER;
593 		break;
594 	case DDI_STRUCTURE_LE_ACC:
595 		hp->ah_hat_flags = HAT_STRUCTURE_LE;
596 		break;
597 	case DDI_STRUCTURE_BE_ACC:
598 		return (DDI_FAILURE);
599 	default:
600 		return (DDI_REGS_ACC_CONFLICT);
601 	}
602 
603 	switch (hp->ah_acc.devacc_attr_dataorder) {
604 	case DDI_STRICTORDER_ACC:
605 		break;
606 	case DDI_UNORDERED_OK_ACC:
607 		hp->ah_hat_flags |= HAT_UNORDERED_OK;
608 		break;
609 	case DDI_MERGING_OK_ACC:
610 		hp->ah_hat_flags |= HAT_MERGING_OK;
611 		break;
612 	case DDI_LOADCACHING_OK_ACC:
613 		hp->ah_hat_flags |= HAT_LOADCACHING_OK;
614 		break;
615 	case DDI_STORECACHING_OK_ACC:
616 		hp->ah_hat_flags |= HAT_STORECACHING_OK;
617 		break;
618 	default:
619 		return (DDI_FAILURE);
620 	}
621 
622 	base = (ulong_t)rp->regspec_addr & (~MMU_PAGEOFFSET); /* base addr */
623 	pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET; /* offset */
624 
625 	if (rp->regspec_size == 0)
626 		return (DDI_ME_INVAL);
627 
628 	hp->ah_pfn = mmu_btop(base);
629 	hp->ah_pnum = mmu_btopr(rp->regspec_size + pgoffset);
630 
631 	return (DDI_SUCCESS);
632 }
633 
634 static int
635 rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
636 	off_t offset, off_t len, caddr_t *vaddrp)
637 {
638 	struct regspec *rp, tmp_reg;
639 	ddi_map_req_t mr = *mp;		/* Get private copy of request */
640 	int error;
641 
642 	mp = &mr;
643 
644 	switch (mp->map_op)  {
645 	case DDI_MO_MAP_LOCKED:
646 	case DDI_MO_UNMAP:
647 	case DDI_MO_MAP_HANDLE:
648 		break;
649 	default:
650 #ifdef	DDI_MAP_DEBUG
651 		cmn_err(CE_WARN, "rootnex_map: unimplemented map op %d.",
652 		    mp->map_op);
653 #endif	/* DDI_MAP_DEBUG */
654 		return (DDI_ME_UNIMPLEMENTED);
655 	}
656 
657 	if (mp->map_flags & DDI_MF_USER_MAPPING)  {
658 #ifdef	DDI_MAP_DEBUG
659 		cmn_err(CE_WARN, "rootnex_map: unimplemented map type: user.");
660 #endif	/* DDI_MAP_DEBUG */
661 		return (DDI_ME_UNIMPLEMENTED);
662 	}
663 
664 	/*
665 	 * First, if given an rnumber, convert it to a regspec...
666 	 * (Presumably, this is on behalf of a child of the root node?)
667 	 */
668 
669 	if (mp->map_type == DDI_MT_RNUMBER)  {
670 
671 		int rnumber = mp->map_obj.rnumber;
672 #ifdef	DDI_MAP_DEBUG
673 		static char *out_of_range =
674 		    "rootnex_map: Out of range rnumber <%d>, device <%s>";
675 #endif	/* DDI_MAP_DEBUG */
676 
677 		rp = i_ddi_rnumber_to_regspec(rdip, rnumber);
678 		if (rp == NULL)  {
679 #ifdef	DDI_MAP_DEBUG
680 			cmn_err(CE_WARN, out_of_range, rnumber,
681 			    ddi_get_name(rdip));
682 #endif	/* DDI_MAP_DEBUG */
683 			return (DDI_ME_RNUMBER_RANGE);
684 		}
685 
686 		/*
687 		 * Convert the given ddi_map_req_t from rnumber to regspec...
688 		 */
689 
690 		mp->map_type = DDI_MT_REGSPEC;
691 		mp->map_obj.rp = rp;
692 	}
693 
694 	/*
695 	 * Adjust offset and length corresponding to called values...
696 	 * XXX: A non-zero length means override the one in the regspec
697 	 * XXX: (regardless of what's in the parent's range?)
698 	 */
699 
700 	tmp_reg = *(mp->map_obj.rp);		/* Preserve underlying data */
701 	rp = mp->map_obj.rp = &tmp_reg;		/* Use tmp_reg in request */
702 
703 #ifdef	DDI_MAP_DEBUG
704 	cmn_err(CE_CONT,
705 		"rootnex: <%s,%s> <0x%x, 0x%x, 0x%d>"
706 		" offset %d len %d handle 0x%x\n",
707 		ddi_get_name(dip), ddi_get_name(rdip),
708 		rp->regspec_bustype, rp->regspec_addr, rp->regspec_size,
709 		offset, len, mp->map_handlep);
710 #endif	/* DDI_MAP_DEBUG */
711 
712 	/*
713 	 * I/O or memory mapping:
714 	 *
715 	 *	<bustype=0, addr=x, len=x>: memory
716 	 *	<bustype=1, addr=x, len=x>: i/o
717 	 *	<bustype >1, addr=0, len=x>: x86-compatibility i/o
718 	 */
719 
720 	if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) {
721 		cmn_err(CE_WARN, "<%s,%s> invalid register spec"
722 		    " <0x%x, 0x%x, 0x%x>", ddi_get_name(dip),
723 		    ddi_get_name(rdip), rp->regspec_bustype,
724 		    rp->regspec_addr, rp->regspec_size);
725 		return (DDI_ME_INVAL);
726 	}
727 
728 	if (rp->regspec_bustype > 1 && rp->regspec_addr == 0) {
729 		/*
730 		 * compatibility i/o mapping
731 		 */
732 		rp->regspec_bustype += (uint_t)offset;
733 	} else {
734 		/*
735 		 * Normal memory or i/o mapping
736 		 */
737 		rp->regspec_addr += (uint_t)offset;
738 	}
739 
740 	if (len != 0)
741 		rp->regspec_size = (uint_t)len;
742 
743 #ifdef	DDI_MAP_DEBUG
744 	cmn_err(CE_CONT,
745 		"             <%s,%s> <0x%x, 0x%x, 0x%d>"
746 		" offset %d len %d handle 0x%x\n",
747 		ddi_get_name(dip), ddi_get_name(rdip),
748 		rp->regspec_bustype, rp->regspec_addr, rp->regspec_size,
749 		offset, len, mp->map_handlep);
750 #endif	/* DDI_MAP_DEBUG */
751 
752 	/*
753 	 * Apply any parent ranges at this level, if applicable.
754 	 * (This is where nexus specific regspec translation takes place.
755 	 * Use of this function is implicit agreement that translation is
756 	 * provided via ddi_apply_range.)
757 	 */
758 
759 #ifdef	DDI_MAP_DEBUG
760 	ddi_map_debug("applying range of parent <%s> to child <%s>...\n",
761 	    ddi_get_name(dip), ddi_get_name(rdip));
762 #endif	/* DDI_MAP_DEBUG */
763 
764 	if ((error = i_ddi_apply_range(dip, rdip, mp->map_obj.rp)) != 0)
765 		return (error);
766 
767 	switch (mp->map_op)  {
768 	case DDI_MO_MAP_LOCKED:
769 
770 		/*
771 		 * Set up the locked down kernel mapping to the regspec...
772 		 */
773 
774 		return (rootnex_map_regspec(mp, vaddrp));
775 
776 	case DDI_MO_UNMAP:
777 
778 		/*
779 		 * Release mapping...
780 		 */
781 
782 		return (rootnex_unmap_regspec(mp, vaddrp));
783 
784 	case DDI_MO_MAP_HANDLE:
785 
786 		return (rootnex_map_handle(mp));
787 
788 	default:
789 		return (DDI_ME_UNIMPLEMENTED);
790 	}
791 }
792 
793 
794 /*
795  * rootnex_map_fault:
796  *
797  *	fault in mappings for requestors
798  */
799 /*ARGSUSED*/
800 static int
801 rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip,
802 	struct hat *hat, struct seg *seg, caddr_t addr,
803 	struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock)
804 {
805 	extern struct seg_ops segdev_ops;
806 
807 #ifdef	DDI_MAP_DEBUG
808 	ddi_map_debug("rootnex_map_fault: address <%x> pfn <%x>", addr, pfn);
809 	ddi_map_debug(" Seg <%s>\n",
810 	    seg->s_ops == &segdev_ops ? "segdev" :
811 	    seg == &kvseg ? "segkmem" : "NONE!");
812 #endif	/* DDI_MAP_DEBUG */
813 
814 	/*
815 	 * This is all terribly broken, but it is a start
816 	 *
817 	 * XXX	Note that this test means that segdev_ops
818 	 *	must be exported from seg_dev.c.
819 	 * XXX	What about devices with their own segment drivers?
820 	 */
821 	if (seg->s_ops == &segdev_ops) {
822 		struct segdev_data *sdp =
823 			(struct segdev_data *)seg->s_data;
824 
825 		if (hat == NULL) {
826 			/*
827 			 * This is one plausible interpretation of
828 			 * a null hat i.e. use the first hat on the
829 			 * address space hat list which by convention is
830 			 * the hat of the system MMU.  An alternative
831 			 * would be to panic ... this might well be better ...
832 			 */
833 			ASSERT(AS_READ_HELD(seg->s_as, &seg->s_as->a_lock));
834 			hat = seg->s_as->a_hat;
835 			cmn_err(CE_NOTE, "rootnex_map_fault: nil hat");
836 		}
837 		hat_devload(hat, addr, MMU_PAGESIZE, pfn, prot | sdp->hat_attr,
838 		    (lock ? HAT_LOAD_LOCK : HAT_LOAD));
839 	} else if (seg == &kvseg && dp == NULL) {
840 		hat_devload(kas.a_hat, addr, MMU_PAGESIZE, pfn, prot,
841 		    HAT_LOAD_LOCK);
842 	} else
843 		return (DDI_FAILURE);
844 	return (DDI_SUCCESS);
845 }
846 
847 
848 /*
849  * DMA routines- for all 80x86 machines.
850  */
851 
852 /*
853  * Shorthand defines
854  */
855 
856 #define	MAP	0
857 #define	BIND	1
858 #define	MAX_INT_BUF	(16*MMU_PAGESIZE)
859 #define	AHI_LIM		dma_lim->dlim_addr_hi
860 #define	AHI_ATTR	dma_attr->dma_attr_addr_hi
861 #define	OBJSIZE		dmareq->dmar_object.dmao_size
862 #define	OBJTYPE		dmareq->dmar_object.dmao_type
863 #define	FOURG		0x100000000ULL
864 #define	SIXTEEN_MB	0x1000000
865 
866 /* #define	DMADEBUG */
867 #if defined(DEBUG) || defined(lint)
868 #define	DMADEBUG
869 static int dmadebug = 0;
870 #define	DMAPRINT(a)	if (dmadebug) prom_printf a
871 #else
872 #define	DMAPRINT(a)	{ }
873 #endif	/* DEBUG */
874 
875 
876 
877 /*
878  * allocate DMA handle
879  */
880 static int
881 rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
882     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
883 {
884 	ddi_dma_impl_t *hp;
885 	uint64_t maxsegmentsize_ll;
886 	uint_t maxsegmentsize;
887 
888 #ifdef lint
889 	dip = dip;
890 #endif
891 
892 	/*
893 	 * Validate the dma request.
894 	 */
895 #ifdef DMADEBUG
896 	if (attr->dma_attr_seg < MMU_PAGEOFFSET ||
897 	    attr->dma_attr_count_max < MMU_PAGEOFFSET ||
898 	    attr->dma_attr_granular > MMU_PAGESIZE ||
899 	    attr->dma_attr_maxxfer < MMU_PAGESIZE) {
900 		DMAPRINT((" bad_limits\n"));
901 		return (DDI_DMA_BADLIMITS);
902 	}
903 #endif
904 	/*
905 	 * validate the attribute structure. For now we do not support
906 	 * negative sgllen.
907 	 */
908 	if ((attr->dma_attr_addr_hi <= attr->dma_attr_addr_lo) ||
909 	    (attr->dma_attr_sgllen <= 0)) {
910 		return (DDI_DMA_BADATTR);
911 	}
912 	if ((attr->dma_attr_seg & MMU_PAGEOFFSET) != MMU_PAGEOFFSET ||
913 	    MMU_PAGESIZE & (attr->dma_attr_granular - 1) ||
914 	    attr->dma_attr_sgllen < 0) {
915 		return (DDI_DMA_BADATTR);
916 	}
917 
918 
919 	maxsegmentsize_ll = MIN(attr->dma_attr_seg,
920 	    MIN((attr->dma_attr_count_max + 1) *
921 	    attr->dma_attr_minxfer,
922 	    attr->dma_attr_maxxfer) - 1) + 1;
923 	/*
924 	 * We will calculate a 64 bit segment size, if the segment size
925 	 * is greater than 4G, we will limit it to (4G - 1).
926 	 * The size of the dma object (ddi_dma_obj_t.dmao_size)
927 	 * is 32 bits.
928 	 */
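	/*
	 * For example, with hypothetical attributes dma_attr_seg = 0xFFFFFFFF,
	 * dma_attr_count_max = 0xFFFFFF, dma_attr_minxfer = 1 and
	 * dma_attr_maxxfer = 0xFFFFFFFF, the expression above works out to
	 * MIN(0xFFFFFFFF, MIN(0x1000000 * 1, 0xFFFFFFFF) - 1) + 1 = 0x1000000,
	 * i.e. a 16MB maximum segment size.
	 */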
929 	if (maxsegmentsize_ll == 0 || (maxsegmentsize_ll > FOURG))
930 		maxsegmentsize = FOURG - 1;
931 	else
932 		maxsegmentsize = maxsegmentsize_ll;
933 
934 	/*
935 	 * We should be able to DMA into every byte offset in a page.
936 	 */
937 	if (maxsegmentsize < MMU_PAGESIZE) {
938 		DMAPRINT((" bad_limits, maxsegmentsize\n"));
939 		return (DDI_DMA_BADLIMITS);
940 	}
941 
942 
943 	hp = kmem_zalloc(sizeof (*hp),
944 	    (waitfp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP);
945 	if (hp == NULL) {
946 		if (waitfp != DDI_DMA_DONTWAIT) {
947 			ddi_set_callback(waitfp, arg, &dvma_call_list_id);
948 		}
949 		return (DDI_DMA_NORESOURCES);
950 	}
951 	/*
952 	 * Preallocate space for cookie structures. We will use this when
953 	 * the request does not span more than (DMAI_SOMEMORE_COOKIES - 1)
954 	 * pages.
955 	 */
956 	hp->dmai_additionalcookiep =
957 	    kmem_zalloc(sizeof (ddi_dma_cookie_t) * DMAI_SOMEMORE_COOKIES,
958 		(waitfp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP);
959 
960 	/*
961 	 * Save requestor's information
962 	 */
963 	hp->dmai_wins = NULL;
964 	hp->dmai_kaddr =
965 	hp->dmai_ibufp = NULL;
966 	hp->dmai_inuse = 0;
967 	hp->dmai_minxfer = attr->dma_attr_minxfer;
968 	hp->dmai_burstsizes = attr->dma_attr_burstsizes;
969 	hp->dmai_minfo = NULL;
970 	hp->dmai_rdip = rdip;
971 	hp->dmai_attr = *attr;
972 	hp->dmai_mctl = rootnex_dma_mctl;
973 	hp->dmai_segmentsize = maxsegmentsize;
974 	*handlep = (ddi_dma_handle_t)hp;
975 
976 	return (DDI_SUCCESS);
977 }
978 
979 /*ARGSUSED*/
980 static int
981 rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
982     ddi_dma_handle_t handle)
983 {
984 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
985 
986 	/*
987 	 * free the additional cookie space.
988 	 */
989 	if (hp->dmai_additionalcookiep)
990 	    kmem_free(hp->dmai_additionalcookiep,
991 		sizeof (ddi_dma_cookie_t) * DMAI_SOMEMORE_COOKIES);
992 
993 	kmem_free(hp, sizeof (*hp));
994 	if (dvma_call_list_id)
995 		ddi_run_callback(&dvma_call_list_id);
996 	return (DDI_SUCCESS);
997 }
998 
999 static int
1000 rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
1001     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
1002     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
1003 {
1004 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
1005 	ddi_dma_attr_t *dma_attr = &hp->dmai_attr;
1006 	ddi_dma_cookie_t *cp;
1007 	impl_dma_segment_t *segp;
1008 	uint_t segcount = 1;
1009 	int rval;
1010 	struct priv_handle php;
1011 	uint_t	size, offset;
1012 	uint64_t padr;
1013 	major_t mnum;
1014 
1015 	/*
1016 	 * no mutex for speed
1017 	 */
1018 	if (hp->dmai_inuse) {
1019 		return (DDI_DMA_INUSE);
1020 	}
1021 	hp->dmai_inuse = 1;
1022 
1023 	size = OBJSIZE;
1024 	/*
1025 	 * get the physical address of the first page of an object
1026 	 * defined through the 'dmareq' structure.
1027 	 */
1028 	padr = rootnex_get_phyaddr(dmareq, 0, &php);
1029 	offset = padr & MMU_PAGEOFFSET;
1030 	if (offset & (dma_attr->dma_attr_minxfer - 1)) {
1031 		DMAPRINT((" bad_limits/mapping\n"));
1032 		return (DDI_DMA_NOMAPPING);
1033 	} else if ((dma_attr->dma_attr_sgllen > 1) &&
1034 	    (size <= MMU_PAGESIZE) && (padr < AHI_ATTR)) {
1035 		/*
1036 		 * The object is no more than a PAGESIZE and we could DMA into
1037 		 * the physical page.
1038 		 * The cache is completely coherent, set the NOSYNC flag.
1039 		 */
1040 		hp->dmai_rflags = (dmareq->dmar_flags & DMP_DDIFLAGS) |
1041 			DMP_NOSYNC;
1042 		/*
1043 		 * Fill in the physical address in the cookie pointer.
1044 		 */
1045 		cookiep->dmac_type = php.ph_mapinfo;
1046 		cookiep->dmac_laddress = padr;
1047 		if ((offset + size) <= MMU_PAGESIZE) {
1048 		    cookiep->dmac_size = size;
1049 		    hp->dmai_cookie = NULL;
1050 		    *ccountp = 1;
1051 		} else if (hp->dmai_additionalcookiep) {
1052 		/*
1053 		 * The object spans a page boundary. We will use the space
1054 		 * that we preallocated to store the additional cookie.
1055 		 */
1056 		    cookiep->dmac_size = MMU_PAGESIZE - offset;
1057 		    hp->dmai_cookie = hp->dmai_additionalcookiep;
1058 		    padr = rootnex_get_phyaddr(dmareq,
1059 			(uint_t)cookiep->dmac_size, &php);
1060 		    if (padr > AHI_ATTR) {
1061 			/*
1062 			 * We cannot DMA into this physical page. We will
1063 			 * need intermediate buffers. Reset the state in
1064 			 * the php structure.
1065 			 */
1066 			padr = rootnex_get_phyaddr(dmareq, 0, &php);
1067 			goto io_brkup_attr;
1068 		    }
1069 		    hp->dmai_additionalcookiep->dmac_type = php.ph_mapinfo;
1070 		    hp->dmai_additionalcookiep->dmac_laddress = padr;
1071 		    hp->dmai_additionalcookiep->dmac_size =
1072 			size - cookiep->dmac_size;
1073 		    *ccountp = 2;
1074 		} else {
1075 			goto io_brkup_attr;
1076 		}
1077 		hp->dmai_kaddr = NULL;
1078 		hp->dmai_segp = NULL;
1079 		hp->dmai_ibufp = NULL;
1080 		return (DDI_DMA_MAPPED);
1081 	}
1082 io_brkup_attr:
1083 	/*
1084 	 * The function rootnex_get_phyaddr() does not save the physical
1085 	 * address in the php structure. Save it here for
1086 	 * rootnex_io_brkup_attr().
1087 	 */
1088 	php.ph_padr = padr;
1089 	rval =  rootnex_io_brkup_attr(dip, rdip, dmareq, handle, &php);
1090 	if (rval && (rval != DDI_DMA_PARTIAL_MAP)) {
1091 		hp->dmai_inuse = 0;
1092 		return (rval);
1093 	}
1094 	hp->dmai_wins = segp = hp->dmai_hds;
1095 	if (hp->dmai_ibufp) {
1096 		(void) rootnex_io_wtsync(hp, BIND);
1097 	}
1098 
1099 	while ((segp->dmais_flags & DMAIS_WINEND) == 0) {
1100 		segp = segp->dmais_link;
1101 		segcount++;
1102 	}
1103 	*ccountp = segcount;
1104 	cp = hp->dmai_cookie;
1105 	ASSERT(cp);
1106 	cookiep->dmac_type = cp->dmac_type;
1107 	cookiep->dmac_laddress = cp->dmac_laddress;
1108 	cookiep->dmac_size = cp->dmac_size;
1109 	hp->dmai_cookie++;
1110 
1111 	/*
1112 	 * If we ended up with more cookies than the caller specified as
1113 	 * the maximum that it can handle (sgllen), and they didn't specify
1114 	 * DDI_DMA_PARTIAL, cleanup and return failure.
1115 	 *
1116 	 * Not the cleanest fix, but lowest risk. The DMA code in
1117 	 * this file should get a good cleaning for some performance
1118 	 * improvement. This should be cleaned up also during that work.
1119 	 */
1120 	if ((dma_attr->dma_attr_sgllen < *ccountp) &&
1121 	    ((dmareq->dmar_flags & DDI_DMA_PARTIAL) == 0)) {
1122 
1123 		mnum = ddi_driver_major(rdip);
1124 
1125 		/*
1126 		 * patchable which allows us to print one warning per major
1127 		 * number.
1128 		 */
1129 		if ((rootnex_bind_warn) &&
1130 		    ((rootnex_warn_list[mnum] & ROOTNEX_BIND_WARNING) == 0)) {
1131 			rootnex_warn_list[mnum] |= ROOTNEX_BIND_WARNING;
1132 			cmn_err(CE_WARN, "!%s: coding error detected, the "
1133 			    "driver is using ddi_dma_attr(9S) incorrectly. "
1134 			    "There is a small risk of data corruption in "
1135 			    "particular with large I/Os. The driver should be "
1136 			    "replaced with a corrected version for proper "
1137 			    "system operation. To disable this warning, add "
1138 			    "'set rootnex:rootnex_bind_warn=0' to "
1139 			    "/etc/system(4).", ddi_driver_name(rdip));
1140 		}
1141 
1142 		/*
1143 		 * Patchable which allows us to fail or pass the bind. The
1144 		 * correct behavior should be to fail the bind. To be safe for
1145 		 * now, the patchable allows the previous behavior to be set
1146 		 * via /etc/system
1147 		 */
1148 		if (rootnex_bind_fail) {
1149 			if (hp->dmai_ibufp)
1150 				ddi_mem_free(hp->dmai_ibufp);
1151 			if (hp->dmai_kaddr)
1152 				vmem_free(heap_arena, hp->dmai_kaddr, PAGESIZE);
1153 			if (hp->dmai_segp)
1154 				kmem_free(hp->dmai_segp, hp->dmai_kmsize);
1155 			hp->dmai_inuse = 0;
1156 			*ccountp = 0;
1157 
1158 			return (DDI_DMA_TOOBIG);
1159 		}
1160 	}
1161 
1162 	return (rval);
1163 }
1164 
1165 /*ARGSUSED*/
1166 static int
1167 rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
1168     ddi_dma_handle_t handle)
1169 {
1170 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
1171 	int rval = DDI_SUCCESS;
1172 
1173 	if (hp->dmai_ibufp) {
1174 		rval = rootnex_io_rdsync(hp);
1175 		ddi_mem_free(hp->dmai_ibufp);
1176 	}
1177 	if (hp->dmai_kaddr)
1178 		vmem_free(heap_arena, hp->dmai_kaddr, PAGESIZE);
1179 	if (hp->dmai_segp)
1180 		kmem_free(hp->dmai_segp, hp->dmai_kmsize);
1181 	if (dvma_call_list_id)
1182 		ddi_run_callback(&dvma_call_list_id);
1183 	hp->dmai_inuse = 0;
1184 	return (rval);
1185 }
1186 
1187 /*ARGSUSED*/
1188 static int
1189 rootnex_dma_flush(dev_info_t *dip, dev_info_t *rdip,
1190     ddi_dma_handle_t handle, off_t off, size_t len,
1191     uint_t cache_flags)
1192 {
1193 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
1194 	int rval = DDI_SUCCESS;
1195 
1196 	if (hp->dmai_ibufp) {
1197 		if (cache_flags == DDI_DMA_SYNC_FORDEV) {
1198 			rval = rootnex_io_wtsync(hp, MAP);
1199 		} else {
1200 			rval =  rootnex_io_rdsync(hp);
1201 		}
1202 	}
1203 	return (rval);
1204 }
1205 
1206 /*ARGSUSED*/
1207 static int
1208 rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip,
1209     ddi_dma_handle_t handle, uint_t win, off_t *offp,
1210     size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
1211 {
1212 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
1213 	impl_dma_segment_t *segp, *winp = hp->dmai_hds;
1214 	uint_t len, segcount = 1;
1215 	ddi_dma_cookie_t *cp;
1216 	int i;
1217 
1218 	/*
1219 	 * win is in the range [0 .. dmai_nwin-1]
1220 	 */
1221 	if (win >= hp->dmai_nwin) {
1222 		return (DDI_FAILURE);
1223 	}
1224 	if (hp->dmai_wins && hp->dmai_ibufp) {
1225 		(void) rootnex_io_rdsync(hp);
1226 	}
1227 	ASSERT(winp->dmais_flags & DMAIS_WINSTRT);
1228 	for (i = 0; i < win; i++) {
1229 		winp = winp->_win._dmais_nex;
1230 		ASSERT(winp);
1231 		ASSERT(winp->dmais_flags & DMAIS_WINSTRT);
1232 	}
1233 
1234 	hp->dmai_wins = (impl_dma_segment_t *)winp;
1235 	if (hp->dmai_ibufp)
1236 		(void) rootnex_io_wtsync(hp, BIND);
1237 	segp = winp;
1238 	len = segp->dmais_size;
1239 	*offp = segp->dmais_ofst;
1240 	while ((segp->dmais_flags & DMAIS_WINEND) == 0) {
1241 		segp = segp->dmais_link;
1242 		len += segp->dmais_size;
1243 		segcount++;
1244 	}
1245 
1246 	*lenp = len;
1247 	*ccountp = segcount;
1248 	cp = hp->dmai_cookie = winp->dmais_cookie;
1249 	ASSERT(cp);
1250 	cookiep->dmac_type = cp->dmac_type;
1251 	cookiep->dmac_laddress = cp->dmac_laddress;
1252 	cookiep->dmac_size = cp->dmac_size;
1253 	hp->dmai_cookie++;
1254 	DMAPRINT(("getwin win %p mapping %llx size %lx\n",
1255 	    (void *)winp, (unsigned long long)cp->dmac_laddress,
1256 	    cp->dmac_size));
1257 
1258 	return (DDI_SUCCESS);
1259 }
1260 
1261 static int
1262 rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip,
1263     struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep)
1264 {
1265 	ddi_dma_lim_t *dma_lim = dmareq->dmar_limits;
1266 	impl_dma_segment_t *segmentp;
1267 	ddi_dma_impl_t *hp;
1268 	struct priv_handle php;
1269 	uint64_t padr;
1270 	uint_t offset, size;
1271 	int sizehandle;
1272 	int mapinfo;
1273 
1274 #ifdef lint
1275 	dip = dip;
1276 #endif
1277 
1278 	DMAPRINT(("dma_map: %s (%s) reqp %p ", (handlep)? "alloc" : "advisory",
1279 	    ddi_get_name(rdip), (void *)dmareq));
1280 
1281 #ifdef	DMADEBUG
1282 	/*
1283 	 * Validate range checks on DMA limits
1284 	 */
1285 	if ((dma_lim->dlim_adreg_max & MMU_PAGEOFFSET) != MMU_PAGEOFFSET ||
1286 	    dma_lim->dlim_granular > MMU_PAGESIZE ||
1287 	    dma_lim->dlim_sgllen <= 0) {
1288 		DMAPRINT((" bad_limits\n"));
1289 		return (DDI_DMA_BADLIMITS);
1290 	}
1291 #endif
1292 	size = OBJSIZE;
1293 	/*
1294 	 * get the physical address of the first page of an object
1295 	 * defined through the 'dmareq' structure.
1296 	 */
1297 	padr = rootnex_get_phyaddr(dmareq, 0, &php);
1298 	mapinfo = php.ph_mapinfo;
1299 	offset = padr & MMU_PAGEOFFSET;
1300 	if (offset & (dma_lim->dlim_minxfer - 1)) {
1301 		DMAPRINT((" bad_limits/mapping\n"));
1302 		return (DDI_DMA_NOMAPPING);
1303 	} else if (((offset + size) < MMU_PAGESIZE) && (padr < AHI_LIM)) {
1304 		/*
1305 		 * The object is less than a PAGESIZE and we could DMA into
1306 		 * the physical page.
1307 		 */
1308 		if (!handlep)
1309 			return (DDI_DMA_MAPOK);
1310 		sizehandle = sizeof (ddi_dma_impl_t) +
1311 		    sizeof (impl_dma_segment_t);
1312 
1313 		hp = kmem_alloc(sizehandle, (dmareq->dmar_fp == DDI_DMA_SLEEP) ?
1314 		    KM_SLEEP : KM_NOSLEEP);
1315 		if (!hp) {
1316 			/* let other routine do callback */
1317 			goto breakup_req;
1318 		}
1319 		hp->dmai_kmsize = sizehandle;
1320 
1321 		/*
1322 		 * locate segments after dma_impl handle structure
1323 		 */
1324 		segmentp = (impl_dma_segment_t *)(hp + 1);
1325 
1326 		/* FMA related initialization */
1327 		hp->dmai_fault = 0;
1328 		hp->dmai_fault_check = NULL;
1329 		hp->dmai_fault_notify = NULL;
1330 		hp->dmai_error.err_ena = 0;
1331 		hp->dmai_error.err_status = DDI_FM_OK;
1332 		hp->dmai_error.err_expected = DDI_FM_ERR_UNEXPECTED;
1333 		hp->dmai_error.err_ontrap = NULL;
1334 		hp->dmai_error.err_fep = NULL;
1335 
1336 		/*
1337 		 * Save requestor's information
1338 		 */
1339 		hp->dmai_minxfer = dma_lim->dlim_minxfer;
1340 		hp->dmai_burstsizes = dma_lim->dlim_burstsizes;
1341 		hp->dmai_rdip = rdip;
1342 		hp->dmai_mctl = rootnex_dma_mctl;
1343 		hp->dmai_wins = NULL;
1344 		hp->dmai_kaddr = hp->dmai_ibufp = NULL;
1345 		hp->dmai_hds = segmentp;
1346 		hp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS;
1347 		hp->dmai_minfo = (void *)(uintptr_t)mapinfo;
1348 		hp->dmai_object = dmareq->dmar_object;
1349 		if (mapinfo == DMAMI_PAGES) {
1350 			segmentp->_vdmu._dmais_pp = php.ph_u.pp;
1351 			segmentp->dmais_ofst = (uint_t)offset;
1352 		} else {
1353 			segmentp->_vdmu._dmais_va = php.ph_vaddr;
1354 			segmentp->dmais_ofst = 0;
1355 		}
1356 		segmentp->_win._dmais_nex = NULL;
1357 		segmentp->dmais_link = NULL;
1358 		segmentp->_pdmu._dmais_lpd = padr;
1359 		segmentp->dmais_size = size;
1360 		segmentp->dmais_flags = DMAIS_WINSTRT | DMAIS_WINEND;
1361 		segmentp->dmais_hndl = hp;
1362 		*handlep = (ddi_dma_handle_t)hp;
1363 		DMAPRINT(("	QUICKIE handle %p\n", (void *)hp));
1364 		return (DDI_DMA_MAPPED);
1365 	} else if (!handlep) {
1366 		return (DDI_DMA_NOMAPPING);
1367 	}
1368 breakup_req:
1369 	/*
1370 	 * The function rootnex_get_phyaddr() does not save the physical
1371 	 * address in the php structure. Save it here for
1372 	 * rootnex_io_brkup_lim().
1373 	 */
1374 	php.ph_padr = padr;
1375 	return (rootnex_io_brkup_lim(dip, rdip,  dmareq, handlep,
1376 		dma_lim, &php));
1377 }
1378 
1379 /* CSTYLED */
1380 #define	CAN_COMBINE(psegp, paddr, segsize, sgsize,  mxsegsize, attr, flg) \
1381 ((psegp)								&& \
1382 ((psegp)->_pdmu._dmais_lpd + (psegp)->dmais_size) == (paddr)	&& \
1383 (((psegp)->dmais_flags & (DMAIS_NEEDINTBUF | DMAIS_COMPLEMENT)) == 0) && \
1384 (((flg) & DMAIS_NEEDINTBUF) == 0)					&& \
1385 (((psegp)->dmais_size + (segsize)) <= (mxsegsize))			&& \
1386 ((paddr) & (attr)->dma_attr_seg))
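/*
 * CAN_COMBINE merges the current chunk into the previous segment only when
 * it is physically contiguous with that segment, neither side needs an
 * intermediate buffer, the merged size still fits in mxsegsize, and the new
 * physical address does not fall exactly on a dma_attr_seg boundary.
 */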
1387 
1388 /* CSTYLED */
1389 #define	MARK_WIN_END(segp, prvwinp, cwinp) \
1390 (segp)->dmais_flags |= DMAIS_WINEND;	\
1391 (prvwinp) = (cwinp);			\
1392 (cwinp)->dmais_flags |= DMAIS_WINUIB;	\
1393 (cwinp) = NULL;
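/*
 * MARK_WIN_END closes the window at segp, remembers the current window as
 * the previous one, and flags it as a window that uses intermediate buffers.
 */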
1394 
1395 /*
1396  * This function works with the ddi_dma_attr structure.
1397  * Bugs fixed
1398  * 1. The old code would ignore the size of the first segment when
1399  *	computing the total size of the request (sglistsize) for sgllen == 1
1400  */
1401 
1402 /*ARGSUSED*/
1403 int
1404 rootnex_io_brkup_attr(dev_info_t *dip, dev_info_t *rdip,
1405     struct ddi_dma_req *dmareq, ddi_dma_handle_t handle,
1406     struct priv_handle *php)
1407 {
1408 	impl_dma_segment_t *segmentp;
1409 	impl_dma_segment_t *curwinp;
1410 	impl_dma_segment_t *previousp;
1411 	impl_dma_segment_t *prewinp;
1412 	ddi_dma_cookie_t *cookiep;
1413 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
1414 	caddr_t basevadr;
1415 	caddr_t segmentvadr;
1416 	uint64_t segmentpadr;
1417 	uint_t maxsegmentsize, sizesegment, residual_size;
1418 	uint_t offset, needintbuf, sglistsize, trim;
1419 	int nsegments;
1420 	int mapinfo;
1421 	int reqneedintbuf;
1422 	int rval;
1423 	int segment_flags, win_flags;
1424 	int sgcount;
1425 	int wcount;
1426 	ddi_dma_attr_t *dma_attr = &hp->dmai_attr;
1427 	int sizehandle;
1428 
1429 #ifdef lint
1430 	dip = dip;
1431 #endif
1432 
1433 	/*
1434 	 * Initialize our local variables from the php structure.
1435 	 * rootnex_get_phyaddr() has populated the php structure on its
1436 	 * previous invocation in rootnex_dma_bindhdl().
1437 	 */
1438 	residual_size = OBJSIZE;
1439 	mapinfo = php->ph_mapinfo;
1440 	segmentpadr = php->ph_padr;
1441 	segmentvadr =  php->ph_vaddr;
1442 	basevadr = (mapinfo == DMAMI_PAGES) ? 0 : segmentvadr;
1443 	offset = segmentpadr & MMU_PAGEOFFSET;
1444 	/*
1445 	 * maxsegmentsize was computed and saved in rootnex_dma_allochdl().
1446 	 */
1447 	maxsegmentsize = hp->dmai_segmentsize;
1448 
1449 	/*
1450 	 * The number of segments is the number of 4k pages that the
1451 	 * object spans.
1452 	 * Each 4k segment may need another segment to satisfy
1453 	 * device granularity requirements.
1454 	 * We will never need more than two segments per page.
1455 	 * This may be an overestimate in some cases but it avoids
1456 	 * 64 bit divide operations.
1457 	 */
1458 	nsegments = (offset + residual_size + MMU_PAGEOFFSET) >>
1459 	    (MMU_PAGESHIFT - 1);
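	/*
	 * For instance, offset = 0x800 and residual_size = 0x2000 give
	 * (0x800 + 0x2000 + 0xFFF) >> 11 = 6: the object touches three
	 * 4k pages, and the estimate allows two segments for each.
	 */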
1460 
1461 
1462 
1463 	sizehandle = nsegments * (sizeof (impl_dma_segment_t) +
1464 		    sizeof (ddi_dma_cookie_t));
1465 
1466 	hp->dmai_segp = kmem_zalloc(sizehandle,
1467 	    (dmareq->dmar_fp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP);
1468 	if (!hp->dmai_segp) {
1469 		rval = DDI_DMA_NORESOURCES;
1470 		goto bad;
1471 	}
1472 	hp->dmai_kmsize = sizehandle;
1473 	segmentp = (impl_dma_segment_t *)hp->dmai_segp;
1474 	cookiep = (ddi_dma_cookie_t *)(segmentp + nsegments);
1475 	hp->dmai_cookie = cookiep;
1476 	hp->dmai_wins = NULL;
1477 	hp->dmai_kaddr = hp->dmai_ibufp = NULL;
1478 	hp->dmai_hds = prewinp = segmentp;
1479 	hp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS;
1480 	hp->dmai_minfo = (void *)(uintptr_t)mapinfo;
1481 	hp->dmai_object = dmareq->dmar_object;
1482 
1483 	/*
1484 	 * Breakup the memory object
1485 	 * and build an i/o segment at each boundary condition
1486 	 */
1487 	curwinp = 0;
1488 	needintbuf = 0;
1489 	previousp = 0;
1490 	reqneedintbuf = 0;
1491 	sglistsize = 0;
1492 	wcount = 0;
1493 	sgcount = 1;
1494 	do {
1495 		sizesegment = MIN((MMU_PAGESIZE - offset), residual_size);
1496 		segment_flags = (segmentpadr > AHI_ATTR) ? DMAIS_NEEDINTBUF : 0;
1497 		sglistsize += sizesegment;
1498 		if (sglistsize >= dma_attr->dma_attr_maxxfer) {
1499 			/*
1500 			 * limit the number of bytes to dma_attr_maxxfer
1501 			 */
1502 			sizesegment -=
1503 			    (sglistsize - dma_attr->dma_attr_maxxfer);
1504 			sglistsize = dma_attr->dma_attr_maxxfer;
1505 			sgcount = dma_attr->dma_attr_sgllen + 1;
1506 		}
1507 		if ((dma_attr->dma_attr_sgllen == 1) &&
1508 		    (segmentpadr & (dma_attr->dma_attr_granular - 1)) &&
1509 		    (residual_size != sizesegment)) {
1510 			/*
1511 			 * _no_ scatter/gather capability,
1512 			 * so ensure that size of each segment is a
1513 			 * multiple of dma_attr_granular (== sector size)
1514 			 */
1515 			sizesegment = MIN((uint_t)MMU_PAGESIZE, residual_size);
1516 			segment_flags |= DMAIS_NEEDINTBUF;
1517 			sglistsize = sizesegment;
1518 		}
1519 		if (CAN_COMBINE(previousp, segmentpadr, sizesegment,
1520 		    sglistsize, maxsegmentsize, dma_attr, segment_flags)) {
1521 		    previousp->dmais_flags |= segment_flags;
1522 		    previousp->dmais_size += sizesegment;
1523 		    previousp->dmais_cookie->dmac_size += sizesegment;
1524 		} else {
1525 		    if (dma_attr->dma_attr_sgllen == 1)
1526 			/*
1527 			 * If we cannot combine this segment with the
1528 			 * previous segment or if there are no previous
1529 			 * segments, sglistsize should be set to
1530 			 * sizesegment.
1531 			 */
1532 			sglistsize = sizesegment;
1533 
1534 		    if (previousp) {
1535 			previousp->dmais_link = segmentp;
1536 		    }
1537 		    segmentp->dmais_cookie = cookiep;
1538 		    segmentp->dmais_hndl = hp;
1539 		    if (curwinp == 0) {
1540 			prewinp->_win._dmais_nex = curwinp = segmentp;
1541 			segment_flags |= DMAIS_WINSTRT;
1542 			win_flags = segment_flags;
1543 			wcount++;
1544 		    } else {
1545 			segmentp->_win._dmais_cur = curwinp;
1546 			win_flags |= segment_flags;
1547 		    }
1548 		    segmentp->dmais_ofst = segmentvadr - basevadr;
1549 		    if (mapinfo == DMAMI_PAGES)
1550 			segmentp->_vdmu._dmais_pp = php->ph_u.pp;
1551 		    else
1552 			segmentp->_vdmu._dmais_va = (caddr_t)segmentvadr;
1553 		    segmentp->_pdmu._dmais_lpd = segmentpadr;
1554 		    segmentp->dmais_flags = (ushort_t)segment_flags;
1555 		    segmentp->dmais_size = sizesegment;
1556 		    cookiep->dmac_laddress = segmentpadr;
1557 		    cookiep->dmac_type = (ulong_t)segmentp;
1558 		    cookiep->dmac_size = sizesegment;
1559 		    cookiep++;
1560 		    --nsegments;
1561 		    if (dma_attr->dma_attr_sgllen > 1)
1562 			sgcount++;
1563 		    if (segment_flags & DMAIS_NEEDINTBUF) {
1564 			if ((dma_attr->dma_attr_sgllen > 1) &&
1565 			    (needintbuf += ptob(btopr(sizesegment)))
1566 				== MAX_INT_BUF) {
1567 				/*
1568 				 * Intermediate buffers need not be contiguous.
1569 				 * We allocate a page of intermediate buffer
1570 				 * for every segment.
1571 				 */
1572 			    reqneedintbuf = needintbuf;
1573 			    needintbuf = 0;
1574 			    sgcount = dma_attr->dma_attr_sgllen + 1;
1575 			    MARK_WIN_END(segmentp, prewinp, curwinp);
1576 			} else if (dma_attr->dma_attr_sgllen == 1) {
1577 			    needintbuf = MMU_PAGESIZE;
1578 			    MARK_WIN_END(segmentp, prewinp, curwinp);
1579 			}
1580 		    }
1581 		    previousp = segmentp++;
1582 		}
1583 
1584 		if (sgcount > dma_attr->dma_attr_sgllen) {
1585 		    previousp->dmais_flags |= DMAIS_COMPLEMENT;
1586 		    sgcount = 1;
1587 		    trim = sglistsize & (dma_attr->dma_attr_granular - 1);
1588 
1589 		    if ((sizesegment != residual_size) &&
1590 			(trim == sizesegment)) {
1591 
1592 			/*
1593 			 * Normally we would trim the buffer to make it a
1594 			 * multiple of the granularity. But in this case,
1595 			 * the size is < the granularity so we'll roll back
1596 			 * this segment and pick this up the next time around.
1597 			 *
1598 			 * This case occurs when sgcount naturally (i.e. not
1599 			 * forced) is greater than dma_attr_sgllen. In this
1600 			 * case, if the very next segment fills up the
1601 			 * intermediate buffer, and the amount required to fill
1602 			 * the intermediate buffer < granularity, we would end
1603 			 * up with a zero sized cookie if we didn't roll back
1604 			 * the segment.
1605 			 */
1606 
1607 			/*
1608 			 * Make sure we really understand the code path here,
1609 			 * we should only get here if we are at an end of a
1610 			 * window which is a single page long < granularity
1611 			 */
1612 			ASSERT(previousp->dmais_flags & DMAIS_WINEND);
1613 			ASSERT(sizesegment == sglistsize);
1614 
1615 			/* Zero out this segment and add it back to the count */
1616 			sizesegment = 0;
1617 			sglistsize = 0;
1618 			nsegments++;
1619 
1620 			/* fix the segment and cookie pointers */
1621 			segmentp = previousp;
1622 			bzero(previousp, sizeof (impl_dma_segment_t));
1623 			previousp--;
1624 			bzero(cookiep, sizeof (ddi_dma_cookie_t));
1625 			cookiep--;
1626 
1627 			/*
1628 			 * cleanup the new previous pointer. Make sure we
1629 			 * carry over the WINEND marker.
1630 			 */
1631 			previousp->dmais_link = NULL;
1632 			previousp->dmais_flags |= DMAIS_WINEND;
1633 
1634 		    } else if ((sizesegment != residual_size) && trim) {
1635 			/*
1636 			 * end of a scatter/gather list!
1637 			 * ensure that total length of list is a
1638 			 * multiple of granular (sector size)
1639 			 */
1640 			previousp->dmais_size -= trim;
1641 			previousp->dmais_cookie->dmac_size -= trim;
1642 			sizesegment -= trim;
1643 		    }
1644 		    sglistsize = 0;
1645 		}
1646 		if (sizesegment && (residual_size -= sizesegment)) {
1647 			/*
1648 			 * Get the physical address of the next page in the
1649 			 * dma object.
1650 			 */
1651 			segmentpadr =
1652 			    rootnex_get_phyaddr(dmareq, sizesegment, php);
1653 			offset = segmentpadr & MMU_PAGEOFFSET;
1654 			segmentvadr += sizesegment;
1655 		}
1656 	} while (residual_size && nsegments);
1657 	ASSERT(residual_size == 0);
1658 
1659 	previousp->dmais_link = NULL;
1660 	previousp->dmais_flags |= DMAIS_WINEND;
1661 	if (curwinp) {
1662 		if (win_flags & DMAIS_NEEDINTBUF)
1663 			curwinp->dmais_flags |= DMAIS_WINUIB;
1664 		curwinp->_win._dmais_nex = NULL;
1665 	} else
1666 		prewinp->_win._dmais_nex = NULL;
1667 
1668 	if ((needintbuf = MAX(needintbuf, reqneedintbuf)) != 0) {
1669 		uint64_t	saved_align;
1670 
1671 		saved_align = dma_attr->dma_attr_align;
1672 		/*
1673 		 * Allocate the intermediate buffer. To start with we request
1674 		 * a page-aligned area. This request is satisfied from
1675 		 * the system page free list pool.
1676 		 */
1677 		dma_attr->dma_attr_align = MMU_PAGESIZE;
1678 		if (i_ddi_mem_alloc(dip, dma_attr, needintbuf,
1679 		    (dmareq->dmar_fp == DDI_DMA_SLEEP) ? 0x1 : 0, 1, 0,
1680 		    &hp->dmai_ibufp, (ulong_t *)&hp->dmai_ibfsz,
1681 		    NULL) != DDI_SUCCESS) {
1682 			dma_attr->dma_attr_align = saved_align;
1683 			rval = DDI_DMA_NORESOURCES;
1684 			goto bad;
1685 		}
1686 		if (mapinfo != DMAMI_KVADR) {
1687 			hp->dmai_kaddr = vmem_alloc(heap_arena, PAGESIZE,
1688 			    VM_SLEEP);
1689 		}
1690 		dma_attr->dma_attr_align = saved_align;
1691 	}
1692 
1693 	/*
1694 	 * return success
1695 	 */
1696 	ASSERT(wcount > 0);
1697 	if (wcount == 1) {
1698 		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
1699 		rval = DDI_DMA_MAPPED;
1700 	} else if (hp->dmai_rflags & DDI_DMA_PARTIAL) {
1701 		rval = DDI_DMA_PARTIAL_MAP;
1702 	} else {
1703 		if (hp->dmai_segp)
1704 			kmem_free(hp->dmai_segp, hp->dmai_kmsize);
1705 		return (DDI_DMA_TOOBIG);
1706 	}
1707 	hp->dmai_nwin = wcount;
1708 	return (rval);
1709 bad:
1710 	hp->dmai_cookie = NULL;
1711 	if (hp->dmai_segp)
1712 		kmem_free(hp->dmai_segp, hp->dmai_kmsize);
1713 	if (rval == DDI_DMA_NORESOURCES &&
1714 	    dmareq->dmar_fp != DDI_DMA_DONTWAIT &&
1715 	    dmareq->dmar_fp != DDI_DMA_SLEEP)
1716 		ddi_set_callback(dmareq->dmar_fp, dmareq->dmar_arg,
1717 		    &dvma_call_list_id);
1718 	return (rval);
1719 }
1720 
1721 /*
1722  * This function works with the limit structure and does 32 bit arithmetic.
1723  */
1724 int
1725 rootnex_io_brkup_lim(dev_info_t *dip, dev_info_t *rdip,
1726     struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep,
1727     ddi_dma_lim_t *dma_lim, struct priv_handle *php)
1728 {
1729 	impl_dma_segment_t *segmentp;
1730 	impl_dma_segment_t *curwinp;
1731 	impl_dma_segment_t *previousp;
1732 	impl_dma_segment_t *prewinp;
1733 	ddi_dma_impl_t *hp = 0;
1734 	caddr_t basevadr;
1735 	caddr_t segmentvadr;
1736 	uint64_t segmentpadr;
1737 	uint_t maxsegmentsize, sizesegment;
1738 	uint_t needintbuf;
1739 	uint_t offset;
1740 	uint_t residual_size;
1741 	uint_t sglistsize;
1742 	int nsegments;
1743 	int mapinfo;
1744 	int reqneedintbuf;
1745 	int rval;
1746 	int segment_flags, win_flags;
1747 	int sgcount;
1748 	int wcount;
1749 #ifdef DMADEBUG
1750 	int numsegments;
1751 #endif
1752 	int sizehandle;
1753 
1754 #ifdef lint
1755 	dip = dip;
1756 #endif
1757 
1758 	/*
1759 	 * Validate the dma request.
1760 	 */
1761 #ifdef DMADEBUG
1762 	if (dma_lim->dlim_adreg_max < MMU_PAGEOFFSET ||
1763 	    dma_lim->dlim_ctreg_max < MMU_PAGEOFFSET ||
1764 	    dma_lim->dlim_granular > MMU_PAGESIZE ||
1765 	    dma_lim->dlim_reqsize < MMU_PAGESIZE) {
1766 		DMAPRINT((" bad_limits\n"));
1767 		return (DDI_DMA_BADLIMITS);
1768 	}
1769 #endif
1770 
1771 	/*
1772 	 * Initialize our local variables from the php structure.
1773 	 * rootnex_get_phyaddr() has populated the php structure on its
1774 	 * previous invocation in rootnex_dma_map().
1775 	 */
1776 	residual_size = OBJSIZE;
1777 	mapinfo = php->ph_mapinfo;
1778 	segmentpadr = php->ph_padr;
1779 	segmentvadr =  php->ph_vaddr;
1780 	basevadr = (mapinfo == DMAMI_PAGES) ? 0 : segmentvadr;
1781 	offset = segmentpadr & MMU_PAGEOFFSET;
1782 	if (dma_lim->dlim_sgllen <= 0 ||
1783 	    (offset & (dma_lim->dlim_minxfer - 1))) {
1784 		DMAPRINT((" bad_limits/mapping\n"));
1785 		rval = DDI_DMA_NOMAPPING;
1786 		goto bad;
1787 	}
1788 
1789 	maxsegmentsize = MIN(dma_lim->dlim_adreg_max,
1790 	    MIN((dma_lim->dlim_ctreg_max + 1) * dma_lim->dlim_minxfer,
1791 	    dma_lim->dlim_reqsize) - 1) + 1;
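	/*
	 * This mirrors the attribute-based computation in
	 * rootnex_dma_allochdl(), but uses the 32-bit ddi_dma_lim_t fields;
	 * a wrap to zero here stands in for the 4G case handled below.
	 */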
1792 	if (maxsegmentsize == 0)
1793 		maxsegmentsize = FOURG - 1;
1794 	if (maxsegmentsize < MMU_PAGESIZE) {
1795 		DMAPRINT((" bad_limits, maxsegmentsize\n"));
1796 		rval = DDI_DMA_BADLIMITS;
1797 		goto bad;
1798 	}
1799 
1800 
1801 	/*
1802 	 * The number of segments is the number of 4k pages that the
1803 	 * object spans.
1804 	 * Each 4k segment may need another segment to satisfy
1805 	 * device granularity requirements.
1806 	 * We will never need more than two segments per page.
1807 	 * This may be an overestimate in some cases but it avoids
1808 	 * 64 bit divide operations.
1809 	 */
1810 	nsegments = (offset + residual_size + MMU_PAGEOFFSET) >>
1811 	    (MMU_PAGESHIFT - 1);
1812 
1813 #ifdef DMADEBUG
1814 	numsegments = nsegments;
1815 #endif
1816 	ASSERT(nsegments > 0);
1817 
1818 
1819 	sizehandle = sizeof (ddi_dma_impl_t) +
1820 		(nsegments * sizeof (impl_dma_segment_t));
1821 
1822 	hp = kmem_alloc(sizehandle,
1823 	    (dmareq->dmar_fp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP);
1824 	if (!hp) {
1825 		rval = DDI_DMA_NORESOURCES;
1826 		goto bad;
1827 	}
1828 	hp->dmai_kmsize = sizehandle;
1829 
1830 	/*
1831 	 * locate segments after dma_impl handle structure
1832 	 */
1833 	segmentp = (impl_dma_segment_t *)(hp + 1);
1834 
1835 	/* FMA related initialization */
1836 	hp->dmai_fault = 0;
1837 	hp->dmai_fault_check = NULL;
1838 	hp->dmai_fault_notify = NULL;
1839 	hp->dmai_error.err_ena = 0;
1840 	hp->dmai_error.err_status = DDI_FM_OK;
1841 	hp->dmai_error.err_expected = DDI_FM_ERR_UNEXPECTED;
1842 	hp->dmai_error.err_ontrap = NULL;
1843 	hp->dmai_error.err_fep = NULL;
1844 
1845 	/*
1846 	 * Save requestor's information
1847 	 */
1848 	hp->dmai_minxfer = dma_lim->dlim_minxfer;
1849 	hp->dmai_burstsizes = dma_lim->dlim_burstsizes;
1850 	hp->dmai_rdip = rdip;
1851 	hp->dmai_mctl = rootnex_dma_mctl;
1852 	hp->dmai_wins = NULL;
1853 	hp->dmai_kaddr = hp->dmai_ibufp = NULL;
1854 	hp->dmai_hds = prewinp = segmentp;
1855 	hp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS;
1856 	hp->dmai_minfo = (void *)(uintptr_t)mapinfo;
1857 	hp->dmai_object = dmareq->dmar_object;
1858 
1859 	/*
1860 	 * Breakup the memory object
1861 	 * and build an i/o segment at each boundary condition
1862 	 */
1863 	curwinp = 0;
1864 	needintbuf = 0;
1865 	previousp = 0;
1866 	reqneedintbuf = 0;
1867 	sglistsize = 0;
1868 	wcount = 0;
1869 	sgcount = 1;
1870 	do {
1871 		sizesegment =
1872 		    MIN(((uint_t)MMU_PAGESIZE - offset), residual_size);
1873 		segment_flags = (segmentpadr > AHI_LIM) ? DMAIS_NEEDINTBUF : 0;
1874 
1875 		if (dma_lim->dlim_sgllen == 1) {
1876 			/*
1877 			 * _no_ scatter/gather capability,
1878 			 * so ensure that size of each segment is a
1879 			 * multiple of dlim_granular (== sector size)
1880 			 */
1881 			if ((segmentpadr & (dma_lim->dlim_granular - 1)) &&
1882 			    residual_size != sizesegment) {
1883 				/*
1884 				 * this segment needs an intermediate buffer
1885 				 */
1886 				sizesegment =
1887 				    MIN((uint_t)MMU_PAGESIZE, residual_size);
1888 				segment_flags |= DMAIS_NEEDINTBUF;
1889 			}
1890 		}
1891 
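		/*
		 * Coalesce this chunk into the previous segment only if
		 * it is physically contiguous with it, neither side needs
		 * an intermediate buffer, the previous segment did not
		 * already close a scatter/gather list, the combined size
		 * fits both maxsegmentsize and dlim_reqsize, and the new
		 * chunk does not start on a dlim_adreg_max boundary.
		 */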
1892 		if (previousp &&
1893 		    (previousp->_pdmu._dmais_lpd + previousp->dmais_size) ==
1894 		    segmentpadr &&
1895 		    (previousp->dmais_flags &
1896 		    (DMAIS_NEEDINTBUF | DMAIS_COMPLEMENT)) == 0 &&
1897 		    (segment_flags & DMAIS_NEEDINTBUF) == 0 &&
1898 		    (previousp->dmais_size + sizesegment) <= maxsegmentsize &&
1899 		    (segmentpadr & dma_lim->dlim_adreg_max) &&
1900 		    (sglistsize + sizesegment) <= dma_lim->dlim_reqsize) {
1901 			/*
1902 			 * combine new segment with previous segment
1903 			 */
1904 			previousp->dmais_flags |= segment_flags;
1905 			previousp->dmais_size += sizesegment;
1906 			if ((sglistsize += sizesegment) ==
1907 			    dma_lim->dlim_reqsize)
1908 				/*
1909 				 * force end of scatter/gather list
1910 				 */
1911 				sgcount = dma_lim->dlim_sgllen + 1;
1912 		} else {
1913 			/*
1914 			 * add new segment to linked list
1915 			 */
1916 			if (previousp) {
1917 				previousp->dmais_link = segmentp;
1918 			}
1919 			segmentp->dmais_hndl = hp;
1920 			if (curwinp == 0) {
1921 				prewinp->_win._dmais_nex =
1922 				    curwinp = segmentp;
1923 				segment_flags |= DMAIS_WINSTRT;
1924 				win_flags = segment_flags;
1925 				wcount++;
1926 			} else {
1927 				segmentp->_win._dmais_cur = curwinp;
1928 				win_flags |= segment_flags;
1929 			}
1930 			segmentp->dmais_ofst = segmentvadr - basevadr;
1931 			if (mapinfo == DMAMI_PAGES) {
1932 				segmentp->_vdmu._dmais_pp = php->ph_u.pp;
1933 			} else {
1934 				segmentp->_vdmu._dmais_va = segmentvadr;
1935 			}
1936 			segmentp->_pdmu._dmais_lpd = segmentpadr;
1937 			segmentp->dmais_flags = (ushort_t)segment_flags;
1938 
1939 			if (dma_lim->dlim_sgllen > 1) {
1940 				if (segment_flags & DMAIS_NEEDINTBUF) {
1941 					needintbuf += ptob(btopr(sizesegment));
1942 					if (needintbuf >= MAX_INT_BUF) {
1943 						/*
1944 						 * limit size of intermediate
1945 						 * buffer
1946 						 */
1947 						reqneedintbuf = MAX_INT_BUF;
1948 						needintbuf = 0;
1949 						/*
1950 						 * end of current window
1951 						 */
1952 						segmentp->dmais_flags |=
1953 						    DMAIS_WINEND;
1954 						prewinp = curwinp;
1955 						curwinp->dmais_flags |=
1956 						    DMAIS_WINUIB;
1957 						curwinp = NULL;
1958 						/*
1959 						 * force end of scatter/gather
1960 						 * list
1961 						 */
1962 						sgcount = dma_lim->dlim_sgllen;
1963 					}
1964 				}
1965 				sglistsize += sizesegment;
1966 				if (sglistsize >= dma_lim->dlim_reqsize) {
1967 					/*
1968 					 * limit size of xfer
1969 					 */
1970 					sizesegment -= (sglistsize -
1971 					    dma_lim->dlim_reqsize);
1972 					sglistsize = dma_lim->dlim_reqsize;
1973 					sgcount = dma_lim->dlim_sgllen;
1974 				}
1975 				sgcount++;
1976 			} else {
1977 				/*
1978 				 * _no_ scatter/gather capability,
1979 				 */
1980 				if (segment_flags & DMAIS_NEEDINTBUF) {
1981 					/*
1982 					 * end of window
1983 					 */
1984 					needintbuf = MMU_PAGESIZE;
1985 					segmentp->dmais_flags |= DMAIS_WINEND;
1986 					prewinp = curwinp;
1987 					curwinp->dmais_flags |= DMAIS_WINUIB;
1988 					curwinp = NULL;
1989 				}
1990 			}
1991 			segmentp->dmais_size = sizesegment;
1992 			previousp = segmentp++;
1993 			--nsegments;
1994 		}
1995 
1996 		if (sgcount > dma_lim->dlim_sgllen) {
1997 			/*
1998 			 * end of a scatter/gather list!
1999 			 * ensure that total length of list is a
2000 			 * multiple of granular (sector size)
2001 			 */
2002 			if (sizesegment != residual_size) {
2003 				uint_t trim;
2004 
2005 				trim = sglistsize &
2006 				    (dma_lim->dlim_granular - 1);
2007 				if (trim >= sizesegment) {
2008 					cmn_err(CE_WARN,
2009 					    "unable to reduce segment size");
2010 					rval = DDI_DMA_NOMAPPING;
2011 					goto bad;
2012 				}
2013 				previousp->dmais_size -= trim;
2014 				sizesegment -= trim;
2015 				/* start new scatter/gather list */
2016 				sgcount = 1;
2017 				sglistsize = 0;
2018 			}
2019 			previousp->dmais_flags |= DMAIS_COMPLEMENT;
2020 		}
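		/*
		 * Trim example with a hypothetical 512 byte granularity:
		 * if sglistsize reached 0x2300, trim == 0x2300 & 0x1ff ==
		 * 0x100, so the final segment is shortened by 0x100 bytes
		 * to keep the list a whole number of sectors; the trimmed
		 * bytes stay in residual_size and start the next list.
		 */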
2021 		if (sizesegment && (residual_size -= sizesegment)) {
2022 			segmentpadr =
2023 			    rootnex_get_phyaddr(dmareq, sizesegment, php);
2024 			offset = segmentpadr & MMU_PAGEOFFSET;
2025 			segmentvadr += sizesegment;
2026 		}
2027 	} while (residual_size && nsegments);
2028 	ASSERT(residual_size == 0);
2029 
2030 	previousp->dmais_link = NULL;
2031 	previousp->dmais_flags |= DMAIS_WINEND;
2032 	if (curwinp) {
2033 		if (win_flags & DMAIS_NEEDINTBUF)
2034 			curwinp->dmais_flags |= DMAIS_WINUIB;
2035 		curwinp->_win._dmais_nex = NULL;
2036 	} else
2037 		prewinp->_win._dmais_nex = NULL;
2038 
2039 	if ((needintbuf = MAX(needintbuf, reqneedintbuf)) != 0) {
2040 		ddi_dma_attr_t dma_attr;
2041 
2042 
2043 		dma_attr.dma_attr_version = DMA_ATTR_V0;
2044 		dma_attr.dma_attr_addr_lo = dma_lim->dlim_addr_lo;
2045 		dma_attr.dma_attr_addr_hi = dma_lim->dlim_addr_hi;
2046 		dma_attr.dma_attr_minxfer = dma_lim->dlim_minxfer;
2047 		dma_attr.dma_attr_seg = dma_lim->dlim_adreg_max;
2048 		dma_attr.dma_attr_count_max = dma_lim->dlim_ctreg_max;
2049 		dma_attr.dma_attr_granular = dma_lim->dlim_granular;
2050 		dma_attr.dma_attr_sgllen = dma_lim->dlim_sgllen;
2051 		dma_attr.dma_attr_maxxfer = dma_lim->dlim_reqsize;
2052 		dma_attr.dma_attr_burstsizes = dma_lim->dlim_burstsizes;
2053 		dma_attr.dma_attr_align = MMU_PAGESIZE;
2054 		dma_attr.dma_attr_flags = 0;
2055 
2056 		/*
2057 		 * Allocate intermediate buffer.
2058 		 */
2059 		if (i_ddi_mem_alloc(dip, &dma_attr, needintbuf,
2060 		    (dmareq->dmar_fp == DDI_DMA_SLEEP) ? 0x1 : 0, 1, 0,
2061 		    &hp->dmai_ibufp, (ulong_t *)&hp->dmai_ibfsz,
2062 		    NULL) != DDI_SUCCESS) {
2063 			rval = DDI_DMA_NORESOURCES;
2064 			goto bad;
2065 		}
2066 		if (mapinfo != DMAMI_KVADR) {
2067 			hp->dmai_kaddr = vmem_alloc(heap_arena, PAGESIZE,
2068 			    VM_SLEEP);
2069 		}
2070 	}
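	/*
	 * hp->dmai_kaddr is a scratch kernel VA page: the wtsync/rdsync
	 * routines below map each page of a DMAMI_PAGES or DMAMI_UVADR
	 * object there (via i86_pp_map()/i86_va_map()) so that it can be
	 * bcopy'd to or from the intermediate buffer.  A DMAMI_KVADR
	 * object already has a kernel mapping, so no scratch page is
	 * needed.
	 */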
2071 
2072 	/*
2073 	 * return success
2074 	 */
2075 #ifdef DMADEBUG
2076 	DMAPRINT(("dma_brkup: handle %p nsegments %x\n",
2077 	    (void *)hp, numsegments - nsegments));
2078 #endif
2079 	hp->dmai_cookie = NULL;
2080 	*handlep = (ddi_dma_handle_t)hp;
2081 	return (DDI_DMA_MAPPED);
2082 bad:
2083 	if (hp)
2084 		kmem_free(hp, hp->dmai_kmsize);
2085 	if (rval == DDI_DMA_NORESOURCES &&
2086 	    dmareq->dmar_fp != DDI_DMA_DONTWAIT &&
2087 	    dmareq->dmar_fp != DDI_DMA_SLEEP)
2088 		ddi_set_callback(dmareq->dmar_fp, dmareq->dmar_arg,
2089 		    &dvma_call_list_id);
2090 	return (rval);
2091 }
2092 
2093 int
2094 rootnex_io_wtsync(ddi_dma_impl_t *hp, int type)
2095 {
2096 	impl_dma_segment_t *sp = hp->dmai_wins;
2097 	caddr_t	kviradr, addr;
2098 	caddr_t vsrc;
2099 	ulong_t segoffset, vsoffset;
2100 	int cpycnt;
2101 
2102 	addr = hp->dmai_ibufp;
2103 	if ((uintptr_t)addr & MMU_PAGEOFFSET) {
2104 		addr = (caddr_t)(((uintptr_t)addr + MMU_PAGEOFFSET) &
2105 		    ~MMU_PAGEOFFSET);
2106 	}
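	/*
	 * addr now sits on a page boundary; each segment that is
	 * buffered below consumes one full page of the intermediate
	 * buffer (addr += MMU_PAGESIZE per segment).
	 */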
2107 	if ((sp->dmais_flags & DMAIS_WINUIB) == 0)
2108 		return (DDI_SUCCESS);
2109 
2110 	switch ((intptr_t)hp->dmai_minfo) {
2111 
2112 	case DMAMI_KVADR:
2113 		do if (sp->dmais_flags & DMAIS_NEEDINTBUF) {
2114 
2115 			if (hp->dmai_rflags & DDI_DMA_WRITE)
2116 				/*
2117 				 *  copy from segment to buffer
2118 				 */
2119 				bcopy(sp->_vdmu._dmais_va, addr,
2120 				    sp->dmais_size);
2121 			/*
2122 			 * save phys addr of intermediate buffer
2123 			 */
2124 			sp->_pdmu._dmais_lpd =
2125 			    ptob64(hat_getpfnum(kas.a_hat, addr));
2126 			if (type == BIND) {
2127 				sp->dmais_cookie->dmac_laddress =
2128 				    sp->_pdmu._dmais_lpd;
2129 			}
2130 			addr += MMU_PAGESIZE;
2131 		} while (!(sp->dmais_flags & DMAIS_WINEND) &&
2132 		    (sp = sp->dmais_link));
2133 		break;
2134 
2135 	case DMAMI_PAGES:
2136 		do if (sp->dmais_flags & DMAIS_NEEDINTBUF) {
2137 
2138 			if (hp->dmai_rflags & DDI_DMA_WRITE) {
2139 				/*
2140 				 * need to mapin page so we can have a
2141 				 * virtual address to do copying
2142 				 */
2143 				i86_pp_map(sp->_vdmu._dmais_pp, hp->dmai_kaddr);
2144 				/*
2145 				 *  copy from segment to buffer
2146 				 */
2147 				bcopy(hp->dmai_kaddr +
2148 				    (sp->dmais_ofst & MMU_PAGEOFFSET),
2149 				    addr, sp->dmais_size);
2150 				/*
2151 				 *  need to mapout page
2152 				 */
2153 				hat_unload(kas.a_hat, hp->dmai_kaddr,
2154 				    MMU_PAGESIZE, HAT_UNLOAD);
2155 			}
2156 			/*
2157 			 * save phys addr of intermediate buffer
2158 			 */
2159 			sp->_pdmu._dmais_lpd =
2160 			    ptob64(hat_getpfnum(kas.a_hat, addr));
2161 			if (type == BIND) {
2162 				sp->dmais_cookie->dmac_laddress =
2163 				    sp->_pdmu._dmais_lpd;
2164 			}
2165 			addr += MMU_PAGESIZE;
2166 		} while (!(sp->dmais_flags & DMAIS_WINEND) &&
2167 		    (sp = sp->dmais_link));
2168 		break;
2169 
2170 	case DMAMI_UVADR:
2171 		do if (sp->dmais_flags & DMAIS_NEEDINTBUF) {
2172 
2173 			if (hp->dmai_rflags & DDI_DMA_WRITE) {
2174 				struct page **pplist;
2175 				segoffset = 0;
2176 				do {
2177 					/*
2178 					 * need to mapin page so we can have a
2179 					 * virtual address to do copying
2180 					 */
2181 					vsrc = sp->_vdmu._dmais_va + segoffset;
2182 					vsoffset =
2183 					    (ulong_t)vsrc & MMU_PAGEOFFSET;
2184 					pplist = hp->dmai_object.dmao_obj.
2185 					    virt_obj.v_priv;
2186 					/*
2187 					 * check if we have to use the
2188 					 * shadow list or the CPU mapping.
2189 					 */
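					/*
					 * The shadow list is indexed
					 * by page: off is vsrc's byte
					 * offset from the page-aligned
					 * start of the object, so
					 * btop(off) selects the page_t
					 * backing vsrc.
					 */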
2190 					if (pplist != NULL) {
2191 						ulong_t base, off;
2192 
2193 						base = (ulong_t)hp->dmai_object.
2194 						    dmao_obj.virt_obj.v_addr;
2195 						off = (base & MMU_PAGEOFFSET) +
2196 						    (ulong_t)vsrc - base;
2197 						i86_pp_map(pplist[btop(off)],
2198 						    hp->dmai_kaddr);
2199 					} else {
2200 						i86_va_map(vsrc,
2201 						    hp->dmai_object.dmao_obj.
2202 						    virt_obj.v_as,
2203 						    hp->dmai_kaddr);
2204 					}
2205 					kviradr = hp->dmai_kaddr + vsoffset;
2206 					cpycnt = sp->dmais_size - segoffset;
2207 					if (vsoffset + cpycnt > MMU_PAGESIZE)
2208 						cpycnt = MMU_PAGESIZE -
2209 						    vsoffset;
2210 					/*
2211 					 *  copy from segment to buffer
2212 					 */
2213 					bcopy(kviradr, addr + segoffset,
2214 					    cpycnt);
2215 					/*
2216 					 *  need to mapout page
2217 					 */
2218 					hat_unload(kas.a_hat, hp->dmai_kaddr,
2219 					    MMU_PAGESIZE, HAT_UNLOAD);
2220 					segoffset += cpycnt;
2221 				} while (segoffset < sp->dmais_size);
2222 			}
2223 			/*
2224 			 * save phys addr of intermediate buffer
2225 			 */
2226 			sp->_pdmu._dmais_lpd =
2227 			    ptob64(hat_getpfnum(kas.a_hat, addr));
2228 			if (type == BIND) {
2229 				sp->dmais_cookie->dmac_laddress =
2230 				    sp->_pdmu._dmais_lpd;
2231 			}
2232 			addr += MMU_PAGESIZE;
2233 		} while (!(sp->dmais_flags & DMAIS_WINEND) &&
2234 		    (sp = sp->dmais_link));
2235 		break;
2236 
2237 	default:
2238 		cmn_err(CE_WARN, "Invalid dma handle/map info");
2239 	}
2240 	return (DDI_SUCCESS);
2241 }
2242 
2243 int
2244 rootnex_io_rdsync(ddi_dma_impl_t *hp)
2245 {
2246 	impl_dma_segment_t *sp = hp->dmai_wins;
2247 	caddr_t	kviradr;
2248 	caddr_t vdest, addr;
2249 	ulong_t segoffset, vdoffset;
2250 	int cpycnt;
2251 
2252 	addr = hp->dmai_ibufp;
2253 	if ((uintptr_t)addr & MMU_PAGEOFFSET) {
2254 		addr = (caddr_t)(((uintptr_t)addr + MMU_PAGEOFFSET) &
2255 		    ~MMU_PAGEOFFSET);
2256 	}
2257 	if (!(sp->dmais_flags & DMAIS_WINUIB) ||
2258 	    !(hp->dmai_rflags & DDI_DMA_READ))
2259 		return (DDI_SUCCESS);
2260 
2261 	switch ((intptr_t)hp->dmai_minfo) {
2262 
2263 	case DMAMI_KVADR:
2264 		do if (sp->dmais_flags & DMAIS_NEEDINTBUF) {
2265 			/*
2266 			 *  copy from buffer to segment
2267 			 */
2268 			bcopy(addr, sp->_vdmu._dmais_va, sp->dmais_size);
2269 			addr += MMU_PAGESIZE;
2270 		} while (!(sp->dmais_flags & DMAIS_WINEND) &&
2271 		    (sp = sp->dmais_link));
2272 		break;
2273 
2274 	case DMAMI_PAGES:
2275 		do if (sp->dmais_flags & DMAIS_NEEDINTBUF) {
2276 			/*
2277 			 * need to mapin page
2278 			 */
2279 			i86_pp_map(sp->_vdmu._dmais_pp, hp->dmai_kaddr);
2280 			/*
2281 			 *  copy from buffer to segment
2282 			 */
2283 			bcopy(addr,
2284 			    (hp->dmai_kaddr +
2285 				(sp->dmais_ofst & MMU_PAGEOFFSET)),
2286 			    sp->dmais_size);
2287 
2288 			/*
2289 			 *  need to mapout page
2290 			 */
2291 			hat_unload(kas.a_hat, hp->dmai_kaddr,
2292 			    MMU_PAGESIZE, HAT_UNLOAD);
2293 			addr += MMU_PAGESIZE;
2294 		} while (!(sp->dmais_flags & DMAIS_WINEND) &&
2295 		    (sp = sp->dmais_link));
2296 		break;
2297 
2298 	case DMAMI_UVADR:
2299 		do if (sp->dmais_flags & DMAIS_NEEDINTBUF) {
2300 			struct page **pplist;
2301 			segoffset = 0;
2302 			do {
2303 				/*
2304 				 * need to map_in user virtual address
2305 				 */
2306 				vdest = sp->_vdmu._dmais_va + segoffset;
2307 				vdoffset = (ulong_t)vdest & MMU_PAGEOFFSET;
2308 				pplist = hp->dmai_object.dmao_obj.
2309 				    virt_obj.v_priv;
2310 				/*
2311 				 * check if we have to use the
2312 				 * shadow list or the CPU mapping.
2313 				 */
2314 				if (pplist != NULL) {
2315 					ulong_t base, off;
2316 
2317 					base = (ulong_t)hp->dmai_object.
2318 					    dmao_obj.virt_obj.v_addr;
2319 					off = (base & MMU_PAGEOFFSET) +
2320 					    (ulong_t)vdest - base;
2321 					i86_pp_map(pplist[btop(off)],
2322 					    hp->dmai_kaddr);
2323 				} else {
2324 					i86_va_map(vdest,
2325 					    hp->dmai_object.dmao_obj.
2326 					    virt_obj.v_as,
2327 					    hp->dmai_kaddr);
2328 				}
2329 				kviradr = hp->dmai_kaddr + vdoffset;
2330 				cpycnt = sp->dmais_size - segoffset;
2331 				if (vdoffset + cpycnt > MMU_PAGESIZE)
2332 					cpycnt = MMU_PAGESIZE - vdoffset;
2333 				/*
2334 				 *  copy from buffer to segment
2335 				 */
2336 				bcopy(addr + segoffset, kviradr, cpycnt);
2337 				/*
2338 				 *  need to map_out page
2339 				 */
2340 				hat_unload(kas.a_hat, hp->dmai_kaddr,
2341 				    MMU_PAGESIZE, HAT_UNLOAD);
2342 				segoffset += cpycnt;
2343 			} while (segoffset < sp->dmais_size);
2344 			addr += MMU_PAGESIZE;
2345 		} while (!(sp->dmais_flags & DMAIS_WINEND) &&
2346 		    (sp = sp->dmais_link));
2347 		break;
2348 
2349 	default:
2350 		cmn_err(CE_WARN, "Invalid dma handle/map info");
2351 	}
2352 	return (DDI_SUCCESS);
2353 }
2354 
2355 static int
2356 rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
2357     ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
2358     off_t *offp, size_t *lenp,
2359     caddr_t *objpp, uint_t cache_flags)
2360 {
2361 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
2362 	impl_dma_segment_t *sp = (impl_dma_segment_t *)lenp;
2363 	impl_dma_segment_t *wp = (impl_dma_segment_t *)offp;
2364 #if !defined(__amd64)
2365 	ddi_dma_cookie_t *cp;
2366 #endif
2367 	int rval = DDI_SUCCESS;
2368 
2369 #ifdef lint
2370 	dip = dip;
2371 	rdip = rdip;
2372 #endif
2373 
2374 	DMAPRINT(("io_mctl: handle %p ", (void *)hp));
2375 
2376 	switch (request) {
2377 
2378 	case DDI_DMA_SEGTOC:
2379 #if defined(__amd64)
2380 		/*
2381 		 * ddi_dma_segtocookie(9F) is Obsolete, and the whole
2382 		 * passing-the-pointer-through-the-cache-flags thing just
2383 		 * doesn't work when pointers are 64-bit and cache_flags
2384 		 * are 32-bit.
2385 		 */
2386 		DMAPRINT(("stoc invoked but not implemented.\n"));
2387 		return (DDI_FAILURE);
2388 #else
2389 		/* return device specific dma cookie for segment */
2390 		sp = (impl_dma_segment_t *)(uintptr_t)cache_flags;
2391 		if (!sp) {
2392 			DMAPRINT(("stoc segment %p end\n", (void *)sp));
2393 			return (DDI_FAILURE);
2394 		}
2395 		cp = (ddi_dma_cookie_t *)objpp;
2396 
2397 		/*
2398 		 * use phys addr of actual buffer or intermediate buffer
2399 		 */
2400 		cp->dmac_laddress = sp->_pdmu._dmais_lpd;
2401 
2402 		DMAPRINT(("stoc segment %p mapping %lx size %lx\n",
2403 		    (void *)sp, (ulong_t)sp->_vdmu._dmais_va, sp->dmais_size));
2404 
2405 		cp->dmac_type = (ulong_t)sp;
2406 		*lenp = cp->dmac_size = sp->dmais_size;
2407 		*offp = sp->dmais_ofst;
2408 		return (DDI_SUCCESS);
2409 #endif
2410 
2411 	case DDI_DMA_NEXTSEG:	/* get next DMA segment	*/
2412 		ASSERT(wp->dmais_flags & DMAIS_WINSTRT);
2413 		if (wp != hp->dmai_wins) {
2414 			DMAPRINT(("nxseg: not current window %p\n",
2415 			    (void *)wp));
2416 			return (DDI_DMA_STALE);
2417 		}
2418 		if (!sp) {
2419 			/*
2420 			 * reset to first segment in current window
2421 			 */
2422 			*objpp = (caddr_t)wp;
2423 		} else {
2424 			if (sp->dmais_flags & DMAIS_WINEND) {
2425 				DMAPRINT(("nxseg: seg %p eow\n", (void *)sp));
2426 				return (DDI_DMA_DONE);
2427 			}
2428 			/* check if segment is really in window */
2429 			ASSERT((sp->dmais_flags & DMAIS_WINSTRT) && sp == wp ||
2430 			    !(sp->dmais_flags & DMAIS_WINSTRT) &&
2431 			    sp->_win._dmais_cur == wp);
2432 			*objpp = (caddr_t)sp->dmais_link;
2433 		}
2434 		DMAPRINT(("nxseg: new seg %p\n", (void *)*objpp));
2435 		return (DDI_SUCCESS);
2436 
2437 	case DDI_DMA_NEXTWIN:	/* get next DMA window	*/
2438 		if (hp->dmai_wins && hp->dmai_ibufp)
2439 			/*
2440 			 * do implied sync on current window
2441 			 */
2442 			(void) rootnex_io_rdsync(hp);
2443 		if (!wp) {
2444 			/*
2445 			 * reset to (first segment of) first window
2446 			 */
2447 			*objpp = (caddr_t)hp->dmai_hds;
2448 			DMAPRINT(("nxwin: first win %p\n", (void *)*objpp));
2449 		} else {
2450 			ASSERT(wp->dmais_flags & DMAIS_WINSTRT);
2451 			if (wp != hp->dmai_wins) {
2452 				DMAPRINT(("nxwin: win %p not current\n",
2453 				    (void *)wp));
2454 				return (DDI_DMA_STALE);
2455 			}
2456 			if (wp->_win._dmais_nex == 0) {
2457 				DMAPRINT(("nxwin: win %p end\n", (void *)wp));
2458 				return (DDI_DMA_DONE);
2459 			}
2460 			*objpp = (caddr_t)wp->_win._dmais_nex;
2461 			DMAPRINT(("nxwin: new win %p\n", (void *)*objpp));
2462 		}
2463 		hp->dmai_wins = (impl_dma_segment_t *)*objpp;
2464 		if (hp->dmai_ibufp)
2465 			return (rootnex_io_wtsync(hp, MAP));
2466 		return (DDI_SUCCESS);
2467 
2468 	case DDI_DMA_FREE:
2469 		DMAPRINT(("free handle\n"));
2470 		if (hp->dmai_ibufp) {
2471 			rval = rootnex_io_rdsync(hp);
2472 			ddi_mem_free(hp->dmai_ibufp);
2473 		}
2474 		if (hp->dmai_kaddr)
2475 			vmem_free(heap_arena, hp->dmai_kaddr, PAGESIZE);
2476 		kmem_free(hp, hp->dmai_kmsize);
2477 		if (dvma_call_list_id)
2478 			ddi_run_callback(&dvma_call_list_id);
2479 		break;
2480 
2481 	case DDI_DMA_IOPB_ALLOC:	/* get contiguous DMA-able memory */
2482 		DMAPRINT(("iopb alloc\n"));
2483 		rval = i_ddi_mem_alloc_lim(rdip, (ddi_dma_lim_t *)offp,
2484 		    *lenp, 0, 0, 0, objpp, NULL, NULL);
2485 		break;
2486 
2487 	case DDI_DMA_SMEM_ALLOC:	/* get contiguous DMA-able memory */
2488 		DMAPRINT(("mem alloc\n"));
2489 		rval = i_ddi_mem_alloc_lim(rdip, (ddi_dma_lim_t *)offp,
2490 		    *lenp, cache_flags, 1, 0, objpp, (uint_t *)handle, NULL);
2491 		break;
2492 
2493 	case DDI_DMA_KVADDR:
2494 		DMAPRINT(("kvaddr of phys mapping\n"));
2495 		return (DDI_FAILURE);
2496 
2497 	case DDI_DMA_GETERR:
2498 		DMAPRINT(("geterr\n"));
2499 		rval = DDI_FAILURE;
2500 		break;
2501 
2502 	case DDI_DMA_COFF:
2503 		DMAPRINT(("coff off %p mapping %llx size %lx\n",
2504 		    (void *)*objpp,
2505 		    (unsigned long long)hp->dmai_wins->_pdmu._dmais_lpd,
2506 		    hp->dmai_wins->dmais_size));
2507 		rval = DDI_FAILURE;
2508 		break;
2509 
2510 	default:
2511 		DMAPRINT(("unknown 0x%x\n", request));
2512 		return (DDI_FAILURE);
2513 	}
2514 	return (rval);
2515 }
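
/*
 * For reference, a sketch (not part of this driver) of how a client
 * driver walks the windows and segments built above using the obsolete
 * segment interfaces; "handle", "off", "len" and "cookie" are
 * hypothetical locals:
 *
 *	ddi_dma_win_t win = NULL;
 *	ddi_dma_seg_t seg;
 *
 *	while (ddi_dma_nextwin(handle, win, &win) == DDI_SUCCESS) {
 *		seg = NULL;
 *		while (ddi_dma_nextseg(win, seg, &seg) == DDI_SUCCESS)
 *			(void) ddi_dma_segtocookie(seg, &off, &len,
 *			    &cookie);
 *	}
 *
 * Each ddi_dma_nextwin() call arrives here as DDI_DMA_NEXTWIN, which
 * does the implied sync of the intermediate buffer.
 */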
2516 
2517 /*
2518  * Root nexus ctl functions
2519  */
2520 #define	REPORTDEV_BUFSIZE	1024
2521 
2522 static int
2523 rootnex_ctl_reportdev(dev_info_t *dev)
2524 {
2525 	int i, n, len, f_len = 0;
2526 	char *buf;
2527 
2528 	buf = kmem_alloc(REPORTDEV_BUFSIZE, KM_SLEEP);
2529 	f_len += snprintf(buf, REPORTDEV_BUFSIZE,
2530 	    "%s%d at root", ddi_driver_name(dev), ddi_get_instance(dev));
2531 	len = strlen(buf);
2532 
2533 	for (i = 0; i < sparc_pd_getnreg(dev); i++) {
2534 
2535 		struct regspec *rp = sparc_pd_getreg(dev, i);
2536 
2537 		if (i == 0)
2538 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
2539 			    ": ");
2540 		else
2541 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
2542 			    " and ");
2543 		len = strlen(buf);
2544 
2545 		switch (rp->regspec_bustype) {
2546 
2547 		case BTEISA:
2548 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
2549 			    "%s 0x%x", DEVI_EISA_NEXNAME, rp->regspec_addr);
2550 			break;
2551 
2552 		case BTISA:
2553 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
2554 			    "%s 0x%x", DEVI_ISA_NEXNAME, rp->regspec_addr);
2555 			break;
2556 
2557 		default:
2558 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
2559 			    "space %x offset %x",
2560 			    rp->regspec_bustype, rp->regspec_addr);
2561 			break;
2562 		}
2563 		len = strlen(buf);
2564 	}
2565 	for (i = 0, n = sparc_pd_getnintr(dev); i < n; i++) {
2566 		int pri;
2567 
2568 		if (i != 0) {
2569 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
2570 			    ",");
2571 			len = strlen(buf);
2572 		}
2573 		pri = INT_IPL(sparc_pd_getintr(dev, i)->intrspec_pri);
2574 		f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
2575 		    " sparc ipl %d", pri);
2576 		len = strlen(buf);
2577 	}
2578 #ifdef DEBUG
2579 	if (f_len + 1 >= REPORTDEV_BUFSIZE) {
2580 		cmn_err(CE_NOTE, "next message is truncated: "
2581 		    "printed length 1024, real length %d", f_len);
2582 	}
2583 #endif /* DEBUG */
2584 	cmn_err(CE_CONT, "?%s\n", buf);
2585 	kmem_free(buf, REPORTDEV_BUFSIZE);
2586 	return (DDI_SUCCESS);
2587 }
2588 
2589 /*
2590  * For the x86 rootnexus, we're prepared to claim that the interrupt string
2591  * is in the form of a list of <ipl,vec> specifications.
2592  */
2593 
2594 #define	VEC_MIN	1
2595 #define	VEC_MAX	255
2596 static int
2597 rootnex_xlate_intrs(dev_info_t *dip, dev_info_t *rdip, int *in,
2598 	struct ddi_parent_private_data *pdptr)
2599     struct ddi_parent_private_data *pdptr)
2600 	size_t size;
2601 	int n;
2602 	struct intrspec *new;
2603 	caddr_t got_prop;
2604 	int *inpri;
2605 	int got_len;
2606 	extern int ignore_hardware_nodes;	/* force flag from ddi_impl.c */
2607 
2608 	static char bad_intr_fmt[] =
2609 	    "rootnex: bad interrupt spec from %s%d - ipl %d, irq %d\n";
2610 
2611 #ifdef	lint
2612 	dip = dip;
2613 #endif
2614 	/*
2615 	 * determine if the driver is expecting the new style "interrupts"
2616 	 * property which just contains the IRQ, or the old style which
2617 	 * contains pairs of <IPL,IRQ>.  if it is the new style, we always
2618 	 * assign IPL 5 unless an "interrupt-priorities" property exists.
2619 	 * in that case, the "interrupt-priorities" property contains the
2620 	 * IPL values that match, one for one, the IRQ values in the
2621 	 * "interrupts" property.
2622 	 */
2623 	inpri = NULL;
2624 	if ((ddi_getprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
2625 	    "ignore-hardware-nodes", -1) != -1) ||
2626 	    ignore_hardware_nodes) {
2627 		/* the old style "interrupts" property... */
2628 
2629 		/*
2630 		 * The list consists of <ipl,vec> elements
2631 		 */
2632 		if ((n = (*in++ >> 1)) < 1)
2633 			return (DDI_FAILURE);
2634 
2635 		pdptr->par_nintr = n;
2636 		size = n * sizeof (struct intrspec);
2637 		new = pdptr->par_intr = kmem_zalloc(size, KM_SLEEP);
2638 
2639 		while (n--) {
2640 			int level = *in++;
2641 			int vec = *in++;
2642 
2643 			if (level < 1 || level > MAXIPL ||
2644 			    vec < VEC_MIN || vec > VEC_MAX) {
2645 				cmn_err(CE_CONT, bad_intr_fmt,
2646 				    DEVI(rdip)->devi_name,
2647 				    DEVI(rdip)->devi_instance, level, vec);
2648 				goto broken;
2649 			}
2650 			new->intrspec_pri = level;
2651 			if (vec != 2)
2652 				new->intrspec_vec = vec;
2653 			else
2654 				/*
2655 				 * irq 2 on the PC bus is tied to irq 9
2656 				 * on ISA, EISA and MicroChannel
2657 				 */
2658 				new->intrspec_vec = 9;
2659 			new++;
2660 		}
2661 
2662 		return (DDI_SUCCESS);
2663 	} else {
2664 		/* the new style "interrupts" property... */
2665 
2666 		/*
2667 		 * The list consists of <vec> elements
2668 		 */
2669 		if ((n = (*in++)) < 1)
2670 			return (DDI_FAILURE);
2671 
2672 		pdptr->par_nintr = n;
2673 		size = n * sizeof (struct intrspec);
2674 		new = pdptr->par_intr = kmem_zalloc(size, KM_SLEEP);
2675 
2676 		/* XXX check for "interrupt-priorities" property... */
2677 		if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
2678 		    "interrupt-priorities", (caddr_t)&got_prop, &got_len)
2679 		    == DDI_PROP_SUCCESS) {
2680 			if (n != (got_len / sizeof (int))) {
2681 				cmn_err(CE_CONT,
2682 				    "rootnex: bad interrupt-priorities length"
2683 				    " from %s%d: expected %d, got %d\n",
2684 				    DEVI(rdip)->devi_name,
2685 				    DEVI(rdip)->devi_instance, n,
2686 				    (int)(got_len / sizeof (int)));
2687 				goto broken;
2688 			}
2689 			inpri = (int *)got_prop;
2690 		}
2691 
2692 		while (n--) {
2693 			int level;
2694 			int vec = *in++;
2695 
2696 			if (inpri == NULL)
2697 				level = 5;
2698 			else
2699 				level = *inpri++;
2700 
2701 			if (level < 1 || level > MAXIPL ||
2702 			    vec < VEC_MIN || vec > VEC_MAX) {
2703 				cmn_err(CE_CONT, bad_intr_fmt,
2704 				    DEVI(rdip)->devi_name,
2705 				    DEVI(rdip)->devi_instance, level, vec);
2706 				goto broken;
2707 			}
2708 			new->intrspec_pri = level;
2709 			if (vec != 2)
2710 				new->intrspec_vec = vec;
2711 			else
2712 				/*
2713 				 * irq 2 on the PC bus is tied to irq 9
2714 				 * on ISA, EISA and MicroChannel
2715 				 */
2716 				new->intrspec_vec = 9;
2717 			new++;
2718 		}
2719 
2720 		if (inpri != NULL)
2721 			kmem_free(got_prop, got_len);
2722 		return (DDI_SUCCESS);
2723 	}
2724 
2725 broken:
2726 	kmem_free(pdptr->par_intr, size);
2727 	pdptr->par_intr = NULL;
2728 	pdptr->par_nintr = 0;
2729 	if (inpri != NULL)
2730 		kmem_free(got_prop, got_len);
2731 	return (DDI_FAILURE);
2732 }
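
/*
 * Layout examples with illustrative values: an old style "interrupts"
 * property holds <2 * n, ipl, irq, ...>, e.g. { 4, 5, 10, 5, 11 } for
 * the two pairs <ipl 5, irq 10> and <ipl 5, irq 11>; a new style
 * property holds <n, irq, ...>, e.g. { 2, 10, 11 }, with the IPLs
 * supplied one-for-one by an "interrupt-priorities" property or
 * defaulted to 5.
 */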
2733 
2734 /*ARGSUSED*/
2735 static int
2736 rootnex_ctl_children(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
2737     dev_info_t *child)
2738 {
2739 	extern int impl_ddi_sunbus_initchild(dev_info_t *);
2740 	extern void impl_ddi_sunbus_removechild(dev_info_t *);
2741 
2742 	switch (ctlop)  {
2743 	case DDI_CTLOPS_INITCHILD:
2744 		return (impl_ddi_sunbus_initchild(child));
2745 
2746 	case DDI_CTLOPS_UNINITCHILD:
2747 		impl_ddi_sunbus_removechild(child);
2748 		return (DDI_SUCCESS);
2749 	default:
2750 		return (DDI_FAILURE);
2751 	}
2752 }
2753 
2754 
2755 static int
2756 rootnex_ctlops_poke(peekpoke_ctlops_t *in_args)
2757 {
2758 	int err = DDI_SUCCESS;
2759 	on_trap_data_t otd;
2760 
2761 	/* Cautious access not supported. */
2762 	if (in_args->handle != NULL)
2763 		return (DDI_FAILURE);
2764 
2765 	mutex_enter(&pokefault_mutex);
2766 	pokefault = -1;
2767 
2768 	/* Set up protected environment. */
2769 	if (!on_trap(&otd, OT_DATA_ACCESS)) {
2770 		switch (in_args->size) {
2771 		case sizeof (uint8_t):
2772 			*(uint8_t *)in_args->dev_addr =
2773 			    *(uint8_t *)in_args->host_addr;
2774 			break;
2775 
2776 		case sizeof (uint16_t):
2777 			*(uint16_t *)in_args->dev_addr =
2778 			    *(uint16_t *)in_args->host_addr;
2779 			break;
2780 
2781 		case sizeof (uint32_t):
2782 			*(uint32_t *)in_args->dev_addr =
2783 			    *(uint32_t *)in_args->host_addr;
2784 			break;
2785 
2786 		case sizeof (uint64_t):
2787 			*(uint64_t *)in_args->dev_addr =
2788 			    *(uint64_t *)in_args->host_addr;
2789 			break;
2790 
2791 		default:
2792 			err = DDI_FAILURE;
2793 			break;
2794 		}
2795 	} else
2796 		err = DDI_FAILURE;
2797 
2798 	/* Take down protected environment. */
2799 	no_trap();
2800 
2801 	pokefault = 0;
2802 	mutex_exit(&pokefault_mutex);
2803 
2804 	return (err);
2805 }
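
/*
 * For reference, rootnex_ctlops_poke() above and rootnex_ctlops_peek()
 * below are reached through ddi_ctlops(); a driver typically lands here
 * via ddi_poke8(9F) / ddi_peek8(9F) and friends when probing a device,
 * e.g. (with a hypothetical register address "regaddr"):
 *
 *	int8_t val = 0x5a;
 *
 *	if (ddi_poke8(dip, (int8_t *)regaddr, val) != DDI_SUCCESS)
 *		... no device responded at regaddr ...
 */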
2806 
2807 
2808 static int
2809 rootnex_ctlops_peek(peekpoke_ctlops_t *in_args, void *result)
2810 {
2811 	int err = DDI_SUCCESS;
2812 	on_trap_data_t otd;
2813 
2814 	/* Cautious access not supported. */
2815 	if (in_args->handle != NULL)
2816 		return (DDI_FAILURE);
2817 
2818 	if (!on_trap(&otd, OT_DATA_ACCESS)) {
2819 		switch (in_args->size) {
2820 		case sizeof (uint8_t):
2821 			*(uint8_t *)in_args->host_addr =
2822 			    *(uint8_t *)in_args->dev_addr;
2823 			break;
2824 
2825 		case sizeof (uint16_t):
2826 			*(uint16_t *)in_args->host_addr =
2827 			    *(uint16_t *)in_args->dev_addr;
2828 			break;
2829 
2830 		case sizeof (uint32_t):
2831 			*(uint32_t *)in_args->host_addr =
2832 			    *(uint32_t *)in_args->dev_addr;
2833 			break;
2834 
2835 		case sizeof (uint64_t):
2836 			*(uint64_t *)in_args->host_addr =
2837 			    *(uint64_t *)in_args->dev_addr;
2838 			break;
2839 
2840 		default:
2841 			err = DDI_FAILURE;
2842 			break;
2843 		}
2844 		result = (void *)in_args->host_addr;
2845 	} else
2846 		err = DDI_FAILURE;
2847 
2848 	no_trap();
2849 	return (err);
2850 }
2851 
2852 static int
2853 rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip,
2854     ddi_ctl_enum_t ctlop, void *arg, void *result)
2855 {
2856 	int n, *ptr;
2857 	struct ddi_parent_private_data *pdp;
2858 
2859 	static boolean_t reserved_msg_printed = B_FALSE;
2860 
2861 	switch (ctlop) {
2862 	case DDI_CTLOPS_DMAPMAPC:
2863 		/*
2864 		 * Return 'partial' to indicate that dma mapping
2865 		 * has to be done in the main MMU.
2866 		 */
2867 		return (DDI_DMA_PARTIAL);
2868 
2869 	case DDI_CTLOPS_BTOP:
2870 		/*
2871 		 * Convert byte count input to physical page units.
2872 		 * (byte counts that are not a page-size multiple
2873 		 * are rounded down)
2874 		 */
2875 		*(ulong_t *)result = btop(*(ulong_t *)arg);
2876 		return (DDI_SUCCESS);
2877 
2878 	case DDI_CTLOPS_PTOB:
2879 		/*
2880 		 * Convert size in physical pages to bytes
2881 		 */
2882 		*(ulong_t *)result = ptob(*(ulong_t *)arg);
2883 		return (DDI_SUCCESS);
2884 
2885 	case DDI_CTLOPS_BTOPR:
2886 		/*
2887 		 * Convert byte count input to physical page units
2888 		 * (byte counts that are not a page-size multiple
2889 		 * are rounded up)
2890 		 */
2891 		*(ulong_t *)result = btopr(*(ulong_t *)arg);
2892 		return (DDI_SUCCESS);
2893 
2894 	case DDI_CTLOPS_POKE:
2895 		return (rootnex_ctlops_poke((peekpoke_ctlops_t *)arg));
2896 
2897 	case DDI_CTLOPS_PEEK:
2898 		return (rootnex_ctlops_peek((peekpoke_ctlops_t *)arg, result));
2899 
2900 	case DDI_CTLOPS_INITCHILD:
2901 	case DDI_CTLOPS_UNINITCHILD:
2902 		return (rootnex_ctl_children(dip, rdip, ctlop, arg));
2903 
2904 	case DDI_CTLOPS_REPORTDEV:
2905 		return (rootnex_ctl_reportdev(rdip));
2906 
2907 	case DDI_CTLOPS_IOMIN:
2908 		/*
2909 		 * Nothing to do here but reflect back..
2910 		 * Nothing to do here but reflect back.
2911 		return (DDI_SUCCESS);
2912 
2913 	case DDI_CTLOPS_REGSIZE:
2914 	case DDI_CTLOPS_NREGS:
2915 	case DDI_CTLOPS_NINTRS:
2916 		break;
2917 
2918 	case DDI_CTLOPS_SIDDEV:
2919 		if (ndi_dev_is_prom_node(rdip))
2920 			return (DDI_SUCCESS);
2921 		if (ndi_dev_is_persistent_node(rdip))
2922 			return (DDI_SUCCESS);
2923 		return (DDI_FAILURE);
2924 
2925 	case DDI_CTLOPS_INTR_HILEVEL:
2926 		/*
2927 		 * Indicate whether the interrupt specified is to be handled
2928 		 * above lock level.  In other words, above the level that
2929 		 * cv_signal and default type mutexes can be used.
2930 		 */
2931 		*(int *)result =
2932 		    (INT_IPL(((struct intrspec *)arg)->intrspec_pri)
2933 		    > LOCK_LEVEL);
2934 		return (DDI_SUCCESS);
2935 
2936 	case DDI_CTLOPS_XLATE_INTRS:
2937 		return (rootnex_xlate_intrs(dip, rdip, arg, result));
2938 
2939 	case DDI_CTLOPS_POWER:
2940 		return ((*pm_platform_power)((power_req_t *)arg));
2941 
2942 	case DDI_CTLOPS_RESERVED1: /* Was DDI_CTLOPS_POKE_INIT, obsolete */
2943 	case DDI_CTLOPS_RESERVED2: /* Was DDI_CTLOPS_POKE_FLUSH, obsolete */
2944 	case DDI_CTLOPS_RESERVED3: /* Was DDI_CTLOPS_POKE_FINI, obsolete */
2945 		if (!reserved_msg_printed) {
2946 			reserved_msg_printed = B_TRUE;
2947 			cmn_err(CE_WARN, "Failing ddi_ctlops call(s) for "
2948 			    "1 or more reserved/obsolete operations.");
2949 		}
2950 		return (DDI_FAILURE);
2951 
2952 	default:
2953 		return (DDI_FAILURE);
2954 	}
2955 	/*
2956 	 * The rest are for "hardware" properties
2957 	 */
2958 	if ((pdp = ddi_get_parent_data(rdip)) == NULL)
2959 		return (DDI_FAILURE);
2960 
2961 	if (ctlop == DDI_CTLOPS_NREGS) {
2962 		ptr = (int *)result;
2963 		*ptr = pdp->par_nreg;
2964 	} else if (ctlop == DDI_CTLOPS_NINTRS) {
2965 		ptr = (int *)result;
2966 		*ptr = pdp->par_nintr;
2967 	} else {
2968 		off_t *size = (off_t *)result;
2969 
2970 		ptr = (int *)arg;
2971 		n = *ptr;
2972 		if (n >= pdp->par_nreg) {
2973 			return (DDI_FAILURE);
2974 		}
2975 		*size = (off_t)pdp->par_reg[n].regspec_size;
2976 	}
2977 	return (DDI_SUCCESS);
2978 }
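
/*
 * For reference, the REGSIZE/NREGS tail above is what answers
 * ddi_dev_regsize(9F) and ddi_dev_nregs(9F), e.g. (for hypothetical
 * register set 0):
 *
 *	off_t size;
 *
 *	if (ddi_dev_regsize(dip, 0, &size) == DDI_SUCCESS)
 *		... size holds par_reg[0].regspec_size ...
 */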
2979 
2980 /*
2981  * rootnex_get_ispec:
2982  *	convert an interrupt number to an interrupt specification.
2983  *	The interrupt number determines which interrupt spec will be
2984  *	returned if more than one exists.
2985  *
2986  *	Look into the parent private data area of the 'rdip' to find out
2987  *	the interrupt specification.  First check to make sure there is
2988  *	one that matches "inumber" and then return a pointer to it.
2989  *
2990  *	Return NULL if one could not be found.
2991  *
2992  *	NOTE: This is needed for rootnex_intr_ops()
2993  */
2994 static struct intrspec *
2995 rootnex_get_ispec(dev_info_t *rdip, int inum)
2996 {
2997 	struct ddi_parent_private_data *pdp = ddi_get_parent_data(rdip);
2998 
2999 	/*
3000 	 * Special case handling for drivers that provide their own
3001 	 * intrspec structures instead of relying on the DDI framework.
3002 	 *
3003 	 * A broken hardware driver in ON could potentially provide its
3004 	 * own intrspec structure instead of relying on the DDI framework.
3005 	 * If these drivers are children of 'rootnex' then we need to
3006 	 * continue to provide backward compatibility to them here.
3007 	 *
3008 	 * The following check is a special case for the 'pcic' driver,
3009 	 * which has broken hardware and thereby provides its own intrspec.
3010 	 *
3011 	 * Verbatim comments from this driver are shown here:
3012 	 * "Don't use the ddi_add_intr since we don't have a
3013 	 * default intrspec in all cases."
3014 	 *
3015 	 * Since an 'ispec' may not always be created for it,
3016 	 * check for that and create one here if needed.
3017 	 *
3018 	 * NOTE: Currently 'pcic' is the only driver found to do this.
3019 	 */
3020 	if (!pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) {
3021 		pdp->par_nintr = 1;
3022 		pdp->par_intr = kmem_zalloc(sizeof (struct intrspec) *
3023 		    pdp->par_nintr, KM_SLEEP);
3024 	}
3025 
3026 	/* Validate the interrupt number */
3027 	if (inum >= pdp->par_nintr)
3028 		return (NULL);
3029 
3030 	/* Get the interrupt structure pointer and return that */
3031 	return ((struct intrspec *)&pdp->par_intr[inum]);
3032 }
3033 
3034 
3035 /*
3036  * rootnex_intr_ops:
3037  *	bus_intr_op() function for interrupt support
3038  */
3039 /* ARGSUSED */
3040 static int
3041 rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op,
3042     ddi_intr_handle_impl_t *hdlp, void *result)
3043 {
3044 	struct intrspec			*ispec;
3045 	struct ddi_parent_private_data	*pdp;
3046 
3047 	DDI_INTR_NEXDBG((CE_CONT,
3048 	    "rootnex_intr_ops: pdip = %p, rdip = %p, intr_op = %x, hdlp = %p\n",
3049 	    (void *)pdip, (void *)rdip, intr_op, (void *)hdlp));
3050 
3051 	/* Process the interrupt operation */
3052 	switch (intr_op) {
3053 	case DDI_INTROP_GETCAP:
3054 		/* First check with pcplusmp */
3055 		if (psm_intr_ops == NULL)
3056 			return (DDI_FAILURE);
3057 
3058 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_CAP, result)) {
3059 			*(int *)result = 0;
3060 			return (DDI_FAILURE);
3061 		}
3062 		break;
3063 	case DDI_INTROP_SETCAP:
3064 		if (psm_intr_ops == NULL)
3065 			return (DDI_FAILURE);
3066 
3067 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_CAP, result))
3068 			return (DDI_FAILURE);
3069 		break;
3070 	case DDI_INTROP_ALLOC:
3071 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
3072 			return (DDI_FAILURE);
3073 		hdlp->ih_pri = ispec->intrspec_pri;
3074 		*(int *)result = hdlp->ih_scratch1;
3075 		break;
3076 	case DDI_INTROP_FREE:
3077 		pdp = ddi_get_parent_data(rdip);
3078 		/*
3079 		 * Special case for the 'pcic' driver only.
3080 		 * If an intrspec was created for it, clean it up here
3081 		 * See detailed comments on this in the function
3082 		 * rootnex_get_ispec().
3083 		 */
3084 		if (pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) {
3085 			kmem_free(pdp->par_intr, sizeof (struct intrspec) *
3086 			    pdp->par_nintr);
3087 			/*
3088 			 * Set it to zero; so that
3089 			 * DDI framework doesn't free it again
3090 			 */
3091 			pdp->par_intr = NULL;
3092 			pdp->par_nintr = 0;
3093 		}
3094 		break;
3095 	case DDI_INTROP_GETPRI:
3096 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
3097 			return (DDI_FAILURE);
3098 		*(int *)result = ispec->intrspec_pri;
3099 		break;
3100 	case DDI_INTROP_SETPRI:
3101 		/* Validate the interrupt priority passed to us */
3102 		if (*(int *)result > LOCK_LEVEL)
3103 			return (DDI_FAILURE);
3104 
3105 		/* Ensure that PSM is all initialized and ispec is ok */
3106 		if ((psm_intr_ops == NULL) ||
3107 		    ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL))
3108 			return (DDI_FAILURE);
3109 
3110 		/* Change the priority */
3111 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_PRI, result) ==
3112 		    PSM_FAILURE)
3113 			return (DDI_FAILURE);
3114 
3115 		/* update the ispec with the new priority */
3116 		ispec->intrspec_pri =  *(int *)result;
3117 		break;
3118 	case DDI_INTROP_ADDISR:
3119 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
3120 			return (DDI_FAILURE);
3121 		ispec->intrspec_func = hdlp->ih_cb_func;
3122 		break;
3123 	case DDI_INTROP_REMISR:
3124 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
3125 			return (DDI_FAILURE);
3126 		ispec->intrspec_func = (uint_t (*)()) 0;
3127 		break;
3128 	case DDI_INTROP_ENABLE:
3129 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
3130 			return (DDI_FAILURE);
3131 
3132 		/* Call psmi to translate irq with the dip */
3133 		if (psm_intr_ops == NULL)
3134 			return (DDI_FAILURE);
3135 
3136 		hdlp->ih_private = (void *)ispec;
3137 		(void) (*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_XLATE_VECTOR,
3138 		    (int *)&hdlp->ih_vector);
3139 
3140 		/* Add the interrupt handler */
3141 		if (!add_avintr((void *)hdlp, ispec->intrspec_pri,
3142 		    hdlp->ih_cb_func, DEVI(rdip)->devi_name, hdlp->ih_vector,
3143 		    hdlp->ih_cb_arg1, hdlp->ih_cb_arg2, rdip))
3144 			return (DDI_FAILURE);
3145 		break;
3146 	case DDI_INTROP_DISABLE:
3147 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
3148 			return (DDI_FAILURE);
3149 
3150 		/* Call psm_ops() to translate irq with the dip */
3151 		if (psm_intr_ops == NULL)
3152 			return (DDI_FAILURE);
3153 
3154 		hdlp->ih_private = (void *)ispec;
3155 		(void) (*psm_intr_ops)(rdip, hdlp,
3156 		    PSM_INTR_OP_XLATE_VECTOR, (int *)&hdlp->ih_vector);
3157 
3158 		/* Remove the interrupt handler */
3159 		rem_avintr((void *)hdlp, ispec->intrspec_pri,
3160 		    hdlp->ih_cb_func, hdlp->ih_vector);
3161 		break;
3162 	case DDI_INTROP_SETMASK:
3163 		if (psm_intr_ops == NULL)
3164 			return (DDI_FAILURE);
3165 
3166 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_MASK, NULL))
3167 			return (DDI_FAILURE);
3168 		break;
3169 	case DDI_INTROP_CLRMASK:
3170 		if (psm_intr_ops == NULL)
3171 			return (DDI_FAILURE);
3172 
3173 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_CLEAR_MASK, NULL))
3174 			return (DDI_FAILURE);
3175 		break;
3176 	case DDI_INTROP_GETPENDING:
3177 		if (psm_intr_ops == NULL)
3178 			return (DDI_FAILURE);
3179 
3180 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_PENDING,
3181 		    result)) {
3182 			*(int *)result = 0;
3183 			return (DDI_FAILURE);
3184 		}
3185 		break;
3186 	case DDI_INTROP_NINTRS:
3187 		if ((pdp = ddi_get_parent_data(rdip)) == NULL)
3188 			return (DDI_FAILURE);
3189 		*(int *)result = pdp->par_nintr;
3190 		if (pdp->par_nintr == 0) {
3191 			/*
3192 			 * Special case for the 'pcic' driver only.  This
3193 			 * driver is a child of the 'isa' and 'rootnex'
3194 			 * drivers.
3195 			 *
3196 			 * See detailed comments on this in the function
3197 			 * rootnex_get_ispec().
3198 			 *
3199 			 * Children of 'pcic' send the NINTRS request all the
3200 			 * way to the rootnex driver, but the 'pdp->par_nintr'
3201 			 * field may not be initialized.  So we fake it here
3202 			 * to return 1 (a la what the PCMCIA nexus does).
3202 			 */
3203 			if (strcmp(ddi_get_name(rdip), "pcic") == 0)
3204 				*(int *)result = 1;
3205 		}
3206 		break;
3207 	case DDI_INTROP_SUPPORTED_TYPES:
3208 		*(int *)result = 0;
3209 		*(int *)result |= DDI_INTR_TYPE_FIXED;	/* Always ... */
3210 		break;
3211 	case DDI_INTROP_NAVAIL:
3212 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
3213 			return (DDI_FAILURE);
3214 
3215 		if (psm_intr_ops == NULL) {
3216 			*(int *)result = 1;
3217 			break;
3218 		}
3219 
3220 		/* Priority in the handle not initialized yet */
3221 		hdlp->ih_pri = ispec->intrspec_pri;
3222 		(void) (*psm_intr_ops)(rdip, hdlp,
3223 		    PSM_INTR_OP_NAVAIL_VECTORS, result);
3224 		break;
3225 	default:
3226 		return (DDI_FAILURE);
3227 	}
3228 
3229 	return (DDI_SUCCESS);
3230 }
3231 
3232 
3233 /*
3234  * Get the physical address of an object described by "dmareq".
3235  * A "segsize" of zero is used to initialize the priv_handle *php.
3236  * Subsequent calls with a non-zero "segsize" advance past the bytes just
3237  * consumed and return the physical address of the next part of the object.
3238  * The function returns a 64 bit physical address.
3239  */
3240 uint64_t
3241 rootnex_get_phyaddr(struct ddi_dma_req *dmareq, uint_t segsize,
3242     struct priv_handle *php)
3243 {
3244 	size_t	offset;
3245 	page_t	*pp, **pplist;
3246 	caddr_t  vaddr, bvaddr;
3247 	struct as *asp;
3248 	int	index;
3249 	uint64_t segmentpadr;
3250 
3251 	switch (dmareq->dmar_object.dmao_type) {
3252 	case DMA_OTYP_PAGES:
3253 		if (segsize) {
3254 			pp = php->ph_u.pp;
3255 			vaddr = php->ph_vaddr;
3256 			offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
3257 			vaddr += segsize;
3258 			if ((offset += segsize) >= MMU_PAGESIZE) {
3259 				/*
3260 				 * crossed page boundary, get to the next page.
3261 				 */
3262 				offset &= MMU_PAGEOFFSET;
3263 				pp = pp->p_next;
3264 			}
3265 		} else {
3266 			/*
3267 			 * Initialize the priv_handle structure.
3268 			 */
3269 			pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp;
3270 			offset = dmareq->dmar_object.dmao_obj.pp_obj.pp_offset;
3271 			vaddr = (caddr_t)offset;
3272 			php->ph_mapinfo = DMAMI_PAGES;
3273 		}
3274 		php->ph_u.pp = pp;
3275 		php->ph_vaddr = vaddr;
3276 		segmentpadr = (uint64_t)offset + ptob64(page_pptonum(pp));
3277 		break;
3278 	case DMA_OTYP_VADDR:
3279 	case DMA_OTYP_BUFVADDR:
3280 		if (segsize) {
3281 			asp = php->ph_u.asp;
3282 			vaddr = php->ph_vaddr;
3283 			vaddr += segsize;
3284 		} else {
3285 			/*
3286 			 * Initialize the priv_handle structure.
3287 			 */
3288 			vaddr = dmareq->dmar_object.dmao_obj.virt_obj.v_addr;
3289 			asp = dmareq->dmar_object.dmao_obj.virt_obj.v_as;
3290 			if (asp == NULL) {
3291 				php->ph_mapinfo = DMAMI_KVADR;
3292 				asp = &kas;
3293 			} else {
3294 				php->ph_mapinfo = DMAMI_UVADR;
3295 			}
3296 			php->ph_u.asp = asp;
3297 		}
3298 		pplist = dmareq->dmar_object.dmao_obj.virt_obj.v_priv;
3299 		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
3300 		if (pplist == NULL) {
3301 			segmentpadr = (uint64_t)offset +
3302 			    ptob64(hat_getpfnum(asp->a_hat, vaddr));
3303 		} else {
3304 			bvaddr = dmareq->dmar_object.dmao_obj.virt_obj.v_addr;
3305 			index = btop(((ulong_t)bvaddr & MMU_PAGEOFFSET) +
3306 			    vaddr - bvaddr);
3307 			segmentpadr = (uint64_t)offset +
3308 			    ptob64(page_pptonum(pplist[index]));
3309 		}
3310 		php->ph_vaddr = vaddr;
3311 		break;
3312 	default:
3313 		panic("rootnex_get_phyaddr");
3314 		/*NOTREACHED*/
3315 	}
3316 	return (segmentpadr);
3317 }
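
/*
 * Usage pattern within this file: rootnex_dma_map() first primes *php
 * with a zero "segsize", and the breakup loop above then advances one
 * segment at a time, e.g.:
 *
 *	segmentpadr = rootnex_get_phyaddr(dmareq, 0, php);
 *	...
 *	segmentpadr = rootnex_get_phyaddr(dmareq, sizesegment, php);
 */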
3318