xref: /titanic_53/usr/src/uts/i86pc/io/rootnex.c (revision 12f080e7d03a5a6c62c85f0005491e9e4d355cfb)
17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate  * CDDL HEADER START
37c478bd9Sstevel@tonic-gate  *
47c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
57c478bd9Sstevel@tonic-gate  * Common Development and Distribution License, Version 1.0 only
67c478bd9Sstevel@tonic-gate  * (the "License").  You may not use this file except in compliance
77c478bd9Sstevel@tonic-gate  * with the License.
87c478bd9Sstevel@tonic-gate  *
97c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
107c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
117c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
127c478bd9Sstevel@tonic-gate  * and limitations under the License.
137c478bd9Sstevel@tonic-gate  *
147c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
157c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
167c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
177c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
187c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
197c478bd9Sstevel@tonic-gate  *
207c478bd9Sstevel@tonic-gate  * CDDL HEADER END
217c478bd9Sstevel@tonic-gate  */
227c478bd9Sstevel@tonic-gate /*
237c478bd9Sstevel@tonic-gate  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
247c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
257c478bd9Sstevel@tonic-gate  */
267c478bd9Sstevel@tonic-gate 
277c478bd9Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
287c478bd9Sstevel@tonic-gate 
297c478bd9Sstevel@tonic-gate /*
30*12f080e7Smrj  * x86 root nexus driver
317c478bd9Sstevel@tonic-gate  */
327c478bd9Sstevel@tonic-gate 
337c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h>
347c478bd9Sstevel@tonic-gate #include <sys/conf.h>
357c478bd9Sstevel@tonic-gate #include <sys/autoconf.h>
367c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h>
377c478bd9Sstevel@tonic-gate #include <sys/debug.h>
387c478bd9Sstevel@tonic-gate #include <sys/psw.h>
397c478bd9Sstevel@tonic-gate #include <sys/ddidmareq.h>
407c478bd9Sstevel@tonic-gate #include <sys/promif.h>
417c478bd9Sstevel@tonic-gate #include <sys/devops.h>
427c478bd9Sstevel@tonic-gate #include <sys/kmem.h>
437c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>
447c478bd9Sstevel@tonic-gate #include <vm/seg.h>
457c478bd9Sstevel@tonic-gate #include <vm/seg_kmem.h>
467c478bd9Sstevel@tonic-gate #include <vm/seg_dev.h>
477c478bd9Sstevel@tonic-gate #include <sys/vmem.h>
487c478bd9Sstevel@tonic-gate #include <sys/mman.h>
497c478bd9Sstevel@tonic-gate #include <vm/hat.h>
507c478bd9Sstevel@tonic-gate #include <vm/as.h>
517c478bd9Sstevel@tonic-gate #include <vm/page.h>
527c478bd9Sstevel@tonic-gate #include <sys/avintr.h>
537c478bd9Sstevel@tonic-gate #include <sys/errno.h>
547c478bd9Sstevel@tonic-gate #include <sys/modctl.h>
557c478bd9Sstevel@tonic-gate #include <sys/ddi_impldefs.h>
567c478bd9Sstevel@tonic-gate #include <sys/sunddi.h>
577c478bd9Sstevel@tonic-gate #include <sys/sunndi.h>
587c478bd9Sstevel@tonic-gate #include <sys/psm.h>
597c478bd9Sstevel@tonic-gate #include <sys/ontrap.h>
60*12f080e7Smrj #include <sys/atomic.h>
61*12f080e7Smrj #include <sys/sdt.h>
62*12f080e7Smrj #include <sys/rootnex.h>
63*12f080e7Smrj #include <vm/hat_i86.h>
647c478bd9Sstevel@tonic-gate 
657c478bd9Sstevel@tonic-gate 
66*12f080e7Smrj /*
67*12f080e7Smrj  * enable/disable extra checking of function parameters. Useful for debugging
68*12f080e7Smrj  * drivers.
69*12f080e7Smrj  */
70*12f080e7Smrj #ifdef	DEBUG
71*12f080e7Smrj int rootnex_alloc_check_parms = 1;
72*12f080e7Smrj int rootnex_bind_check_parms = 1;
73*12f080e7Smrj int rootnex_bind_check_inuse = 1;
74*12f080e7Smrj int rootnex_unbind_verify_buffer = 0;
75*12f080e7Smrj int rootnex_sync_check_parms = 1;
76*12f080e7Smrj #else
77*12f080e7Smrj int rootnex_alloc_check_parms = 0;
78*12f080e7Smrj int rootnex_bind_check_parms = 0;
79*12f080e7Smrj int rootnex_bind_check_inuse = 0;
80*12f080e7Smrj int rootnex_unbind_verify_buffer = 0;
81*12f080e7Smrj int rootnex_sync_check_parms = 0;
82*12f080e7Smrj #endif
837c478bd9Sstevel@tonic-gate 
84*12f080e7Smrj /* Semi-temporary patchables to phase in bug fixes, test drivers, etc. */
857c478bd9Sstevel@tonic-gate int rootnex_bind_fail = 1;
867c478bd9Sstevel@tonic-gate int rootnex_bind_warn = 1;
877c478bd9Sstevel@tonic-gate uint8_t *rootnex_warn_list;
887c478bd9Sstevel@tonic-gate /* bitmasks for rootnex_warn_list. Up to 8 different warnings with uint8_t */
897c478bd9Sstevel@tonic-gate #define	ROOTNEX_BIND_WARNING	(0x1 << 0)
907c478bd9Sstevel@tonic-gate 
917c478bd9Sstevel@tonic-gate /*
92*12f080e7Smrj  * revert back to old broken behavior of always sync'ing entire copy buffer.
93*12f080e7Smrj  * This is useful if be have a buggy driver which doesn't correctly pass in
94*12f080e7Smrj  * the offset and size into ddi_dma_sync().
957c478bd9Sstevel@tonic-gate  */
96*12f080e7Smrj int rootnex_sync_ignore_params = 0;
977c478bd9Sstevel@tonic-gate 
987c478bd9Sstevel@tonic-gate /*
99*12f080e7Smrj  * maximum size that we will allow for a copy buffer. Can be patched on the
100*12f080e7Smrj  * fly
1017c478bd9Sstevel@tonic-gate  */
102*12f080e7Smrj size_t rootnex_max_copybuf_size = 0x100000;
1037c478bd9Sstevel@tonic-gate 
1047c478bd9Sstevel@tonic-gate /*
105*12f080e7Smrj  * For the 64-bit kernel, pre-alloc enough cookies for a 256K buffer plus 1
106*12f080e7Smrj  * page for alignment. For the 32-bit kernel, pre-alloc enough cookies for a
107*12f080e7Smrj  * 64K buffer plus 1 page for alignment (we have less kernel space in a 32-bit
108*12f080e7Smrj  * kernel). Allocate enough windows to handle a 256K buffer w/ at least 65
109*12f080e7Smrj  * sgllen DMA engine, and enough copybuf buffer state pages to handle 2 pages
110*12f080e7Smrj  * (< 8K). We will still need to allocate the copy buffer during bind though
111*12f080e7Smrj  * (if we need one). These can only be modified in /etc/system before rootnex
112*12f080e7Smrj  * attach.
1137c478bd9Sstevel@tonic-gate  */
114*12f080e7Smrj #if defined(__amd64)
115*12f080e7Smrj int rootnex_prealloc_cookies = 65;
116*12f080e7Smrj int rootnex_prealloc_windows = 4;
117*12f080e7Smrj int rootnex_prealloc_copybuf = 2;
118*12f080e7Smrj #else
119*12f080e7Smrj int rootnex_prealloc_cookies = 33;
120*12f080e7Smrj int rootnex_prealloc_windows = 4;
121*12f080e7Smrj int rootnex_prealloc_copybuf = 2;
122*12f080e7Smrj #endif
1237c478bd9Sstevel@tonic-gate 
124*12f080e7Smrj /* driver global state */
125*12f080e7Smrj static rootnex_state_t *rootnex_state;
126*12f080e7Smrj 
127*12f080e7Smrj /* shortcut to rootnex counters */
128*12f080e7Smrj static uint64_t *rootnex_cnt;
1297c478bd9Sstevel@tonic-gate 
1307c478bd9Sstevel@tonic-gate /*
131*12f080e7Smrj  * XXX - does x86 even need these or are they left over from the SPARC days?
1327c478bd9Sstevel@tonic-gate  */
133*12f080e7Smrj /* statically defined integer/boolean properties for the root node */
134*12f080e7Smrj static rootnex_intprop_t rootnex_intprp[] = {
135*12f080e7Smrj 	{ "PAGESIZE",			PAGESIZE },
136*12f080e7Smrj 	{ "MMU_PAGESIZE",		MMU_PAGESIZE },
137*12f080e7Smrj 	{ "MMU_PAGEOFFSET",		MMU_PAGEOFFSET },
138*12f080e7Smrj 	{ DDI_RELATIVE_ADDRESSING,	1 },
139*12f080e7Smrj };
140*12f080e7Smrj #define	NROOT_INTPROPS	(sizeof (rootnex_intprp) / sizeof (rootnex_intprop_t))
1417c478bd9Sstevel@tonic-gate 
1427c478bd9Sstevel@tonic-gate 
143*12f080e7Smrj static struct cb_ops rootnex_cb_ops = {
144*12f080e7Smrj 	nodev,		/* open */
145*12f080e7Smrj 	nodev,		/* close */
146*12f080e7Smrj 	nodev,		/* strategy */
147*12f080e7Smrj 	nodev,		/* print */
148*12f080e7Smrj 	nodev,		/* dump */
149*12f080e7Smrj 	nodev,		/* read */
150*12f080e7Smrj 	nodev,		/* write */
151*12f080e7Smrj 	nodev,		/* ioctl */
152*12f080e7Smrj 	nodev,		/* devmap */
153*12f080e7Smrj 	nodev,		/* mmap */
154*12f080e7Smrj 	nodev,		/* segmap */
155*12f080e7Smrj 	nochpoll,	/* chpoll */
156*12f080e7Smrj 	ddi_prop_op,	/* cb_prop_op */
157*12f080e7Smrj 	NULL,		/* struct streamtab */
158*12f080e7Smrj 	D_NEW | D_MP | D_HOTPLUG, /* compatibility flags */
159*12f080e7Smrj 	CB_REV,		/* Rev */
160*12f080e7Smrj 	nodev,		/* cb_aread */
161*12f080e7Smrj 	nodev		/* cb_awrite */
162*12f080e7Smrj };
1637c478bd9Sstevel@tonic-gate 
164*12f080e7Smrj static int rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
1657c478bd9Sstevel@tonic-gate     off_t offset, off_t len, caddr_t *vaddrp);
166*12f080e7Smrj static int rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip,
1677c478bd9Sstevel@tonic-gate     struct hat *hat, struct seg *seg, caddr_t addr,
1687c478bd9Sstevel@tonic-gate     struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock);
169*12f080e7Smrj static int rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip,
1707c478bd9Sstevel@tonic-gate     struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep);
171*12f080e7Smrj static int rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
172*12f080e7Smrj     ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
173*12f080e7Smrj     ddi_dma_handle_t *handlep);
174*12f080e7Smrj static int rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
175*12f080e7Smrj     ddi_dma_handle_t handle);
176*12f080e7Smrj static int rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
177*12f080e7Smrj     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
178*12f080e7Smrj     ddi_dma_cookie_t *cookiep, uint_t *ccountp);
179*12f080e7Smrj static int rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
180*12f080e7Smrj     ddi_dma_handle_t handle);
181*12f080e7Smrj static int rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip,
182*12f080e7Smrj     ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags);
183*12f080e7Smrj static int rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip,
184*12f080e7Smrj     ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp,
185*12f080e7Smrj     ddi_dma_cookie_t *cookiep, uint_t *ccountp);
186*12f080e7Smrj static int rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
1877c478bd9Sstevel@tonic-gate     ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
1887c478bd9Sstevel@tonic-gate     off_t *offp, size_t *lenp, caddr_t *objp, uint_t cache_flags);
189*12f080e7Smrj static int rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip,
190*12f080e7Smrj     ddi_ctl_enum_t ctlop, void *arg, void *result);
191*12f080e7Smrj static int rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip,
192*12f080e7Smrj     ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp, void *result);
1937c478bd9Sstevel@tonic-gate 
1947c478bd9Sstevel@tonic-gate 
1957c478bd9Sstevel@tonic-gate static struct bus_ops rootnex_bus_ops = {
1967c478bd9Sstevel@tonic-gate 	BUSO_REV,
1977c478bd9Sstevel@tonic-gate 	rootnex_map,
1987c478bd9Sstevel@tonic-gate 	NULL,
1997c478bd9Sstevel@tonic-gate 	NULL,
2007c478bd9Sstevel@tonic-gate 	NULL,
2017c478bd9Sstevel@tonic-gate 	rootnex_map_fault,
2027c478bd9Sstevel@tonic-gate 	rootnex_dma_map,
2037c478bd9Sstevel@tonic-gate 	rootnex_dma_allochdl,
2047c478bd9Sstevel@tonic-gate 	rootnex_dma_freehdl,
2057c478bd9Sstevel@tonic-gate 	rootnex_dma_bindhdl,
2067c478bd9Sstevel@tonic-gate 	rootnex_dma_unbindhdl,
207*12f080e7Smrj 	rootnex_dma_sync,
2087c478bd9Sstevel@tonic-gate 	rootnex_dma_win,
2097c478bd9Sstevel@tonic-gate 	rootnex_dma_mctl,
2107c478bd9Sstevel@tonic-gate 	rootnex_ctlops,
2117c478bd9Sstevel@tonic-gate 	ddi_bus_prop_op,
2127c478bd9Sstevel@tonic-gate 	i_ddi_rootnex_get_eventcookie,
2137c478bd9Sstevel@tonic-gate 	i_ddi_rootnex_add_eventcall,
2147c478bd9Sstevel@tonic-gate 	i_ddi_rootnex_remove_eventcall,
2157c478bd9Sstevel@tonic-gate 	i_ddi_rootnex_post_event,
2167c478bd9Sstevel@tonic-gate 	0,			/* bus_intr_ctl */
2177c478bd9Sstevel@tonic-gate 	0,			/* bus_config */
2187c478bd9Sstevel@tonic-gate 	0,			/* bus_unconfig */
2197c478bd9Sstevel@tonic-gate 	NULL,			/* bus_fm_init */
2207c478bd9Sstevel@tonic-gate 	NULL,			/* bus_fm_fini */
2217c478bd9Sstevel@tonic-gate 	NULL,			/* bus_fm_access_enter */
2227c478bd9Sstevel@tonic-gate 	NULL,			/* bus_fm_access_exit */
2237c478bd9Sstevel@tonic-gate 	NULL,			/* bus_powr */
2247c478bd9Sstevel@tonic-gate 	rootnex_intr_ops	/* bus_intr_op */
2257c478bd9Sstevel@tonic-gate };
2267c478bd9Sstevel@tonic-gate 
227*12f080e7Smrj static int rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
228*12f080e7Smrj static int rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
2297c478bd9Sstevel@tonic-gate 
2307c478bd9Sstevel@tonic-gate static struct dev_ops rootnex_ops = {
2317c478bd9Sstevel@tonic-gate 	DEVO_REV,
232*12f080e7Smrj 	0,
233*12f080e7Smrj 	ddi_no_info,
2347c478bd9Sstevel@tonic-gate 	nulldev,
235*12f080e7Smrj 	nulldev,
2367c478bd9Sstevel@tonic-gate 	rootnex_attach,
237*12f080e7Smrj 	rootnex_detach,
238*12f080e7Smrj 	nulldev,
239*12f080e7Smrj 	&rootnex_cb_ops,
2407c478bd9Sstevel@tonic-gate 	&rootnex_bus_ops
2417c478bd9Sstevel@tonic-gate };
2427c478bd9Sstevel@tonic-gate 
243*12f080e7Smrj static struct modldrv rootnex_modldrv = {
244*12f080e7Smrj 	&mod_driverops,
2457c478bd9Sstevel@tonic-gate 	"i86pc root nexus %I%",
246*12f080e7Smrj 	&rootnex_ops
2477c478bd9Sstevel@tonic-gate };
2487c478bd9Sstevel@tonic-gate 
249*12f080e7Smrj static struct modlinkage rootnex_modlinkage = {
250*12f080e7Smrj 	MODREV_1,
251*12f080e7Smrj 	(void *)&rootnex_modldrv,
252*12f080e7Smrj 	NULL
2537c478bd9Sstevel@tonic-gate };
2547c478bd9Sstevel@tonic-gate 
2557c478bd9Sstevel@tonic-gate 
256*12f080e7Smrj /*
257*12f080e7Smrj  *  extern hacks
258*12f080e7Smrj  */
259*12f080e7Smrj extern struct seg_ops segdev_ops;
260*12f080e7Smrj extern int ignore_hardware_nodes;	/* force flag from ddi_impl.c */
261*12f080e7Smrj #ifdef	DDI_MAP_DEBUG
262*12f080e7Smrj extern int ddi_map_debug_flag;
263*12f080e7Smrj #define	ddi_map_debug	if (ddi_map_debug_flag) prom_printf
264*12f080e7Smrj #endif
265*12f080e7Smrj #define	ptob64(x)	(((uint64_t)(x)) << MMU_PAGESHIFT)
266*12f080e7Smrj extern void i86_pp_map(page_t *pp, caddr_t kaddr);
267*12f080e7Smrj extern void i86_va_map(caddr_t vaddr, struct as *asp, caddr_t kaddr);
268*12f080e7Smrj extern int (*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *,
269*12f080e7Smrj     psm_intr_op_t, int *);
270*12f080e7Smrj extern int impl_ddi_sunbus_initchild(dev_info_t *dip);
271*12f080e7Smrj extern void impl_ddi_sunbus_removechild(dev_info_t *dip);
272*12f080e7Smrj /*
273*12f080e7Smrj  * Use device arena to use for device control register mappings.
274*12f080e7Smrj  * Various kernel memory walkers (debugger, dtrace) need to know
275*12f080e7Smrj  * to avoid this address range to prevent undesired device activity.
276*12f080e7Smrj  */
277*12f080e7Smrj extern void *device_arena_alloc(size_t size, int vm_flag);
278*12f080e7Smrj extern void device_arena_free(void * vaddr, size_t size);
279*12f080e7Smrj 
280*12f080e7Smrj 
281*12f080e7Smrj /*
282*12f080e7Smrj  *  Internal functions
283*12f080e7Smrj  */
284*12f080e7Smrj static int rootnex_dma_init();
285*12f080e7Smrj static void rootnex_add_props(dev_info_t *);
286*12f080e7Smrj static int rootnex_ctl_reportdev(dev_info_t *dip);
287*12f080e7Smrj static struct intrspec *rootnex_get_ispec(dev_info_t *rdip, int inum);
288*12f080e7Smrj static int rootnex_xlate_intrs(dev_info_t *dip, dev_info_t *rdip, int *in,
289*12f080e7Smrj     struct ddi_parent_private_data *pdptr);
290*12f080e7Smrj static int rootnex_ctlops_poke(peekpoke_ctlops_t *in_args);
291*12f080e7Smrj static int rootnex_ctlops_peek(peekpoke_ctlops_t *in_args, void *result);
292*12f080e7Smrj static int rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp);
293*12f080e7Smrj static int rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp);
294*12f080e7Smrj static int rootnex_map_handle(ddi_map_req_t *mp);
295*12f080e7Smrj static void rootnex_clean_dmahdl(ddi_dma_impl_t *hp);
296*12f080e7Smrj static int rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegsize);
297*12f080e7Smrj static int rootnex_valid_bind_parms(ddi_dma_req_t *dmareq,
298*12f080e7Smrj     ddi_dma_attr_t *attr);
299*12f080e7Smrj static void rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
300*12f080e7Smrj     rootnex_sglinfo_t *sglinfo);
301*12f080e7Smrj static int rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
302*12f080e7Smrj     rootnex_dma_t *dma, ddi_dma_attr_t *attr, int kmflag);
303*12f080e7Smrj static int rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
304*12f080e7Smrj     rootnex_dma_t *dma, ddi_dma_attr_t *attr);
305*12f080e7Smrj static void rootnex_teardown_copybuf(rootnex_dma_t *dma);
306*12f080e7Smrj static int rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
307*12f080e7Smrj     ddi_dma_attr_t *attr, int kmflag);
308*12f080e7Smrj static void rootnex_teardown_windows(rootnex_dma_t *dma);
309*12f080e7Smrj static void rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
310*12f080e7Smrj     rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset);
311*12f080e7Smrj static void rootnex_setup_cookie(ddi_dma_obj_t *dmar_object,
312*12f080e7Smrj     rootnex_dma_t *dma, ddi_dma_cookie_t *cookie, off_t cur_offset,
313*12f080e7Smrj     size_t *copybuf_used, page_t **cur_pp);
314*12f080e7Smrj static int rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp,
315*12f080e7Smrj     rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie,
316*12f080e7Smrj     ddi_dma_attr_t *attr, off_t cur_offset);
317*12f080e7Smrj static int rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp,
318*12f080e7Smrj     rootnex_dma_t *dma, rootnex_window_t **windowp,
319*12f080e7Smrj     ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used);
320*12f080e7Smrj static int rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp,
321*12f080e7Smrj     rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie);
322*12f080e7Smrj static int rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win,
323*12f080e7Smrj     off_t offset, size_t size, uint_t cache_flags);
324*12f080e7Smrj static int rootnex_verify_buffer(rootnex_dma_t *dma);
325*12f080e7Smrj 
326*12f080e7Smrj 
327*12f080e7Smrj /*
328*12f080e7Smrj  * _init()
329*12f080e7Smrj  *
330*12f080e7Smrj  */
3317c478bd9Sstevel@tonic-gate int
3327c478bd9Sstevel@tonic-gate _init(void)
3337c478bd9Sstevel@tonic-gate {
334*12f080e7Smrj 
335*12f080e7Smrj 	rootnex_state = NULL;
336*12f080e7Smrj 	return (mod_install(&rootnex_modlinkage));
3377c478bd9Sstevel@tonic-gate }
3387c478bd9Sstevel@tonic-gate 
339*12f080e7Smrj 
340*12f080e7Smrj /*
341*12f080e7Smrj  * _info()
342*12f080e7Smrj  *
343*12f080e7Smrj  */
344*12f080e7Smrj int
345*12f080e7Smrj _info(struct modinfo *modinfop)
346*12f080e7Smrj {
347*12f080e7Smrj 	return (mod_info(&rootnex_modlinkage, modinfop));
348*12f080e7Smrj }
349*12f080e7Smrj 
350*12f080e7Smrj 
351*12f080e7Smrj /*
352*12f080e7Smrj  * _fini()
353*12f080e7Smrj  *
354*12f080e7Smrj  */
3557c478bd9Sstevel@tonic-gate int
3567c478bd9Sstevel@tonic-gate _fini(void)
3577c478bd9Sstevel@tonic-gate {
3587c478bd9Sstevel@tonic-gate 	return (EBUSY);
3597c478bd9Sstevel@tonic-gate }
3607c478bd9Sstevel@tonic-gate 
361*12f080e7Smrj 
362*12f080e7Smrj /*
363*12f080e7Smrj  * rootnex_attach()
364*12f080e7Smrj  *
365*12f080e7Smrj  */
366*12f080e7Smrj static int
367*12f080e7Smrj rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
3687c478bd9Sstevel@tonic-gate {
369*12f080e7Smrj 	int e;
370*12f080e7Smrj 
371*12f080e7Smrj 
372*12f080e7Smrj 	switch (cmd) {
373*12f080e7Smrj 	case DDI_ATTACH:
374*12f080e7Smrj 		break;
375*12f080e7Smrj 	case DDI_RESUME:
376*12f080e7Smrj 		return (DDI_SUCCESS);
377*12f080e7Smrj 	default:
378*12f080e7Smrj 		return (DDI_FAILURE);
3797c478bd9Sstevel@tonic-gate 	}
3807c478bd9Sstevel@tonic-gate 
3817c478bd9Sstevel@tonic-gate 	/*
382*12f080e7Smrj 	 * We should only have one instance of rootnex. Save it away since we
383*12f080e7Smrj 	 * don't have an easy way to get it back later.
3847c478bd9Sstevel@tonic-gate 	 */
385*12f080e7Smrj 	ASSERT(rootnex_state == NULL);
386*12f080e7Smrj 	rootnex_state = kmem_zalloc(sizeof (rootnex_state_t), KM_SLEEP);
3877c478bd9Sstevel@tonic-gate 
388*12f080e7Smrj 	rootnex_state->r_dip = dip;
389*12f080e7Smrj 	rootnex_state->r_reserved_msg_printed = B_FALSE;
390*12f080e7Smrj 	rootnex_cnt = &rootnex_state->r_counters[0];
3917c478bd9Sstevel@tonic-gate 
392*12f080e7Smrj 	mutex_init(&rootnex_state->r_peekpoke_mutex, NULL, MUTEX_SPIN,
393*12f080e7Smrj 	    (void *)ipltospl(15));
394*12f080e7Smrj 
395*12f080e7Smrj 	/* initialize DMA related state */
396*12f080e7Smrj 	e = rootnex_dma_init();
397*12f080e7Smrj 	if (e != DDI_SUCCESS) {
398*12f080e7Smrj 		mutex_destroy(&rootnex_state->r_peekpoke_mutex);
399*12f080e7Smrj 		kmem_free(rootnex_state, sizeof (rootnex_state_t));
400*12f080e7Smrj 		return (DDI_FAILURE);
401*12f080e7Smrj 	}
402*12f080e7Smrj 
403*12f080e7Smrj 	/* Add static root node properties */
404*12f080e7Smrj 	rootnex_add_props(dip);
405*12f080e7Smrj 
406*12f080e7Smrj 	/* since we can't call ddi_report_dev() */
407*12f080e7Smrj 	cmn_err(CE_CONT, "?root nexus = %s\n", ddi_get_name(dip));
408*12f080e7Smrj 
409*12f080e7Smrj 	/* Initialize rootnex event handle */
410*12f080e7Smrj 	i_ddi_rootnex_init_events(dip);
411*12f080e7Smrj 
412*12f080e7Smrj 	return (DDI_SUCCESS);
413*12f080e7Smrj }
414*12f080e7Smrj 
415*12f080e7Smrj 
416*12f080e7Smrj /*
417*12f080e7Smrj  * rootnex_detach()
418*12f080e7Smrj  *
419*12f080e7Smrj  */
4207c478bd9Sstevel@tonic-gate /*ARGSUSED*/
4217c478bd9Sstevel@tonic-gate static int
422*12f080e7Smrj rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
4237c478bd9Sstevel@tonic-gate {
424*12f080e7Smrj 	switch (cmd) {
425*12f080e7Smrj 	case DDI_SUSPEND:
426*12f080e7Smrj 		break;
427*12f080e7Smrj 	default:
428*12f080e7Smrj 		return (DDI_FAILURE);
429*12f080e7Smrj 	}
4307c478bd9Sstevel@tonic-gate 
431*12f080e7Smrj 	return (DDI_SUCCESS);
432*12f080e7Smrj }
4337c478bd9Sstevel@tonic-gate 
4347c478bd9Sstevel@tonic-gate 
435*12f080e7Smrj /*
436*12f080e7Smrj  * rootnex_dma_init()
437*12f080e7Smrj  *
438*12f080e7Smrj  */
439*12f080e7Smrj /*ARGSUSED*/
440*12f080e7Smrj static int
441*12f080e7Smrj rootnex_dma_init()
442*12f080e7Smrj {
443*12f080e7Smrj 	size_t bufsize;
444*12f080e7Smrj 
445*12f080e7Smrj 
446*12f080e7Smrj 	/*
447*12f080e7Smrj 	 * size of our cookie/window/copybuf state needed in dma bind that we
448*12f080e7Smrj 	 * pre-alloc in dma_alloc_handle
449*12f080e7Smrj 	 */
450*12f080e7Smrj 	rootnex_state->r_prealloc_cookies = rootnex_prealloc_cookies;
451*12f080e7Smrj 	rootnex_state->r_prealloc_size =
452*12f080e7Smrj 	    (rootnex_state->r_prealloc_cookies * sizeof (ddi_dma_cookie_t)) +
453*12f080e7Smrj 	    (rootnex_prealloc_windows * sizeof (rootnex_window_t)) +
454*12f080e7Smrj 	    (rootnex_prealloc_copybuf * sizeof (rootnex_pgmap_t));
455*12f080e7Smrj 
456*12f080e7Smrj 	/*
457*12f080e7Smrj 	 * setup DDI DMA handle kmem cache, align each handle on 64 bytes,
458*12f080e7Smrj 	 * allocate 16 extra bytes for struct pointer alignment
459*12f080e7Smrj 	 * (p->dmai_private & dma->dp_prealloc_buffer)
460*12f080e7Smrj 	 */
461*12f080e7Smrj 	bufsize = sizeof (ddi_dma_impl_t) + sizeof (rootnex_dma_t) +
462*12f080e7Smrj 	    rootnex_state->r_prealloc_size + 0x10;
463*12f080e7Smrj 	rootnex_state->r_dmahdl_cache = kmem_cache_create("rootnex_dmahdl",
464*12f080e7Smrj 	    bufsize, 64, NULL, NULL, NULL, NULL, NULL, 0);
465*12f080e7Smrj 	if (rootnex_state->r_dmahdl_cache == NULL) {
466*12f080e7Smrj 		return (DDI_FAILURE);
467*12f080e7Smrj 	}
4687c478bd9Sstevel@tonic-gate 
4697c478bd9Sstevel@tonic-gate 	/*
4707c478bd9Sstevel@tonic-gate 	 * allocate array to track which major numbers we have printed warnings
4717c478bd9Sstevel@tonic-gate 	 * for.
4727c478bd9Sstevel@tonic-gate 	 */
4737c478bd9Sstevel@tonic-gate 	rootnex_warn_list = kmem_zalloc(devcnt * sizeof (*rootnex_warn_list),
4747c478bd9Sstevel@tonic-gate 	    KM_SLEEP);
4757c478bd9Sstevel@tonic-gate 
4767c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
4777c478bd9Sstevel@tonic-gate }
4787c478bd9Sstevel@tonic-gate 
4797c478bd9Sstevel@tonic-gate 
4807c478bd9Sstevel@tonic-gate /*
481*12f080e7Smrj  * rootnex_add_props()
482*12f080e7Smrj  *
4837c478bd9Sstevel@tonic-gate  */
4847c478bd9Sstevel@tonic-gate static void
485*12f080e7Smrj rootnex_add_props(dev_info_t *dip)
4867c478bd9Sstevel@tonic-gate {
487*12f080e7Smrj 	rootnex_intprop_t *rpp;
4887c478bd9Sstevel@tonic-gate 	int i;
4897c478bd9Sstevel@tonic-gate 
490*12f080e7Smrj 	/* Add static integer/boolean properties to the root node */
491*12f080e7Smrj 	rpp = rootnex_intprp;
492*12f080e7Smrj 	for (i = 0; i < NROOT_INTPROPS; i++) {
493*12f080e7Smrj 		(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip,
494*12f080e7Smrj 		    rpp[i].prop_name, rpp[i].prop_value);
495*12f080e7Smrj 	}
4967c478bd9Sstevel@tonic-gate }
4977c478bd9Sstevel@tonic-gate 
498*12f080e7Smrj 
499*12f080e7Smrj 
5007c478bd9Sstevel@tonic-gate /*
501*12f080e7Smrj  * *************************
502*12f080e7Smrj  *  ctlops related routines
503*12f080e7Smrj  * *************************
504*12f080e7Smrj  */
505*12f080e7Smrj 
506*12f080e7Smrj /*
507*12f080e7Smrj  * rootnex_ctlops()
5087c478bd9Sstevel@tonic-gate  *
5097c478bd9Sstevel@tonic-gate  */
5107c478bd9Sstevel@tonic-gate static int
511*12f080e7Smrj rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
512*12f080e7Smrj     void *arg, void *result)
5137c478bd9Sstevel@tonic-gate {
514*12f080e7Smrj 	int n, *ptr;
515*12f080e7Smrj 	struct ddi_parent_private_data *pdp;
5167c478bd9Sstevel@tonic-gate 
5177c478bd9Sstevel@tonic-gate 
518*12f080e7Smrj 	switch (ctlop) {
519*12f080e7Smrj 	case DDI_CTLOPS_DMAPMAPC:
5207c478bd9Sstevel@tonic-gate 		/*
521*12f080e7Smrj 		 * Return 'partial' to indicate that dma mapping
522*12f080e7Smrj 		 * has to be done in the main MMU.
5237c478bd9Sstevel@tonic-gate 		 */
524*12f080e7Smrj 		return (DDI_DMA_PARTIAL);
5257c478bd9Sstevel@tonic-gate 
526*12f080e7Smrj 	case DDI_CTLOPS_BTOP:
5277c478bd9Sstevel@tonic-gate 		/*
528*12f080e7Smrj 		 * Convert byte count input to physical page units.
529*12f080e7Smrj 		 * (byte counts that are not a page-size multiple
530*12f080e7Smrj 		 * are rounded down)
5317c478bd9Sstevel@tonic-gate 		 */
532*12f080e7Smrj 		*(ulong_t *)result = btop(*(ulong_t *)arg);
5337c478bd9Sstevel@tonic-gate 		return (DDI_SUCCESS);
5347c478bd9Sstevel@tonic-gate 
535*12f080e7Smrj 	case DDI_CTLOPS_PTOB:
5367c478bd9Sstevel@tonic-gate 		/*
537*12f080e7Smrj 		 * Convert size in physical pages to bytes
5387c478bd9Sstevel@tonic-gate 		 */
539*12f080e7Smrj 		*(ulong_t *)result = ptob(*(ulong_t *)arg);
5407c478bd9Sstevel@tonic-gate 		return (DDI_SUCCESS);
5417c478bd9Sstevel@tonic-gate 
542*12f080e7Smrj 	case DDI_CTLOPS_BTOPR:
5437c478bd9Sstevel@tonic-gate 		/*
544*12f080e7Smrj 		 * Convert byte count input to physical page units
545*12f080e7Smrj 		 * (byte counts that are not a page-size multiple
546*12f080e7Smrj 		 * are rounded up)
5477c478bd9Sstevel@tonic-gate 		 */
548*12f080e7Smrj 		*(ulong_t *)result = btopr(*(ulong_t *)arg);
549*12f080e7Smrj 		return (DDI_SUCCESS);
550*12f080e7Smrj 
551*12f080e7Smrj 	case DDI_CTLOPS_POKE:
552*12f080e7Smrj 		return (rootnex_ctlops_poke((peekpoke_ctlops_t *)arg));
553*12f080e7Smrj 
554*12f080e7Smrj 	case DDI_CTLOPS_PEEK:
555*12f080e7Smrj 		return (rootnex_ctlops_peek((peekpoke_ctlops_t *)arg, result));
556*12f080e7Smrj 
557*12f080e7Smrj 	case DDI_CTLOPS_INITCHILD:
558*12f080e7Smrj 		return (impl_ddi_sunbus_initchild(arg));
559*12f080e7Smrj 
560*12f080e7Smrj 	case DDI_CTLOPS_UNINITCHILD:
561*12f080e7Smrj 		impl_ddi_sunbus_removechild(arg);
562*12f080e7Smrj 		return (DDI_SUCCESS);
563*12f080e7Smrj 
564*12f080e7Smrj 	case DDI_CTLOPS_REPORTDEV:
565*12f080e7Smrj 		return (rootnex_ctl_reportdev(rdip));
566*12f080e7Smrj 
567*12f080e7Smrj 	case DDI_CTLOPS_IOMIN:
5687c478bd9Sstevel@tonic-gate 		/*
569*12f080e7Smrj 		 * Nothing to do here but reflect back..
5707c478bd9Sstevel@tonic-gate 		 */
5717c478bd9Sstevel@tonic-gate 		return (DDI_SUCCESS);
5727c478bd9Sstevel@tonic-gate 
573*12f080e7Smrj 	case DDI_CTLOPS_REGSIZE:
574*12f080e7Smrj 	case DDI_CTLOPS_NREGS:
575*12f080e7Smrj 	case DDI_CTLOPS_NINTRS:
576*12f080e7Smrj 		break;
5777c478bd9Sstevel@tonic-gate 
578*12f080e7Smrj 	case DDI_CTLOPS_SIDDEV:
579*12f080e7Smrj 		if (ndi_dev_is_prom_node(rdip))
5807c478bd9Sstevel@tonic-gate 			return (DDI_SUCCESS);
581*12f080e7Smrj 		if (ndi_dev_is_persistent_node(rdip))
582*12f080e7Smrj 			return (DDI_SUCCESS);
5837c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
5847c478bd9Sstevel@tonic-gate 
585*12f080e7Smrj 	case DDI_CTLOPS_INTR_HILEVEL:
5867c478bd9Sstevel@tonic-gate 		/*
587*12f080e7Smrj 		 * Indicate whether the interrupt specified is to be handled
588*12f080e7Smrj 		 * above lock level.  In other words, above the level that
589*12f080e7Smrj 		 * cv_signal and default type mutexes can be used.
5907c478bd9Sstevel@tonic-gate 		 */
591*12f080e7Smrj 		*(int *)result =
592*12f080e7Smrj 		    (INT_IPL(((struct intrspec *)arg)->intrspec_pri)
593*12f080e7Smrj 		    > LOCK_LEVEL);
594*12f080e7Smrj 		return (DDI_SUCCESS);
5957c478bd9Sstevel@tonic-gate 
596*12f080e7Smrj 	case DDI_CTLOPS_XLATE_INTRS:
597*12f080e7Smrj 		return (rootnex_xlate_intrs(dip, rdip, arg, result));
598*12f080e7Smrj 
599*12f080e7Smrj 	case DDI_CTLOPS_POWER:
600*12f080e7Smrj 		return ((*pm_platform_power)((power_req_t *)arg));
601*12f080e7Smrj 
602*12f080e7Smrj 	case DDI_CTLOPS_RESERVED1: /* Was DDI_CTLOPS_POKE_INIT, obsolete */
603*12f080e7Smrj 	case DDI_CTLOPS_RESERVED2: /* Was DDI_CTLOPS_POKE_FLUSH, obsolete */
604*12f080e7Smrj 	case DDI_CTLOPS_RESERVED3: /* Was DDI_CTLOPS_POKE_FINI, obsolete */
605*12f080e7Smrj 		if (!rootnex_state->r_reserved_msg_printed) {
606*12f080e7Smrj 			rootnex_state->r_reserved_msg_printed = B_TRUE;
607*12f080e7Smrj 			cmn_err(CE_WARN, "Failing ddi_ctlops call(s) for "
608*12f080e7Smrj 			    "1 or more reserved/obsolete operations.");
6097c478bd9Sstevel@tonic-gate 		}
610*12f080e7Smrj 		return (DDI_FAILURE);
6117c478bd9Sstevel@tonic-gate 
6127c478bd9Sstevel@tonic-gate 	default:
6137c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
6147c478bd9Sstevel@tonic-gate 	}
615*12f080e7Smrj 	/*
616*12f080e7Smrj 	 * The rest are for "hardware" properties
617*12f080e7Smrj 	 */
618*12f080e7Smrj 	if ((pdp = ddi_get_parent_data(rdip)) == NULL)
619*12f080e7Smrj 		return (DDI_FAILURE);
6207c478bd9Sstevel@tonic-gate 
621*12f080e7Smrj 	if (ctlop == DDI_CTLOPS_NREGS) {
622*12f080e7Smrj 		ptr = (int *)result;
623*12f080e7Smrj 		*ptr = pdp->par_nreg;
624*12f080e7Smrj 	} else if (ctlop == DDI_CTLOPS_NINTRS) {
625*12f080e7Smrj 		ptr = (int *)result;
626*12f080e7Smrj 		*ptr = pdp->par_nintr;
627*12f080e7Smrj 	} else {
628*12f080e7Smrj 		off_t *size = (off_t *)result;
6297c478bd9Sstevel@tonic-gate 
630*12f080e7Smrj 		ptr = (int *)arg;
631*12f080e7Smrj 		n = *ptr;
632*12f080e7Smrj 		if (n >= pdp->par_nreg) {
633*12f080e7Smrj 			return (DDI_FAILURE);
634*12f080e7Smrj 		}
635*12f080e7Smrj 		*size = (off_t)pdp->par_reg[n].regspec_size;
636*12f080e7Smrj 	}
6377c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
6387c478bd9Sstevel@tonic-gate }
6397c478bd9Sstevel@tonic-gate 
640*12f080e7Smrj 
641*12f080e7Smrj /*
642*12f080e7Smrj  * rootnex_ctl_reportdev()
643*12f080e7Smrj  *
644*12f080e7Smrj  */
6457c478bd9Sstevel@tonic-gate static int
646*12f080e7Smrj rootnex_ctl_reportdev(dev_info_t *dev)
647*12f080e7Smrj {
648*12f080e7Smrj 	int i, n, len, f_len = 0;
649*12f080e7Smrj 	char *buf;
650*12f080e7Smrj 
651*12f080e7Smrj 	buf = kmem_alloc(REPORTDEV_BUFSIZE, KM_SLEEP);
652*12f080e7Smrj 	f_len += snprintf(buf, REPORTDEV_BUFSIZE,
653*12f080e7Smrj 	    "%s%d at root", ddi_driver_name(dev), ddi_get_instance(dev));
654*12f080e7Smrj 	len = strlen(buf);
655*12f080e7Smrj 
656*12f080e7Smrj 	for (i = 0; i < sparc_pd_getnreg(dev); i++) {
657*12f080e7Smrj 
658*12f080e7Smrj 		struct regspec *rp = sparc_pd_getreg(dev, i);
659*12f080e7Smrj 
660*12f080e7Smrj 		if (i == 0)
661*12f080e7Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
662*12f080e7Smrj 			    ": ");
663*12f080e7Smrj 		else
664*12f080e7Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
665*12f080e7Smrj 			    " and ");
666*12f080e7Smrj 		len = strlen(buf);
667*12f080e7Smrj 
668*12f080e7Smrj 		switch (rp->regspec_bustype) {
669*12f080e7Smrj 
670*12f080e7Smrj 		case BTEISA:
671*12f080e7Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
672*12f080e7Smrj 			    "%s 0x%x", DEVI_EISA_NEXNAME, rp->regspec_addr);
673*12f080e7Smrj 			break;
674*12f080e7Smrj 
675*12f080e7Smrj 		case BTISA:
676*12f080e7Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
677*12f080e7Smrj 			    "%s 0x%x", DEVI_ISA_NEXNAME, rp->regspec_addr);
678*12f080e7Smrj 			break;
679*12f080e7Smrj 
680*12f080e7Smrj 		default:
681*12f080e7Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
682*12f080e7Smrj 			    "space %x offset %x",
683*12f080e7Smrj 			    rp->regspec_bustype, rp->regspec_addr);
684*12f080e7Smrj 			break;
685*12f080e7Smrj 		}
686*12f080e7Smrj 		len = strlen(buf);
687*12f080e7Smrj 	}
688*12f080e7Smrj 	for (i = 0, n = sparc_pd_getnintr(dev); i < n; i++) {
689*12f080e7Smrj 		int pri;
690*12f080e7Smrj 
691*12f080e7Smrj 		if (i != 0) {
692*12f080e7Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
693*12f080e7Smrj 			    ",");
694*12f080e7Smrj 			len = strlen(buf);
695*12f080e7Smrj 		}
696*12f080e7Smrj 		pri = INT_IPL(sparc_pd_getintr(dev, i)->intrspec_pri);
697*12f080e7Smrj 		f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
698*12f080e7Smrj 		    " sparc ipl %d", pri);
699*12f080e7Smrj 		len = strlen(buf);
700*12f080e7Smrj 	}
701*12f080e7Smrj #ifdef DEBUG
702*12f080e7Smrj 	if (f_len + 1 >= REPORTDEV_BUFSIZE) {
703*12f080e7Smrj 		cmn_err(CE_NOTE, "next message is truncated: "
704*12f080e7Smrj 		    "printed length 1024, real length %d", f_len);
705*12f080e7Smrj 	}
706*12f080e7Smrj #endif /* DEBUG */
707*12f080e7Smrj 	cmn_err(CE_CONT, "?%s\n", buf);
708*12f080e7Smrj 	kmem_free(buf, REPORTDEV_BUFSIZE);
709*12f080e7Smrj 	return (DDI_SUCCESS);
710*12f080e7Smrj }
711*12f080e7Smrj 
712*12f080e7Smrj 
713*12f080e7Smrj /*
714*12f080e7Smrj  * rootnex_ctlops_poke()
715*12f080e7Smrj  *
716*12f080e7Smrj  */
717*12f080e7Smrj static int
718*12f080e7Smrj rootnex_ctlops_poke(peekpoke_ctlops_t *in_args)
719*12f080e7Smrj {
720*12f080e7Smrj 	int err = DDI_SUCCESS;
721*12f080e7Smrj 	on_trap_data_t otd;
722*12f080e7Smrj 
723*12f080e7Smrj 	/* Cautious access not supported. */
724*12f080e7Smrj 	if (in_args->handle != NULL)
725*12f080e7Smrj 		return (DDI_FAILURE);
726*12f080e7Smrj 
727*12f080e7Smrj 	mutex_enter(&rootnex_state->r_peekpoke_mutex);
728*12f080e7Smrj 
729*12f080e7Smrj 	/* Set up protected environment. */
730*12f080e7Smrj 	if (!on_trap(&otd, OT_DATA_ACCESS)) {
731*12f080e7Smrj 		switch (in_args->size) {
732*12f080e7Smrj 		case sizeof (uint8_t):
733*12f080e7Smrj 			*(uint8_t *)in_args->dev_addr = *(uint8_t *)
734*12f080e7Smrj 			    in_args->host_addr;
735*12f080e7Smrj 			break;
736*12f080e7Smrj 
737*12f080e7Smrj 		case sizeof (uint16_t):
738*12f080e7Smrj 			*(uint16_t *)in_args->dev_addr =
739*12f080e7Smrj 			    *(uint16_t *)in_args->host_addr;
740*12f080e7Smrj 			break;
741*12f080e7Smrj 
742*12f080e7Smrj 		case sizeof (uint32_t):
743*12f080e7Smrj 			*(uint32_t *)in_args->dev_addr =
744*12f080e7Smrj 			    *(uint32_t *)in_args->host_addr;
745*12f080e7Smrj 			break;
746*12f080e7Smrj 
747*12f080e7Smrj 		case sizeof (uint64_t):
748*12f080e7Smrj 			*(uint64_t *)in_args->dev_addr =
749*12f080e7Smrj 			    *(uint64_t *)in_args->host_addr;
750*12f080e7Smrj 			break;
751*12f080e7Smrj 
752*12f080e7Smrj 		default:
753*12f080e7Smrj 			err = DDI_FAILURE;
754*12f080e7Smrj 			break;
755*12f080e7Smrj 		}
756*12f080e7Smrj 	} else
757*12f080e7Smrj 		err = DDI_FAILURE;
758*12f080e7Smrj 
759*12f080e7Smrj 	/* Take down protected environment. */
760*12f080e7Smrj 	no_trap();
761*12f080e7Smrj 	mutex_exit(&rootnex_state->r_peekpoke_mutex);
762*12f080e7Smrj 
763*12f080e7Smrj 	return (err);
764*12f080e7Smrj }
765*12f080e7Smrj 
766*12f080e7Smrj 
767*12f080e7Smrj /*
768*12f080e7Smrj  * rootnex_ctlops_peek()
769*12f080e7Smrj  *
770*12f080e7Smrj  */
771*12f080e7Smrj static int
772*12f080e7Smrj rootnex_ctlops_peek(peekpoke_ctlops_t *in_args, void *result)
773*12f080e7Smrj {
774*12f080e7Smrj 	int err = DDI_SUCCESS;
775*12f080e7Smrj 	on_trap_data_t otd;
776*12f080e7Smrj 
777*12f080e7Smrj 	/* Cautious access not supported. */
778*12f080e7Smrj 	if (in_args->handle != NULL)
779*12f080e7Smrj 		return (DDI_FAILURE);
780*12f080e7Smrj 
781*12f080e7Smrj 	mutex_enter(&rootnex_state->r_peekpoke_mutex);
782*12f080e7Smrj 
783*12f080e7Smrj 	if (!on_trap(&otd, OT_DATA_ACCESS)) {
784*12f080e7Smrj 		switch (in_args->size) {
785*12f080e7Smrj 		case sizeof (uint8_t):
786*12f080e7Smrj 			*(uint8_t *)in_args->host_addr =
787*12f080e7Smrj 			    *(uint8_t *)in_args->dev_addr;
788*12f080e7Smrj 			break;
789*12f080e7Smrj 
790*12f080e7Smrj 		case sizeof (uint16_t):
791*12f080e7Smrj 			*(uint16_t *)in_args->host_addr =
792*12f080e7Smrj 			    *(uint16_t *)in_args->dev_addr;
793*12f080e7Smrj 			break;
794*12f080e7Smrj 
795*12f080e7Smrj 		case sizeof (uint32_t):
796*12f080e7Smrj 			*(uint32_t *)in_args->host_addr =
797*12f080e7Smrj 			    *(uint32_t *)in_args->dev_addr;
798*12f080e7Smrj 			break;
799*12f080e7Smrj 
800*12f080e7Smrj 		case sizeof (uint64_t):
801*12f080e7Smrj 			*(uint64_t *)in_args->host_addr =
802*12f080e7Smrj 			    *(uint64_t *)in_args->dev_addr;
803*12f080e7Smrj 			break;
804*12f080e7Smrj 
805*12f080e7Smrj 		default:
806*12f080e7Smrj 			err = DDI_FAILURE;
807*12f080e7Smrj 			break;
808*12f080e7Smrj 		}
809*12f080e7Smrj 		result = (void *)in_args->host_addr;
810*12f080e7Smrj 	} else
811*12f080e7Smrj 		err = DDI_FAILURE;
812*12f080e7Smrj 
813*12f080e7Smrj 	no_trap();
814*12f080e7Smrj 	mutex_exit(&rootnex_state->r_peekpoke_mutex);
815*12f080e7Smrj 
816*12f080e7Smrj 	return (err);
817*12f080e7Smrj }
818*12f080e7Smrj 
819*12f080e7Smrj 
820*12f080e7Smrj 
821*12f080e7Smrj /*
822*12f080e7Smrj  * ******************
823*12f080e7Smrj  *  map related code
824*12f080e7Smrj  * ******************
825*12f080e7Smrj  */
826*12f080e7Smrj 
827*12f080e7Smrj /*
828*12f080e7Smrj  * rootnex_map()
829*12f080e7Smrj  *
830*12f080e7Smrj  */
831*12f080e7Smrj static int
832*12f080e7Smrj rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, off_t offset,
833*12f080e7Smrj     off_t len, caddr_t *vaddrp)
8347c478bd9Sstevel@tonic-gate {
8357c478bd9Sstevel@tonic-gate 	struct regspec *rp, tmp_reg;
8367c478bd9Sstevel@tonic-gate 	ddi_map_req_t mr = *mp;		/* Get private copy of request */
8377c478bd9Sstevel@tonic-gate 	int error;
8387c478bd9Sstevel@tonic-gate 
8397c478bd9Sstevel@tonic-gate 	mp = &mr;
8407c478bd9Sstevel@tonic-gate 
8417c478bd9Sstevel@tonic-gate 	switch (mp->map_op)  {
8427c478bd9Sstevel@tonic-gate 	case DDI_MO_MAP_LOCKED:
8437c478bd9Sstevel@tonic-gate 	case DDI_MO_UNMAP:
8447c478bd9Sstevel@tonic-gate 	case DDI_MO_MAP_HANDLE:
8457c478bd9Sstevel@tonic-gate 		break;
8467c478bd9Sstevel@tonic-gate 	default:
8477c478bd9Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
8487c478bd9Sstevel@tonic-gate 		cmn_err(CE_WARN, "rootnex_map: unimplemented map op %d.",
8497c478bd9Sstevel@tonic-gate 		    mp->map_op);
8507c478bd9Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
8517c478bd9Sstevel@tonic-gate 		return (DDI_ME_UNIMPLEMENTED);
8527c478bd9Sstevel@tonic-gate 	}
8537c478bd9Sstevel@tonic-gate 
8547c478bd9Sstevel@tonic-gate 	if (mp->map_flags & DDI_MF_USER_MAPPING)  {
8557c478bd9Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
8567c478bd9Sstevel@tonic-gate 		cmn_err(CE_WARN, "rootnex_map: unimplemented map type: user.");
8577c478bd9Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
8587c478bd9Sstevel@tonic-gate 		return (DDI_ME_UNIMPLEMENTED);
8597c478bd9Sstevel@tonic-gate 	}
8607c478bd9Sstevel@tonic-gate 
8617c478bd9Sstevel@tonic-gate 	/*
8627c478bd9Sstevel@tonic-gate 	 * First, if given an rnumber, convert it to a regspec...
8637c478bd9Sstevel@tonic-gate 	 * (Presumably, this is on behalf of a child of the root node?)
8647c478bd9Sstevel@tonic-gate 	 */
8657c478bd9Sstevel@tonic-gate 
8667c478bd9Sstevel@tonic-gate 	if (mp->map_type == DDI_MT_RNUMBER)  {
8677c478bd9Sstevel@tonic-gate 
8687c478bd9Sstevel@tonic-gate 		int rnumber = mp->map_obj.rnumber;
8697c478bd9Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
8707c478bd9Sstevel@tonic-gate 		static char *out_of_range =
8717c478bd9Sstevel@tonic-gate 		    "rootnex_map: Out of range rnumber <%d>, device <%s>";
8727c478bd9Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
8737c478bd9Sstevel@tonic-gate 
8747c478bd9Sstevel@tonic-gate 		rp = i_ddi_rnumber_to_regspec(rdip, rnumber);
8757c478bd9Sstevel@tonic-gate 		if (rp == NULL)  {
8767c478bd9Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
8777c478bd9Sstevel@tonic-gate 			cmn_err(CE_WARN, out_of_range, rnumber,
8787c478bd9Sstevel@tonic-gate 			    ddi_get_name(rdip));
8797c478bd9Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
8807c478bd9Sstevel@tonic-gate 			return (DDI_ME_RNUMBER_RANGE);
8817c478bd9Sstevel@tonic-gate 		}
8827c478bd9Sstevel@tonic-gate 
8837c478bd9Sstevel@tonic-gate 		/*
8847c478bd9Sstevel@tonic-gate 		 * Convert the given ddi_map_req_t from rnumber to regspec...
8857c478bd9Sstevel@tonic-gate 		 */
8867c478bd9Sstevel@tonic-gate 
8877c478bd9Sstevel@tonic-gate 		mp->map_type = DDI_MT_REGSPEC;
8887c478bd9Sstevel@tonic-gate 		mp->map_obj.rp = rp;
8897c478bd9Sstevel@tonic-gate 	}
8907c478bd9Sstevel@tonic-gate 
8917c478bd9Sstevel@tonic-gate 	/*
8927c478bd9Sstevel@tonic-gate 	 * Adjust offset and length correspnding to called values...
8937c478bd9Sstevel@tonic-gate 	 * XXX: A non-zero length means override the one in the regspec
8947c478bd9Sstevel@tonic-gate 	 * XXX: (regardless of what's in the parent's range?)
8957c478bd9Sstevel@tonic-gate 	 */
8967c478bd9Sstevel@tonic-gate 
8977c478bd9Sstevel@tonic-gate 	tmp_reg = *(mp->map_obj.rp);		/* Preserve underlying data */
8987c478bd9Sstevel@tonic-gate 	rp = mp->map_obj.rp = &tmp_reg;		/* Use tmp_reg in request */
8997c478bd9Sstevel@tonic-gate 
9007c478bd9Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
9017c478bd9Sstevel@tonic-gate 	cmn_err(CE_CONT,
9027c478bd9Sstevel@tonic-gate 		"rootnex: <%s,%s> <0x%x, 0x%x, 0x%d>"
9037c478bd9Sstevel@tonic-gate 		" offset %d len %d handle 0x%x\n",
9047c478bd9Sstevel@tonic-gate 		ddi_get_name(dip), ddi_get_name(rdip),
9057c478bd9Sstevel@tonic-gate 		rp->regspec_bustype, rp->regspec_addr, rp->regspec_size,
9067c478bd9Sstevel@tonic-gate 		offset, len, mp->map_handlep);
9077c478bd9Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
9087c478bd9Sstevel@tonic-gate 
9097c478bd9Sstevel@tonic-gate 	/*
9107c478bd9Sstevel@tonic-gate 	 * I/O or memory mapping:
9117c478bd9Sstevel@tonic-gate 	 *
9127c478bd9Sstevel@tonic-gate 	 *	<bustype=0, addr=x, len=x>: memory
9137c478bd9Sstevel@tonic-gate 	 *	<bustype=1, addr=x, len=x>: i/o
9147c478bd9Sstevel@tonic-gate 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
9157c478bd9Sstevel@tonic-gate 	 */
9167c478bd9Sstevel@tonic-gate 
9177c478bd9Sstevel@tonic-gate 	if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) {
9187c478bd9Sstevel@tonic-gate 		cmn_err(CE_WARN, "<%s,%s> invalid register spec"
9197c478bd9Sstevel@tonic-gate 		    " <0x%x, 0x%x, 0x%x>", ddi_get_name(dip),
9207c478bd9Sstevel@tonic-gate 		    ddi_get_name(rdip), rp->regspec_bustype,
9217c478bd9Sstevel@tonic-gate 		    rp->regspec_addr, rp->regspec_size);
9227c478bd9Sstevel@tonic-gate 		return (DDI_ME_INVAL);
9237c478bd9Sstevel@tonic-gate 	}
9247c478bd9Sstevel@tonic-gate 
9257c478bd9Sstevel@tonic-gate 	if (rp->regspec_bustype > 1 && rp->regspec_addr == 0) {
9267c478bd9Sstevel@tonic-gate 		/*
9277c478bd9Sstevel@tonic-gate 		 * compatibility i/o mapping
9287c478bd9Sstevel@tonic-gate 		 */
9297c478bd9Sstevel@tonic-gate 		rp->regspec_bustype += (uint_t)offset;
9307c478bd9Sstevel@tonic-gate 	} else {
9317c478bd9Sstevel@tonic-gate 		/*
9327c478bd9Sstevel@tonic-gate 		 * Normal memory or i/o mapping
9337c478bd9Sstevel@tonic-gate 		 */
9347c478bd9Sstevel@tonic-gate 		rp->regspec_addr += (uint_t)offset;
9357c478bd9Sstevel@tonic-gate 	}
9367c478bd9Sstevel@tonic-gate 
9377c478bd9Sstevel@tonic-gate 	if (len != 0)
9387c478bd9Sstevel@tonic-gate 		rp->regspec_size = (uint_t)len;
9397c478bd9Sstevel@tonic-gate 
9407c478bd9Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
9417c478bd9Sstevel@tonic-gate 	cmn_err(CE_CONT,
9427c478bd9Sstevel@tonic-gate 		"             <%s,%s> <0x%x, 0x%x, 0x%d>"
9437c478bd9Sstevel@tonic-gate 		" offset %d len %d handle 0x%x\n",
9447c478bd9Sstevel@tonic-gate 		ddi_get_name(dip), ddi_get_name(rdip),
9457c478bd9Sstevel@tonic-gate 		rp->regspec_bustype, rp->regspec_addr, rp->regspec_size,
9467c478bd9Sstevel@tonic-gate 		offset, len, mp->map_handlep);
9477c478bd9Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
9487c478bd9Sstevel@tonic-gate 
9497c478bd9Sstevel@tonic-gate 	/*
9507c478bd9Sstevel@tonic-gate 	 * Apply any parent ranges at this level, if applicable.
9517c478bd9Sstevel@tonic-gate 	 * (This is where nexus specific regspec translation takes place.
9527c478bd9Sstevel@tonic-gate 	 * Use of this function is implicit agreement that translation is
9537c478bd9Sstevel@tonic-gate 	 * provided via ddi_apply_range.)
9547c478bd9Sstevel@tonic-gate 	 */
9557c478bd9Sstevel@tonic-gate 
9567c478bd9Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
9577c478bd9Sstevel@tonic-gate 	ddi_map_debug("applying range of parent <%s> to child <%s>...\n",
9587c478bd9Sstevel@tonic-gate 	    ddi_get_name(dip), ddi_get_name(rdip));
9597c478bd9Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
9607c478bd9Sstevel@tonic-gate 
9617c478bd9Sstevel@tonic-gate 	if ((error = i_ddi_apply_range(dip, rdip, mp->map_obj.rp)) != 0)
9627c478bd9Sstevel@tonic-gate 		return (error);
9637c478bd9Sstevel@tonic-gate 
9647c478bd9Sstevel@tonic-gate 	switch (mp->map_op)  {
9657c478bd9Sstevel@tonic-gate 	case DDI_MO_MAP_LOCKED:
9667c478bd9Sstevel@tonic-gate 
9677c478bd9Sstevel@tonic-gate 		/*
9687c478bd9Sstevel@tonic-gate 		 * Set up the locked down kernel mapping to the regspec...
9697c478bd9Sstevel@tonic-gate 		 */
9707c478bd9Sstevel@tonic-gate 
9717c478bd9Sstevel@tonic-gate 		return (rootnex_map_regspec(mp, vaddrp));
9727c478bd9Sstevel@tonic-gate 
9737c478bd9Sstevel@tonic-gate 	case DDI_MO_UNMAP:
9747c478bd9Sstevel@tonic-gate 
9757c478bd9Sstevel@tonic-gate 		/*
9767c478bd9Sstevel@tonic-gate 		 * Release mapping...
9777c478bd9Sstevel@tonic-gate 		 */
9787c478bd9Sstevel@tonic-gate 
9797c478bd9Sstevel@tonic-gate 		return (rootnex_unmap_regspec(mp, vaddrp));
9807c478bd9Sstevel@tonic-gate 
9817c478bd9Sstevel@tonic-gate 	case DDI_MO_MAP_HANDLE:
9827c478bd9Sstevel@tonic-gate 
9837c478bd9Sstevel@tonic-gate 		return (rootnex_map_handle(mp));
9847c478bd9Sstevel@tonic-gate 
9857c478bd9Sstevel@tonic-gate 	default:
9867c478bd9Sstevel@tonic-gate 		return (DDI_ME_UNIMPLEMENTED);
9877c478bd9Sstevel@tonic-gate 	}
9887c478bd9Sstevel@tonic-gate }
9897c478bd9Sstevel@tonic-gate 
9907c478bd9Sstevel@tonic-gate 
9917c478bd9Sstevel@tonic-gate /*
992*12f080e7Smrj  * rootnex_map_fault()
9937c478bd9Sstevel@tonic-gate  *
9947c478bd9Sstevel@tonic-gate  *	fault in mappings for requestors
9957c478bd9Sstevel@tonic-gate  */
9967c478bd9Sstevel@tonic-gate /*ARGSUSED*/
9977c478bd9Sstevel@tonic-gate static int
998*12f080e7Smrj rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip, struct hat *hat,
999*12f080e7Smrj     struct seg *seg, caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot,
1000*12f080e7Smrj     uint_t lock)
10017c478bd9Sstevel@tonic-gate {
10027c478bd9Sstevel@tonic-gate 
10037c478bd9Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
10047c478bd9Sstevel@tonic-gate 	ddi_map_debug("rootnex_map_fault: address <%x> pfn <%x>", addr, pfn);
10057c478bd9Sstevel@tonic-gate 	ddi_map_debug(" Seg <%s>\n",
10067c478bd9Sstevel@tonic-gate 	    seg->s_ops == &segdev_ops ? "segdev" :
10077c478bd9Sstevel@tonic-gate 	    seg == &kvseg ? "segkmem" : "NONE!");
10087c478bd9Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
10097c478bd9Sstevel@tonic-gate 
10107c478bd9Sstevel@tonic-gate 	/*
10117c478bd9Sstevel@tonic-gate 	 * This is all terribly broken, but it is a start
10127c478bd9Sstevel@tonic-gate 	 *
10137c478bd9Sstevel@tonic-gate 	 * XXX	Note that this test means that segdev_ops
10147c478bd9Sstevel@tonic-gate 	 *	must be exported from seg_dev.c.
10157c478bd9Sstevel@tonic-gate 	 * XXX	What about devices with their own segment drivers?
10167c478bd9Sstevel@tonic-gate 	 */
10177c478bd9Sstevel@tonic-gate 	if (seg->s_ops == &segdev_ops) {
10187c478bd9Sstevel@tonic-gate 		struct segdev_data *sdp =
10197c478bd9Sstevel@tonic-gate 			(struct segdev_data *)seg->s_data;
10207c478bd9Sstevel@tonic-gate 
10217c478bd9Sstevel@tonic-gate 		if (hat == NULL) {
10227c478bd9Sstevel@tonic-gate 			/*
10237c478bd9Sstevel@tonic-gate 			 * This is one plausible interpretation of
10247c478bd9Sstevel@tonic-gate 			 * a null hat i.e. use the first hat on the
10257c478bd9Sstevel@tonic-gate 			 * address space hat list which by convention is
10267c478bd9Sstevel@tonic-gate 			 * the hat of the system MMU.  At alternative
10277c478bd9Sstevel@tonic-gate 			 * would be to panic .. this might well be better ..
10287c478bd9Sstevel@tonic-gate 			 */
10297c478bd9Sstevel@tonic-gate 			ASSERT(AS_READ_HELD(seg->s_as, &seg->s_as->a_lock));
10307c478bd9Sstevel@tonic-gate 			hat = seg->s_as->a_hat;
10317c478bd9Sstevel@tonic-gate 			cmn_err(CE_NOTE, "rootnex_map_fault: nil hat");
10327c478bd9Sstevel@tonic-gate 		}
10337c478bd9Sstevel@tonic-gate 		hat_devload(hat, addr, MMU_PAGESIZE, pfn, prot | sdp->hat_attr,
10347c478bd9Sstevel@tonic-gate 		    (lock ? HAT_LOAD_LOCK : HAT_LOAD));
10357c478bd9Sstevel@tonic-gate 	} else if (seg == &kvseg && dp == NULL) {
10367c478bd9Sstevel@tonic-gate 		hat_devload(kas.a_hat, addr, MMU_PAGESIZE, pfn, prot,
10377c478bd9Sstevel@tonic-gate 		    HAT_LOAD_LOCK);
10387c478bd9Sstevel@tonic-gate 	} else
10397c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
10407c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
10417c478bd9Sstevel@tonic-gate }
10427c478bd9Sstevel@tonic-gate 
10437c478bd9Sstevel@tonic-gate 
10447c478bd9Sstevel@tonic-gate /*
1045*12f080e7Smrj  * rootnex_map_regspec()
1046*12f080e7Smrj  *     we don't support mapping of I/O cards above 4Gb
10477c478bd9Sstevel@tonic-gate  */
10487c478bd9Sstevel@tonic-gate static int
1049*12f080e7Smrj rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp)
10507c478bd9Sstevel@tonic-gate {
1051*12f080e7Smrj 	ulong_t base;
1052*12f080e7Smrj 	void *cvaddr;
1053*12f080e7Smrj 	uint_t npages, pgoffset;
1054*12f080e7Smrj 	struct regspec *rp;
1055*12f080e7Smrj 	ddi_acc_hdl_t *hp;
1056*12f080e7Smrj 	ddi_acc_impl_t *ap;
1057*12f080e7Smrj 	uint_t	hat_acc_flags;
10587c478bd9Sstevel@tonic-gate 
1059*12f080e7Smrj 	rp = mp->map_obj.rp;
1060*12f080e7Smrj 	hp = mp->map_handlep;
1061*12f080e7Smrj 
1062*12f080e7Smrj #ifdef	DDI_MAP_DEBUG
1063*12f080e7Smrj 	ddi_map_debug(
1064*12f080e7Smrj 	    "rootnex_map_regspec: <0x%x 0x%x 0x%x> handle 0x%x\n",
1065*12f080e7Smrj 	    rp->regspec_bustype, rp->regspec_addr,
1066*12f080e7Smrj 	    rp->regspec_size, mp->map_handlep);
1067*12f080e7Smrj #endif	/* DDI_MAP_DEBUG */
10687c478bd9Sstevel@tonic-gate 
10697c478bd9Sstevel@tonic-gate 	/*
1070*12f080e7Smrj 	 * I/O or memory mapping
1071*12f080e7Smrj 	 *
1072*12f080e7Smrj 	 *	<bustype=0, addr=x, len=x>: memory
1073*12f080e7Smrj 	 *	<bustype=1, addr=x, len=x>: i/o
1074*12f080e7Smrj 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
10757c478bd9Sstevel@tonic-gate 	 */
1076*12f080e7Smrj 
1077*12f080e7Smrj 	if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) {
1078*12f080e7Smrj 		cmn_err(CE_WARN, "rootnex: invalid register spec"
1079*12f080e7Smrj 		    " <0x%x, 0x%x, 0x%x>", rp->regspec_bustype,
1080*12f080e7Smrj 		    rp->regspec_addr, rp->regspec_size);
1081*12f080e7Smrj 		return (DDI_FAILURE);
10827c478bd9Sstevel@tonic-gate 	}
1083*12f080e7Smrj 
1084*12f080e7Smrj 	if (rp->regspec_bustype != 0) {
10857c478bd9Sstevel@tonic-gate 		/*
1086*12f080e7Smrj 		 * I/O space - needs a handle.
10877c478bd9Sstevel@tonic-gate 		 */
10887c478bd9Sstevel@tonic-gate 		if (hp == NULL) {
1089*12f080e7Smrj 			return (DDI_FAILURE);
10907c478bd9Sstevel@tonic-gate 		}
1091*12f080e7Smrj 		ap = (ddi_acc_impl_t *)hp->ah_platform_private;
1092*12f080e7Smrj 		ap->ahi_acc_attr |= DDI_ACCATTR_IO_SPACE;
1093*12f080e7Smrj 		impl_acc_hdl_init(hp);
10947c478bd9Sstevel@tonic-gate 
1095*12f080e7Smrj 		if (mp->map_flags & DDI_MF_DEVICE_MAPPING) {
1096*12f080e7Smrj #ifdef  DDI_MAP_DEBUG
1097*12f080e7Smrj 			ddi_map_debug("rootnex_map_regspec: mmap() \
1098*12f080e7Smrj to I/O space is not supported.\n");
1099*12f080e7Smrj #endif  /* DDI_MAP_DEBUG */
1100*12f080e7Smrj 			return (DDI_ME_INVAL);
11017c478bd9Sstevel@tonic-gate 		} else {
11027c478bd9Sstevel@tonic-gate 			/*
1103*12f080e7Smrj 			 * 1275-compliant vs. compatibility i/o mapping
11047c478bd9Sstevel@tonic-gate 			 */
1105*12f080e7Smrj 			*vaddrp =
1106*12f080e7Smrj 			    (rp->regspec_bustype > 1 && rp->regspec_addr == 0) ?
1107*12f080e7Smrj 				((caddr_t)(uintptr_t)rp->regspec_bustype) :
1108*12f080e7Smrj 				((caddr_t)(uintptr_t)rp->regspec_addr);
11097c478bd9Sstevel@tonic-gate 		}
11107c478bd9Sstevel@tonic-gate 
1111*12f080e7Smrj #ifdef	DDI_MAP_DEBUG
1112*12f080e7Smrj 		ddi_map_debug(
1113*12f080e7Smrj 	    "rootnex_map_regspec: \"Mapping\" %d bytes I/O space at 0x%x\n",
1114*12f080e7Smrj 		    rp->regspec_size, *vaddrp);
1115*12f080e7Smrj #endif	/* DDI_MAP_DEBUG */
1116*12f080e7Smrj 		return (DDI_SUCCESS);
11177c478bd9Sstevel@tonic-gate 	}
11187c478bd9Sstevel@tonic-gate 
11197c478bd9Sstevel@tonic-gate 	/*
1120*12f080e7Smrj 	 * Memory space
1121*12f080e7Smrj 	 */
1122*12f080e7Smrj 
1123*12f080e7Smrj 	if (hp != NULL) {
1124*12f080e7Smrj 		/*
1125*12f080e7Smrj 		 * hat layer ignores
1126*12f080e7Smrj 		 * hp->ah_acc.devacc_attr_endian_flags.
1127*12f080e7Smrj 		 */
1128*12f080e7Smrj 		switch (hp->ah_acc.devacc_attr_dataorder) {
1129*12f080e7Smrj 		case DDI_STRICTORDER_ACC:
1130*12f080e7Smrj 			hat_acc_flags = HAT_STRICTORDER;
1131*12f080e7Smrj 			break;
1132*12f080e7Smrj 		case DDI_UNORDERED_OK_ACC:
1133*12f080e7Smrj 			hat_acc_flags = HAT_UNORDERED_OK;
1134*12f080e7Smrj 			break;
1135*12f080e7Smrj 		case DDI_MERGING_OK_ACC:
1136*12f080e7Smrj 			hat_acc_flags = HAT_MERGING_OK;
1137*12f080e7Smrj 			break;
1138*12f080e7Smrj 		case DDI_LOADCACHING_OK_ACC:
1139*12f080e7Smrj 			hat_acc_flags = HAT_LOADCACHING_OK;
1140*12f080e7Smrj 			break;
1141*12f080e7Smrj 		case DDI_STORECACHING_OK_ACC:
1142*12f080e7Smrj 			hat_acc_flags = HAT_STORECACHING_OK;
1143*12f080e7Smrj 			break;
1144*12f080e7Smrj 		}
1145*12f080e7Smrj 		ap = (ddi_acc_impl_t *)hp->ah_platform_private;
1146*12f080e7Smrj 		ap->ahi_acc_attr |= DDI_ACCATTR_CPU_VADDR;
1147*12f080e7Smrj 		impl_acc_hdl_init(hp);
1148*12f080e7Smrj 		hp->ah_hat_flags = hat_acc_flags;
1149*12f080e7Smrj 	} else {
1150*12f080e7Smrj 		hat_acc_flags = HAT_STRICTORDER;
1151*12f080e7Smrj 	}
1152*12f080e7Smrj 
1153*12f080e7Smrj 	base = (ulong_t)rp->regspec_addr & (~MMU_PAGEOFFSET); /* base addr */
1154*12f080e7Smrj 	pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET; /* offset */
1155*12f080e7Smrj 
1156*12f080e7Smrj 	if (rp->regspec_size == 0) {
1157*12f080e7Smrj #ifdef  DDI_MAP_DEBUG
1158*12f080e7Smrj 		ddi_map_debug("rootnex_map_regspec: zero regspec_size\n");
1159*12f080e7Smrj #endif  /* DDI_MAP_DEBUG */
1160*12f080e7Smrj 		return (DDI_ME_INVAL);
1161*12f080e7Smrj 	}
1162*12f080e7Smrj 
1163*12f080e7Smrj 	if (mp->map_flags & DDI_MF_DEVICE_MAPPING) {
1164*12f080e7Smrj 		*vaddrp = (caddr_t)mmu_btop(base);
1165*12f080e7Smrj 	} else {
1166*12f080e7Smrj 		npages = mmu_btopr(rp->regspec_size + pgoffset);
1167*12f080e7Smrj 
1168*12f080e7Smrj #ifdef	DDI_MAP_DEBUG
1169*12f080e7Smrj 		ddi_map_debug("rootnex_map_regspec: Mapping %d pages \
1170*12f080e7Smrj physical %x ",
1171*12f080e7Smrj 		    npages, base);
1172*12f080e7Smrj #endif	/* DDI_MAP_DEBUG */
1173*12f080e7Smrj 
1174*12f080e7Smrj 		cvaddr = device_arena_alloc(ptob(npages), VM_NOSLEEP);
1175*12f080e7Smrj 		if (cvaddr == NULL)
1176*12f080e7Smrj 			return (DDI_ME_NORESOURCES);
1177*12f080e7Smrj 
1178*12f080e7Smrj 		/*
1179*12f080e7Smrj 		 * Now map in the pages we've allocated...
1180*12f080e7Smrj 		 */
1181*12f080e7Smrj 		hat_devload(kas.a_hat, cvaddr, mmu_ptob(npages), mmu_btop(base),
1182*12f080e7Smrj 		    mp->map_prot | hat_acc_flags, HAT_LOAD_LOCK);
1183*12f080e7Smrj 		*vaddrp = (caddr_t)cvaddr + pgoffset;
1184*12f080e7Smrj 	}
1185*12f080e7Smrj 
1186*12f080e7Smrj #ifdef	DDI_MAP_DEBUG
1187*12f080e7Smrj 	ddi_map_debug("at virtual 0x%x\n", *vaddrp);
1188*12f080e7Smrj #endif	/* DDI_MAP_DEBUG */
1189*12f080e7Smrj 	return (DDI_SUCCESS);
1190*12f080e7Smrj }
1191*12f080e7Smrj 
1192*12f080e7Smrj 
1193*12f080e7Smrj /*
1194*12f080e7Smrj  * rootnex_unmap_regspec()
11957c478bd9Sstevel@tonic-gate  *
11967c478bd9Sstevel@tonic-gate  */
11977c478bd9Sstevel@tonic-gate static int
1198*12f080e7Smrj rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp)
11997c478bd9Sstevel@tonic-gate {
1200*12f080e7Smrj 	caddr_t addr = (caddr_t)*vaddrp;
1201*12f080e7Smrj 	uint_t npages, pgoffset;
1202*12f080e7Smrj 	struct regspec *rp;
12037c478bd9Sstevel@tonic-gate 
1204*12f080e7Smrj 	if (mp->map_flags & DDI_MF_DEVICE_MAPPING)
1205*12f080e7Smrj 		return (0);
12067c478bd9Sstevel@tonic-gate 
1207*12f080e7Smrj 	rp = mp->map_obj.rp;
12087c478bd9Sstevel@tonic-gate 
1209*12f080e7Smrj 	if (rp->regspec_size == 0) {
1210*12f080e7Smrj #ifdef  DDI_MAP_DEBUG
1211*12f080e7Smrj 		ddi_map_debug("rootnex_unmap_regspec: zero regspec_size\n");
1212*12f080e7Smrj #endif  /* DDI_MAP_DEBUG */
1213*12f080e7Smrj 		return (DDI_ME_INVAL);
12147c478bd9Sstevel@tonic-gate 	}
12157c478bd9Sstevel@tonic-gate 
12167c478bd9Sstevel@tonic-gate 	/*
1217*12f080e7Smrj 	 * I/O or memory mapping:
12187c478bd9Sstevel@tonic-gate 	 *
1219*12f080e7Smrj 	 *	<bustype=0, addr=x, len=x>: memory
1220*12f080e7Smrj 	 *	<bustype=1, addr=x, len=x>: i/o
1221*12f080e7Smrj 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
12227c478bd9Sstevel@tonic-gate 	 */
1223*12f080e7Smrj 	if (rp->regspec_bustype != 0) {
12247c478bd9Sstevel@tonic-gate 		/*
1225*12f080e7Smrj 		 * This is I/O space, which requires no particular
1226*12f080e7Smrj 		 * processing on unmap since it isn't mapped in the
1227*12f080e7Smrj 		 * first place.
12287c478bd9Sstevel@tonic-gate 		 */
12297c478bd9Sstevel@tonic-gate 		return (DDI_SUCCESS);
12307c478bd9Sstevel@tonic-gate 	}
12317c478bd9Sstevel@tonic-gate 
12327c478bd9Sstevel@tonic-gate 	/*
1233*12f080e7Smrj 	 * Memory space
12347c478bd9Sstevel@tonic-gate 	 */
1235*12f080e7Smrj 	pgoffset = (uintptr_t)addr & MMU_PAGEOFFSET;
1236*12f080e7Smrj 	npages = mmu_btopr(rp->regspec_size + pgoffset);
1237*12f080e7Smrj 	hat_unload(kas.a_hat, addr - pgoffset, ptob(npages), HAT_UNLOAD_UNLOCK);
1238*12f080e7Smrj 	device_arena_free(addr - pgoffset, ptob(npages));
12397c478bd9Sstevel@tonic-gate 
12407c478bd9Sstevel@tonic-gate 	/*
1241*12f080e7Smrj 	 * Destroy the pointer - the mapping has logically gone
12427c478bd9Sstevel@tonic-gate 	 */
1243*12f080e7Smrj 	*vaddrp = NULL;
12447c478bd9Sstevel@tonic-gate 
12457c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
12467c478bd9Sstevel@tonic-gate }
12477c478bd9Sstevel@tonic-gate 
1248*12f080e7Smrj 
1249*12f080e7Smrj /*
1250*12f080e7Smrj  * rootnex_map_handle()
1251*12f080e7Smrj  *
1252*12f080e7Smrj  */
12537c478bd9Sstevel@tonic-gate static int
1254*12f080e7Smrj rootnex_map_handle(ddi_map_req_t *mp)
12557c478bd9Sstevel@tonic-gate {
1256*12f080e7Smrj 	ddi_acc_hdl_t *hp;
1257*12f080e7Smrj 	ulong_t base;
1258*12f080e7Smrj 	uint_t pgoffset;
1259*12f080e7Smrj 	struct regspec *rp;
12607c478bd9Sstevel@tonic-gate 
1261*12f080e7Smrj 	rp = mp->map_obj.rp;
12627c478bd9Sstevel@tonic-gate 
1263*12f080e7Smrj #ifdef	DDI_MAP_DEBUG
1264*12f080e7Smrj 	ddi_map_debug(
1265*12f080e7Smrj 	    "rootnex_map_handle: <0x%x 0x%x 0x%x> handle 0x%x\n",
1266*12f080e7Smrj 	    rp->regspec_bustype, rp->regspec_addr,
1267*12f080e7Smrj 	    rp->regspec_size, mp->map_handlep);
1268*12f080e7Smrj #endif	/* DDI_MAP_DEBUG */
12697c478bd9Sstevel@tonic-gate 
12707c478bd9Sstevel@tonic-gate 	/*
1271*12f080e7Smrj 	 * I/O or memory mapping:
1272*12f080e7Smrj 	 *
1273*12f080e7Smrj 	 *	<bustype=0, addr=x, len=x>: memory
1274*12f080e7Smrj 	 *	<bustype=1, addr=x, len=x>: i/o
1275*12f080e7Smrj 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
12767c478bd9Sstevel@tonic-gate 	 */
1277*12f080e7Smrj 	if (rp->regspec_bustype != 0) {
1278*12f080e7Smrj 		/*
1279*12f080e7Smrj 		 * This refers to I/O space, and we don't support "mapping"
1280*12f080e7Smrj 		 * I/O space to a user.
1281*12f080e7Smrj 		 */
12827c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
12837c478bd9Sstevel@tonic-gate 	}
12847c478bd9Sstevel@tonic-gate 
12857c478bd9Sstevel@tonic-gate 	/*
1286*12f080e7Smrj 	 * Set up the hat_flags for the mapping.
12877c478bd9Sstevel@tonic-gate 	 */
1288*12f080e7Smrj 	hp = mp->map_handlep;
12897c478bd9Sstevel@tonic-gate 
1290*12f080e7Smrj 	switch (hp->ah_acc.devacc_attr_endian_flags) {
1291*12f080e7Smrj 	case DDI_NEVERSWAP_ACC:
1292*12f080e7Smrj 		hp->ah_hat_flags = HAT_NEVERSWAP | HAT_STRICTORDER;
12937c478bd9Sstevel@tonic-gate 		break;
1294*12f080e7Smrj 	case DDI_STRUCTURE_LE_ACC:
1295*12f080e7Smrj 		hp->ah_hat_flags = HAT_STRUCTURE_LE;
12967c478bd9Sstevel@tonic-gate 		break;
1297*12f080e7Smrj 	case DDI_STRUCTURE_BE_ACC:
12987c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
12997c478bd9Sstevel@tonic-gate 	default:
1300*12f080e7Smrj 		return (DDI_REGS_ACC_CONFLICT);
13017c478bd9Sstevel@tonic-gate 	}
13027c478bd9Sstevel@tonic-gate 
1303*12f080e7Smrj 	switch (hp->ah_acc.devacc_attr_dataorder) {
1304*12f080e7Smrj 	case DDI_STRICTORDER_ACC:
13057c478bd9Sstevel@tonic-gate 		break;
1306*12f080e7Smrj 	case DDI_UNORDERED_OK_ACC:
1307*12f080e7Smrj 		hp->ah_hat_flags |= HAT_UNORDERED_OK;
13087c478bd9Sstevel@tonic-gate 		break;
1309*12f080e7Smrj 	case DDI_MERGING_OK_ACC:
1310*12f080e7Smrj 		hp->ah_hat_flags |= HAT_MERGING_OK;
13117c478bd9Sstevel@tonic-gate 		break;
1312*12f080e7Smrj 	case DDI_LOADCACHING_OK_ACC:
1313*12f080e7Smrj 		hp->ah_hat_flags |= HAT_LOADCACHING_OK;
1314*12f080e7Smrj 		break;
1315*12f080e7Smrj 	case DDI_STORECACHING_OK_ACC:
1316*12f080e7Smrj 		hp->ah_hat_flags |= HAT_STORECACHING_OK;
1317*12f080e7Smrj 		break;
13187c478bd9Sstevel@tonic-gate 	default:
13197c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
13207c478bd9Sstevel@tonic-gate 	}
13217c478bd9Sstevel@tonic-gate 
1322*12f080e7Smrj 	base = (ulong_t)rp->regspec_addr & (~MMU_PAGEOFFSET); /* base addr */
1323*12f080e7Smrj 	pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET; /* offset */
13247c478bd9Sstevel@tonic-gate 
1325*12f080e7Smrj 	if (rp->regspec_size == 0)
1326*12f080e7Smrj 		return (DDI_ME_INVAL);
13277c478bd9Sstevel@tonic-gate 
1328*12f080e7Smrj 	hp->ah_pfn = mmu_btop(base);
1329*12f080e7Smrj 	hp->ah_pnum = mmu_btopr(rp->regspec_size + pgoffset);
13307c478bd9Sstevel@tonic-gate 
13317c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
13327c478bd9Sstevel@tonic-gate }
13337c478bd9Sstevel@tonic-gate 
1334*12f080e7Smrj 
1335*12f080e7Smrj 
13367c478bd9Sstevel@tonic-gate /*
1337*12f080e7Smrj  * ************************
1338*12f080e7Smrj  *  interrupt related code
1339*12f080e7Smrj  * ************************
13407c478bd9Sstevel@tonic-gate  */
13417c478bd9Sstevel@tonic-gate 
13427c478bd9Sstevel@tonic-gate /*
1343*12f080e7Smrj  * rootnex_intr_ops()
13447c478bd9Sstevel@tonic-gate  *	bus_intr_op() function for interrupt support
13457c478bd9Sstevel@tonic-gate  */
13467c478bd9Sstevel@tonic-gate /* ARGSUSED */
13477c478bd9Sstevel@tonic-gate static int
13487c478bd9Sstevel@tonic-gate rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op,
13497c478bd9Sstevel@tonic-gate     ddi_intr_handle_impl_t *hdlp, void *result)
13507c478bd9Sstevel@tonic-gate {
13517c478bd9Sstevel@tonic-gate 	struct intrspec			*ispec;
13527c478bd9Sstevel@tonic-gate 	struct ddi_parent_private_data	*pdp;
13537c478bd9Sstevel@tonic-gate 
13547c478bd9Sstevel@tonic-gate 	DDI_INTR_NEXDBG((CE_CONT,
13557c478bd9Sstevel@tonic-gate 	    "rootnex_intr_ops: pdip = %p, rdip = %p, intr_op = %x, hdlp = %p\n",
13567c478bd9Sstevel@tonic-gate 	    (void *)pdip, (void *)rdip, intr_op, (void *)hdlp));
13577c478bd9Sstevel@tonic-gate 
13587c478bd9Sstevel@tonic-gate 	/* Process the interrupt operation */
13597c478bd9Sstevel@tonic-gate 	switch (intr_op) {
13607c478bd9Sstevel@tonic-gate 	case DDI_INTROP_GETCAP:
13617c478bd9Sstevel@tonic-gate 		/* First check with pcplusmp */
13627c478bd9Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
13637c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13647c478bd9Sstevel@tonic-gate 
13657c478bd9Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_CAP, result)) {
13667c478bd9Sstevel@tonic-gate 			*(int *)result = 0;
13677c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13687c478bd9Sstevel@tonic-gate 		}
13697c478bd9Sstevel@tonic-gate 		break;
13707c478bd9Sstevel@tonic-gate 	case DDI_INTROP_SETCAP:
13717c478bd9Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
13727c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13737c478bd9Sstevel@tonic-gate 
13747c478bd9Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_CAP, result))
13757c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13767c478bd9Sstevel@tonic-gate 		break;
13777c478bd9Sstevel@tonic-gate 	case DDI_INTROP_ALLOC:
13787c478bd9Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
13797c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13807c478bd9Sstevel@tonic-gate 		hdlp->ih_pri = ispec->intrspec_pri;
13817c478bd9Sstevel@tonic-gate 		*(int *)result = hdlp->ih_scratch1;
13827c478bd9Sstevel@tonic-gate 		break;
13837c478bd9Sstevel@tonic-gate 	case DDI_INTROP_FREE:
13847c478bd9Sstevel@tonic-gate 		pdp = ddi_get_parent_data(rdip);
13857c478bd9Sstevel@tonic-gate 		/*
13867c478bd9Sstevel@tonic-gate 		 * Special case for 'pcic' driver' only.
13877c478bd9Sstevel@tonic-gate 		 * If an intrspec was created for it, clean it up here
13887c478bd9Sstevel@tonic-gate 		 * See detailed comments on this in the function
13897c478bd9Sstevel@tonic-gate 		 * rootnex_get_ispec().
13907c478bd9Sstevel@tonic-gate 		 */
13917c478bd9Sstevel@tonic-gate 		if (pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) {
13927c478bd9Sstevel@tonic-gate 			kmem_free(pdp->par_intr, sizeof (struct intrspec) *
13937c478bd9Sstevel@tonic-gate 			    pdp->par_nintr);
13947c478bd9Sstevel@tonic-gate 			/*
13957c478bd9Sstevel@tonic-gate 			 * Set it to zero; so that
13967c478bd9Sstevel@tonic-gate 			 * DDI framework doesn't free it again
13977c478bd9Sstevel@tonic-gate 			 */
13987c478bd9Sstevel@tonic-gate 			pdp->par_intr = NULL;
13997c478bd9Sstevel@tonic-gate 			pdp->par_nintr = 0;
14007c478bd9Sstevel@tonic-gate 		}
14017c478bd9Sstevel@tonic-gate 		break;
14027c478bd9Sstevel@tonic-gate 	case DDI_INTROP_GETPRI:
14037c478bd9Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
14047c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14057c478bd9Sstevel@tonic-gate 		*(int *)result = ispec->intrspec_pri;
14067c478bd9Sstevel@tonic-gate 		break;
14077c478bd9Sstevel@tonic-gate 	case DDI_INTROP_SETPRI:
14087c478bd9Sstevel@tonic-gate 		/* Validate the interrupt priority passed to us */
14097c478bd9Sstevel@tonic-gate 		if (*(int *)result > LOCK_LEVEL)
14107c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14117c478bd9Sstevel@tonic-gate 
14127c478bd9Sstevel@tonic-gate 		/* Ensure that PSM is all initialized and ispec is ok */
14137c478bd9Sstevel@tonic-gate 		if ((psm_intr_ops == NULL) ||
14147c478bd9Sstevel@tonic-gate 		    ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL))
14157c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14167c478bd9Sstevel@tonic-gate 
14177c478bd9Sstevel@tonic-gate 		/* Change the priority */
14187c478bd9Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_PRI, result) ==
14197c478bd9Sstevel@tonic-gate 		    PSM_FAILURE)
14207c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14217c478bd9Sstevel@tonic-gate 
14227c478bd9Sstevel@tonic-gate 		/* update the ispec with the new priority */
14237c478bd9Sstevel@tonic-gate 		ispec->intrspec_pri =  *(int *)result;
14247c478bd9Sstevel@tonic-gate 		break;
14257c478bd9Sstevel@tonic-gate 	case DDI_INTROP_ADDISR:
14267c478bd9Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
14277c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14287c478bd9Sstevel@tonic-gate 		ispec->intrspec_func = hdlp->ih_cb_func;
14297c478bd9Sstevel@tonic-gate 		break;
14307c478bd9Sstevel@tonic-gate 	case DDI_INTROP_REMISR:
14317c478bd9Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
14327c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14337c478bd9Sstevel@tonic-gate 		ispec->intrspec_func = (uint_t (*)()) 0;
14347c478bd9Sstevel@tonic-gate 		break;
14357c478bd9Sstevel@tonic-gate 	case DDI_INTROP_ENABLE:
14367c478bd9Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
14377c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14387c478bd9Sstevel@tonic-gate 
14397c478bd9Sstevel@tonic-gate 		/* Call psmi to translate irq with the dip */
14407c478bd9Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14417c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14427c478bd9Sstevel@tonic-gate 
14437c478bd9Sstevel@tonic-gate 		hdlp->ih_private = (void *)ispec;
14447c478bd9Sstevel@tonic-gate 		(void) (*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_XLATE_VECTOR,
14457c478bd9Sstevel@tonic-gate 		    (int *)&hdlp->ih_vector);
14467c478bd9Sstevel@tonic-gate 
14477c478bd9Sstevel@tonic-gate 		/* Add the interrupt handler */
14487c478bd9Sstevel@tonic-gate 		if (!add_avintr((void *)hdlp, ispec->intrspec_pri,
14497c478bd9Sstevel@tonic-gate 		    hdlp->ih_cb_func, DEVI(rdip)->devi_name, hdlp->ih_vector,
14507c478bd9Sstevel@tonic-gate 		    hdlp->ih_cb_arg1, hdlp->ih_cb_arg2, rdip))
14517c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14527c478bd9Sstevel@tonic-gate 		break;
14537c478bd9Sstevel@tonic-gate 	case DDI_INTROP_DISABLE:
14547c478bd9Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
14557c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14567c478bd9Sstevel@tonic-gate 
14577c478bd9Sstevel@tonic-gate 		/* Call psm_ops() to translate irq with the dip */
14587c478bd9Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14597c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14607c478bd9Sstevel@tonic-gate 
14617c478bd9Sstevel@tonic-gate 		hdlp->ih_private = (void *)ispec;
14627c478bd9Sstevel@tonic-gate 		(void) (*psm_intr_ops)(rdip, hdlp,
14637c478bd9Sstevel@tonic-gate 		    PSM_INTR_OP_XLATE_VECTOR, (int *)&hdlp->ih_vector);
14647c478bd9Sstevel@tonic-gate 
14657c478bd9Sstevel@tonic-gate 		/* Remove the interrupt handler */
14667c478bd9Sstevel@tonic-gate 		rem_avintr((void *)hdlp, ispec->intrspec_pri,
14677c478bd9Sstevel@tonic-gate 		    hdlp->ih_cb_func, hdlp->ih_vector);
14687c478bd9Sstevel@tonic-gate 		break;
14697c478bd9Sstevel@tonic-gate 	case DDI_INTROP_SETMASK:
14707c478bd9Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14717c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14727c478bd9Sstevel@tonic-gate 
14737c478bd9Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_MASK, NULL))
14747c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14757c478bd9Sstevel@tonic-gate 		break;
14767c478bd9Sstevel@tonic-gate 	case DDI_INTROP_CLRMASK:
14777c478bd9Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14787c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14797c478bd9Sstevel@tonic-gate 
14807c478bd9Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_CLEAR_MASK, NULL))
14817c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14827c478bd9Sstevel@tonic-gate 		break;
14837c478bd9Sstevel@tonic-gate 	case DDI_INTROP_GETPENDING:
14847c478bd9Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14857c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14867c478bd9Sstevel@tonic-gate 
14877c478bd9Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_PENDING,
14887c478bd9Sstevel@tonic-gate 		    result)) {
14897c478bd9Sstevel@tonic-gate 			*(int *)result = 0;
14907c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14917c478bd9Sstevel@tonic-gate 		}
14927c478bd9Sstevel@tonic-gate 		break;
14937c478bd9Sstevel@tonic-gate 	case DDI_INTROP_NINTRS:
14947c478bd9Sstevel@tonic-gate 		if ((pdp = ddi_get_parent_data(rdip)) == NULL)
14957c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14967c478bd9Sstevel@tonic-gate 		*(int *)result = pdp->par_nintr;
14977c478bd9Sstevel@tonic-gate 		if (pdp->par_nintr == 0) {
14987c478bd9Sstevel@tonic-gate 			/*
14997c478bd9Sstevel@tonic-gate 			 * Special case for 'pcic' driver' only. This driver
15007c478bd9Sstevel@tonic-gate 			 * driver is a child of 'isa' and 'rootnex' drivers.
15017c478bd9Sstevel@tonic-gate 			 *
15027c478bd9Sstevel@tonic-gate 			 * See detailed comments on this in the function
15037c478bd9Sstevel@tonic-gate 			 * rootnex_get_ispec().
15047c478bd9Sstevel@tonic-gate 			 *
15057c478bd9Sstevel@tonic-gate 			 * Children of 'pcic' send 'NINITR' request all the
15067c478bd9Sstevel@tonic-gate 			 * way to rootnex driver. But, the 'pdp->par_nintr'
15077c478bd9Sstevel@tonic-gate 			 * field may not initialized. So, we fake it here
15087c478bd9Sstevel@tonic-gate 			 * to return 1 (a la what PCMCIA nexus does).
15097c478bd9Sstevel@tonic-gate 			 */
15107c478bd9Sstevel@tonic-gate 			if (strcmp(ddi_get_name(rdip), "pcic") == 0)
15117c478bd9Sstevel@tonic-gate 				*(int *)result = 1;
15127c478bd9Sstevel@tonic-gate 		}
15137c478bd9Sstevel@tonic-gate 		break;
15147c478bd9Sstevel@tonic-gate 	case DDI_INTROP_SUPPORTED_TYPES:
15157c478bd9Sstevel@tonic-gate 		*(int *)result = 0;
15167c478bd9Sstevel@tonic-gate 		*(int *)result |= DDI_INTR_TYPE_FIXED;	/* Always ... */
15177c478bd9Sstevel@tonic-gate 		break;
15187c478bd9Sstevel@tonic-gate 	case DDI_INTROP_NAVAIL:
15197c478bd9Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
15207c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
15217c478bd9Sstevel@tonic-gate 
15227c478bd9Sstevel@tonic-gate 		if (psm_intr_ops == NULL) {
15237c478bd9Sstevel@tonic-gate 			*(int *)result = 1;
15247c478bd9Sstevel@tonic-gate 			break;
15257c478bd9Sstevel@tonic-gate 		}
15267c478bd9Sstevel@tonic-gate 
15277c478bd9Sstevel@tonic-gate 		/* Priority in the handle not initialized yet */
15287c478bd9Sstevel@tonic-gate 		hdlp->ih_pri = ispec->intrspec_pri;
15297c478bd9Sstevel@tonic-gate 		(void) (*psm_intr_ops)(rdip, hdlp,
15307c478bd9Sstevel@tonic-gate 		    PSM_INTR_OP_NAVAIL_VECTORS, result);
15317c478bd9Sstevel@tonic-gate 		break;
15327c478bd9Sstevel@tonic-gate 	default:
15337c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
15347c478bd9Sstevel@tonic-gate 	}
15357c478bd9Sstevel@tonic-gate 
15367c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
15377c478bd9Sstevel@tonic-gate }
15387c478bd9Sstevel@tonic-gate 
15397c478bd9Sstevel@tonic-gate 
15407c478bd9Sstevel@tonic-gate /*
1541*12f080e7Smrj  * rootnex_get_ispec()
1542*12f080e7Smrj  *	convert an interrupt number to an interrupt specification.
1543*12f080e7Smrj  *	The interrupt number determines which interrupt spec will be
1544*12f080e7Smrj  *	returned if more than one exists.
1545*12f080e7Smrj  *
1546*12f080e7Smrj  *	Look into the parent private data area of the 'rdip' to find out
1547*12f080e7Smrj  *	the interrupt specification.  First check to make sure there is
1548*12f080e7Smrj  *	one that matchs "inumber" and then return a pointer to it.
1549*12f080e7Smrj  *
1550*12f080e7Smrj  *	Return NULL if one could not be found.
1551*12f080e7Smrj  *
1552*12f080e7Smrj  *	NOTE: This is needed for rootnex_intr_ops()
15537c478bd9Sstevel@tonic-gate  */
1554*12f080e7Smrj static struct intrspec *
1555*12f080e7Smrj rootnex_get_ispec(dev_info_t *rdip, int inum)
15567c478bd9Sstevel@tonic-gate {
1557*12f080e7Smrj 	struct ddi_parent_private_data *pdp = ddi_get_parent_data(rdip);
15587c478bd9Sstevel@tonic-gate 
15597c478bd9Sstevel@tonic-gate 	/*
1560*12f080e7Smrj 	 * Special case handling for drivers that provide their own
1561*12f080e7Smrj 	 * intrspec structures instead of relying on the DDI framework.
1562*12f080e7Smrj 	 *
1563*12f080e7Smrj 	 * A broken hardware driver in ON could potentially provide its
1564*12f080e7Smrj 	 * own intrspec structure, instead of relying on the hardware.
1565*12f080e7Smrj 	 * If these drivers are children of 'rootnex' then we need to
1566*12f080e7Smrj 	 * continue to provide backward compatibility to them here.
1567*12f080e7Smrj 	 *
1568*12f080e7Smrj 	 * Following check is a special case for 'pcic' driver which
1569*12f080e7Smrj 	 * was found to have broken hardwre andby provides its own intrspec.
1570*12f080e7Smrj 	 *
1571*12f080e7Smrj 	 * Verbatim comments from this driver are shown here:
1572*12f080e7Smrj 	 * "Don't use the ddi_add_intr since we don't have a
1573*12f080e7Smrj 	 * default intrspec in all cases."
1574*12f080e7Smrj 	 *
1575*12f080e7Smrj 	 * Since an 'ispec' may not be always created for it,
1576*12f080e7Smrj 	 * check for that and create one if so.
1577*12f080e7Smrj 	 *
1578*12f080e7Smrj 	 * NOTE: Currently 'pcic' is the only driver found to do this.
15797c478bd9Sstevel@tonic-gate 	 */
1580*12f080e7Smrj 	if (!pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) {
1581*12f080e7Smrj 		pdp->par_nintr = 1;
1582*12f080e7Smrj 		pdp->par_intr = kmem_zalloc(sizeof (struct intrspec) *
1583*12f080e7Smrj 		    pdp->par_nintr, KM_SLEEP);
1584*12f080e7Smrj 	}
1585*12f080e7Smrj 
1586*12f080e7Smrj 	/* Validate the interrupt number */
1587*12f080e7Smrj 	if (inum >= pdp->par_nintr)
1588*12f080e7Smrj 		return (NULL);
1589*12f080e7Smrj 
1590*12f080e7Smrj 	/* Get the interrupt structure pointer and return that */
1591*12f080e7Smrj 	return ((struct intrspec *)&pdp->par_intr[inum]);
1592*12f080e7Smrj }
1593*12f080e7Smrj 
1594*12f080e7Smrj 
1595*12f080e7Smrj /*
1596*12f080e7Smrj  * rootnex_xlate_intrs()
1597*12f080e7Smrj  *     For the x86 rootnexus, we're prepared to claim that the interrupt string
1598*12f080e7Smrj  *     is in the form of a list of <ipl,vec> specifications.
1599*12f080e7Smrj  */
1600*12f080e7Smrj static int
1601*12f080e7Smrj rootnex_xlate_intrs(dev_info_t *dip, dev_info_t *rdip, int *in,
1602*12f080e7Smrj     struct ddi_parent_private_data *pdptr)
1603*12f080e7Smrj {
1604*12f080e7Smrj 	size_t size;
1605*12f080e7Smrj 	int n;
1606*12f080e7Smrj 	struct intrspec *new;
1607*12f080e7Smrj 	caddr_t got_prop;
1608*12f080e7Smrj 	int *inpri;
1609*12f080e7Smrj 	int got_len;
1610*12f080e7Smrj 
1611*12f080e7Smrj 	static char bad_intr_fmt[] =
1612*12f080e7Smrj 	    "rootnex: bad interrupt spec from %s%d - ipl %d, irq %d\n";
1613*12f080e7Smrj 
1614*12f080e7Smrj #ifdef	lint
1615*12f080e7Smrj 	dip = dip;
1616*12f080e7Smrj #endif
1617*12f080e7Smrj 	/*
1618*12f080e7Smrj 	 * determine if the driver is expecting the new style "interrupts"
1619*12f080e7Smrj 	 * property which just contains the IRQ, or the old style which
1620*12f080e7Smrj 	 * contains pairs of <IPL,IRQ>.  if it is the new style, we always
1621*12f080e7Smrj 	 * assign IPL 5 unless an "interrupt-priorities" property exists.
1622*12f080e7Smrj 	 * in that case, the "interrupt-priorities" property contains the
1623*12f080e7Smrj 	 * IPL values that match, one for one, the IRQ values in the
1624*12f080e7Smrj 	 * "interrupts" property.
1625*12f080e7Smrj 	 */
1626*12f080e7Smrj 	inpri = NULL;
1627*12f080e7Smrj 	if ((ddi_getprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
1628*12f080e7Smrj 	    "ignore-hardware-nodes", -1) != -1) ||
1629*12f080e7Smrj 	    ignore_hardware_nodes) {
1630*12f080e7Smrj 		/* the old style "interrupts" property... */
1631*12f080e7Smrj 
1632*12f080e7Smrj 		/*
1633*12f080e7Smrj 		 * The list consists of <ipl,vec> elements
1634*12f080e7Smrj 		 */
1635*12f080e7Smrj 		if ((n = (*in++ >> 1)) < 1)
1636*12f080e7Smrj 			return (DDI_FAILURE);
1637*12f080e7Smrj 
1638*12f080e7Smrj 		pdptr->par_nintr = n;
1639*12f080e7Smrj 		size = n * sizeof (struct intrspec);
1640*12f080e7Smrj 		new = pdptr->par_intr = kmem_zalloc(size, KM_SLEEP);
1641*12f080e7Smrj 
1642*12f080e7Smrj 		while (n--) {
1643*12f080e7Smrj 			int level = *in++;
1644*12f080e7Smrj 			int vec = *in++;
1645*12f080e7Smrj 
1646*12f080e7Smrj 			if (level < 1 || level > MAXIPL ||
1647*12f080e7Smrj 			    vec < VEC_MIN || vec > VEC_MAX) {
1648*12f080e7Smrj 				cmn_err(CE_CONT, bad_intr_fmt,
1649*12f080e7Smrj 				    DEVI(rdip)->devi_name,
1650*12f080e7Smrj 				    DEVI(rdip)->devi_instance, level, vec);
1651*12f080e7Smrj 				goto broken;
1652*12f080e7Smrj 			}
1653*12f080e7Smrj 			new->intrspec_pri = level;
1654*12f080e7Smrj 			if (vec != 2)
1655*12f080e7Smrj 				new->intrspec_vec = vec;
1656*12f080e7Smrj 			else
1657*12f080e7Smrj 				/*
1658*12f080e7Smrj 				 * irq 2 on the PC bus is tied to irq 9
1659*12f080e7Smrj 				 * on ISA, EISA and MicroChannel
1660*12f080e7Smrj 				 */
1661*12f080e7Smrj 				new->intrspec_vec = 9;
1662*12f080e7Smrj 			new++;
1663*12f080e7Smrj 		}
1664*12f080e7Smrj 
1665*12f080e7Smrj 		return (DDI_SUCCESS);
1666*12f080e7Smrj 	} else {
1667*12f080e7Smrj 		/* the new style "interrupts" property... */
1668*12f080e7Smrj 
1669*12f080e7Smrj 		/*
1670*12f080e7Smrj 		 * The list consists of <vec> elements
1671*12f080e7Smrj 		 */
1672*12f080e7Smrj 		if ((n = (*in++)) < 1)
1673*12f080e7Smrj 			return (DDI_FAILURE);
1674*12f080e7Smrj 
1675*12f080e7Smrj 		pdptr->par_nintr = n;
1676*12f080e7Smrj 		size = n * sizeof (struct intrspec);
1677*12f080e7Smrj 		new = pdptr->par_intr = kmem_zalloc(size, KM_SLEEP);
1678*12f080e7Smrj 
1679*12f080e7Smrj 		/* XXX check for "interrupt-priorities" property... */
1680*12f080e7Smrj 		if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
1681*12f080e7Smrj 		    "interrupt-priorities", (caddr_t)&got_prop, &got_len)
1682*12f080e7Smrj 		    == DDI_PROP_SUCCESS) {
1683*12f080e7Smrj 			if (n != (got_len / sizeof (int))) {
1684*12f080e7Smrj 				cmn_err(CE_CONT,
1685*12f080e7Smrj 				    "rootnex: bad interrupt-priorities length"
1686*12f080e7Smrj 				    " from %s%d: expected %d, got %d\n",
1687*12f080e7Smrj 				    DEVI(rdip)->devi_name,
1688*12f080e7Smrj 				    DEVI(rdip)->devi_instance, n,
1689*12f080e7Smrj 				    (int)(got_len / sizeof (int)));
1690*12f080e7Smrj 				goto broken;
1691*12f080e7Smrj 			}
1692*12f080e7Smrj 			inpri = (int *)got_prop;
1693*12f080e7Smrj 		}
1694*12f080e7Smrj 
1695*12f080e7Smrj 		while (n--) {
1696*12f080e7Smrj 			int level;
1697*12f080e7Smrj 			int vec = *in++;
1698*12f080e7Smrj 
1699*12f080e7Smrj 			if (inpri == NULL)
1700*12f080e7Smrj 				level = 5;
1701*12f080e7Smrj 			else
1702*12f080e7Smrj 				level = *inpri++;
1703*12f080e7Smrj 
1704*12f080e7Smrj 			if (level < 1 || level > MAXIPL ||
1705*12f080e7Smrj 			    vec < VEC_MIN || vec > VEC_MAX) {
1706*12f080e7Smrj 				cmn_err(CE_CONT, bad_intr_fmt,
1707*12f080e7Smrj 				    DEVI(rdip)->devi_name,
1708*12f080e7Smrj 				    DEVI(rdip)->devi_instance, level, vec);
1709*12f080e7Smrj 				goto broken;
1710*12f080e7Smrj 			}
1711*12f080e7Smrj 			new->intrspec_pri = level;
1712*12f080e7Smrj 			if (vec != 2)
1713*12f080e7Smrj 				new->intrspec_vec = vec;
1714*12f080e7Smrj 			else
1715*12f080e7Smrj 				/*
1716*12f080e7Smrj 				 * irq 2 on the PC bus is tied to irq 9
1717*12f080e7Smrj 				 * on ISA, EISA and MicroChannel
1718*12f080e7Smrj 				 */
1719*12f080e7Smrj 				new->intrspec_vec = 9;
1720*12f080e7Smrj 			new++;
1721*12f080e7Smrj 		}
1722*12f080e7Smrj 
1723*12f080e7Smrj 		if (inpri != NULL)
1724*12f080e7Smrj 			kmem_free(got_prop, got_len);
1725*12f080e7Smrj 		return (DDI_SUCCESS);
1726*12f080e7Smrj 	}
1727*12f080e7Smrj 
1728*12f080e7Smrj broken:
1729*12f080e7Smrj 	kmem_free(pdptr->par_intr, size);
1730*12f080e7Smrj 	pdptr->par_intr = NULL;
1731*12f080e7Smrj 	pdptr->par_nintr = 0;
1732*12f080e7Smrj 	if (inpri != NULL)
1733*12f080e7Smrj 		kmem_free(got_prop, got_len);
1734*12f080e7Smrj 	return (DDI_FAILURE);
1735*12f080e7Smrj }
1736*12f080e7Smrj 
1737*12f080e7Smrj 
1738*12f080e7Smrj /*
1739*12f080e7Smrj  * ******************
1740*12f080e7Smrj  *  dma related code
1741*12f080e7Smrj  * ******************
1742*12f080e7Smrj  */
1743*12f080e7Smrj 
1744*12f080e7Smrj /*
1745*12f080e7Smrj  * rootnex_dma_allochdl()
1746*12f080e7Smrj  *    called from ddi_dma_alloc_handle().
1747*12f080e7Smrj  */
1748*12f080e7Smrj /*ARGSUSED*/
1749*12f080e7Smrj static int
1750*12f080e7Smrj rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
1751*12f080e7Smrj     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
1752*12f080e7Smrj {
1753*12f080e7Smrj 	uint64_t maxsegmentsize_ll;
1754*12f080e7Smrj 	uint_t maxsegmentsize;
1755*12f080e7Smrj 	ddi_dma_impl_t *hp;
1756*12f080e7Smrj 	rootnex_dma_t *dma;
1757*12f080e7Smrj 	uint64_t count_max;
1758*12f080e7Smrj 	uint64_t seg;
1759*12f080e7Smrj 	int kmflag;
1760*12f080e7Smrj 	int e;
1761*12f080e7Smrj 
1762*12f080e7Smrj 
1763*12f080e7Smrj 	/* convert our sleep flags */
1764*12f080e7Smrj 	if (waitfp == DDI_DMA_SLEEP) {
1765*12f080e7Smrj 		kmflag = KM_SLEEP;
1766*12f080e7Smrj 	} else {
1767*12f080e7Smrj 		kmflag = KM_NOSLEEP;
1768*12f080e7Smrj 	}
1769*12f080e7Smrj 
1770*12f080e7Smrj 	/*
1771*12f080e7Smrj 	 * We try to do only one memory allocation here. We'll do a little
1772*12f080e7Smrj 	 * pointer manipulation later. If the bind ends up taking more than
1773*12f080e7Smrj 	 * our prealloc's space, we'll have to allocate more memory in the
1774*12f080e7Smrj 	 * bind operation. Not great, but much better than before and the
1775*12f080e7Smrj 	 * best we can do with the current bind interfaces.
1776*12f080e7Smrj 	 */
1777*12f080e7Smrj 	hp = kmem_cache_alloc(rootnex_state->r_dmahdl_cache, kmflag);
1778*12f080e7Smrj 	if (hp == NULL) {
1779*12f080e7Smrj 		if (waitfp != DDI_DMA_DONTWAIT) {
1780*12f080e7Smrj 			ddi_set_callback(waitfp, arg,
1781*12f080e7Smrj 			    &rootnex_state->r_dvma_call_list_id);
1782*12f080e7Smrj 		}
1783*12f080e7Smrj 		return (DDI_DMA_NORESOURCES);
1784*12f080e7Smrj 	}
1785*12f080e7Smrj 
1786*12f080e7Smrj 	/* Do our pointer manipulation now, align the structures */
1787*12f080e7Smrj 	hp->dmai_private = (void *)(((uintptr_t)hp +
1788*12f080e7Smrj 	    (uintptr_t)sizeof (ddi_dma_impl_t) + 0x7) & ~0x7);
1789*12f080e7Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
1790*12f080e7Smrj 	dma->dp_prealloc_buffer = (uchar_t *)(((uintptr_t)dma +
1791*12f080e7Smrj 	    sizeof (rootnex_dma_t) + 0x7) & ~0x7);
1792*12f080e7Smrj 
1793*12f080e7Smrj 	/* setup the handle */
1794*12f080e7Smrj 	rootnex_clean_dmahdl(hp);
1795*12f080e7Smrj 	dma->dp_dip = rdip;
1796*12f080e7Smrj 	dma->dp_sglinfo.si_min_addr = attr->dma_attr_addr_lo;
1797*12f080e7Smrj 	dma->dp_sglinfo.si_max_addr = attr->dma_attr_addr_hi;
1798*12f080e7Smrj 	hp->dmai_minxfer = attr->dma_attr_minxfer;
1799*12f080e7Smrj 	hp->dmai_burstsizes = attr->dma_attr_burstsizes;
1800*12f080e7Smrj 	hp->dmai_rdip = rdip;
1801*12f080e7Smrj 	hp->dmai_attr = *attr;
1802*12f080e7Smrj 
1803*12f080e7Smrj 	/* we don't need to worry about the SPL since we do a tryenter */
1804*12f080e7Smrj 	mutex_init(&dma->dp_mutex, NULL, MUTEX_DRIVER, NULL);
1805*12f080e7Smrj 
1806*12f080e7Smrj 	/*
1807*12f080e7Smrj 	 * Figure out our maximum segment size. If the segment size is greater
1808*12f080e7Smrj 	 * than 4G, we will limit it to (4G - 1) since the max size of a dma
1809*12f080e7Smrj 	 * object (ddi_dma_obj_t.dmao_size) is 32 bits. dma_attr_seg and
1810*12f080e7Smrj 	 * dma_attr_count_max are size-1 type values.
1811*12f080e7Smrj 	 *
1812*12f080e7Smrj 	 * Maximum segment size is the largest physically contiguous chunk of
1813*12f080e7Smrj 	 * memory that we can return from a bind (i.e. the maximum size of a
1814*12f080e7Smrj 	 * single cookie).
1815*12f080e7Smrj 	 */
1816*12f080e7Smrj 
1817*12f080e7Smrj 	/* handle the rollover cases */
1818*12f080e7Smrj 	seg = attr->dma_attr_seg + 1;
1819*12f080e7Smrj 	if (seg < attr->dma_attr_seg) {
1820*12f080e7Smrj 		seg = attr->dma_attr_seg;
1821*12f080e7Smrj 	}
1822*12f080e7Smrj 	count_max = attr->dma_attr_count_max + 1;
1823*12f080e7Smrj 	if (count_max < attr->dma_attr_count_max) {
1824*12f080e7Smrj 		count_max = attr->dma_attr_count_max;
1825*12f080e7Smrj 	}
1826*12f080e7Smrj 
1827*12f080e7Smrj 	/*
1828*12f080e7Smrj 	 * granularity may or may not be a power of two. If it isn't, we can't
1829*12f080e7Smrj 	 * use a simple mask.
1830*12f080e7Smrj 	 */
1831*12f080e7Smrj 	if (attr->dma_attr_granular & (attr->dma_attr_granular - 1)) {
1832*12f080e7Smrj 		dma->dp_granularity_power_2 = B_FALSE;
1833*12f080e7Smrj 	} else {
1834*12f080e7Smrj 		dma->dp_granularity_power_2 = B_TRUE;
1835*12f080e7Smrj 	}
1836*12f080e7Smrj 
1837*12f080e7Smrj 	/*
1838*12f080e7Smrj 	 * maxxfer should be a whole multiple of granularity. If we're going to
1839*12f080e7Smrj 	 * break up a window because we're greater than maxxfer, we might as
1840*12f080e7Smrj 	 * well make sure it's maxxfer is a whole multiple so we don't have to
1841*12f080e7Smrj 	 * worry about triming the window later on for this case.
1842*12f080e7Smrj 	 */
1843*12f080e7Smrj 	if (attr->dma_attr_granular > 1) {
1844*12f080e7Smrj 		if (dma->dp_granularity_power_2) {
1845*12f080e7Smrj 			dma->dp_maxxfer = attr->dma_attr_maxxfer -
1846*12f080e7Smrj 			    (attr->dma_attr_maxxfer &
1847*12f080e7Smrj 			    (attr->dma_attr_granular - 1));
1848*12f080e7Smrj 		} else {
1849*12f080e7Smrj 			dma->dp_maxxfer = attr->dma_attr_maxxfer -
1850*12f080e7Smrj 			    (attr->dma_attr_maxxfer % attr->dma_attr_granular);
1851*12f080e7Smrj 		}
1852*12f080e7Smrj 	} else {
1853*12f080e7Smrj 		dma->dp_maxxfer = attr->dma_attr_maxxfer;
1854*12f080e7Smrj 	}
1855*12f080e7Smrj 
1856*12f080e7Smrj 	maxsegmentsize_ll = MIN(seg, dma->dp_maxxfer);
1857*12f080e7Smrj 	maxsegmentsize_ll = MIN(maxsegmentsize_ll, count_max);
1858*12f080e7Smrj 	if (maxsegmentsize_ll == 0 || (maxsegmentsize_ll > 0xFFFFFFFF)) {
1859*12f080e7Smrj 		maxsegmentsize = 0xFFFFFFFF;
1860*12f080e7Smrj 	} else {
1861*12f080e7Smrj 		maxsegmentsize = maxsegmentsize_ll;
1862*12f080e7Smrj 	}
1863*12f080e7Smrj 	dma->dp_sglinfo.si_max_cookie_size = maxsegmentsize;
1864*12f080e7Smrj 	dma->dp_sglinfo.si_segmask = attr->dma_attr_seg;
1865*12f080e7Smrj 
1866*12f080e7Smrj 	/* check the ddi_dma_attr arg to make sure it makes a little sense */
1867*12f080e7Smrj 	if (rootnex_alloc_check_parms) {
1868*12f080e7Smrj 		e = rootnex_valid_alloc_parms(attr, maxsegmentsize);
1869*12f080e7Smrj 		if (e != DDI_SUCCESS) {
1870*12f080e7Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ALLOC_FAIL]);
1871*12f080e7Smrj 			(void) rootnex_dma_freehdl(dip, rdip,
1872*12f080e7Smrj 			    (ddi_dma_handle_t)hp);
1873*12f080e7Smrj 			return (e);
1874*12f080e7Smrj 		}
1875*12f080e7Smrj 	}
1876*12f080e7Smrj 
1877*12f080e7Smrj 	*handlep = (ddi_dma_handle_t)hp;
1878*12f080e7Smrj 
1879*12f080e7Smrj 	ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1880*12f080e7Smrj 	DTRACE_PROBE1(rootnex__alloc__handle, uint64_t,
1881*12f080e7Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1882*12f080e7Smrj 
1883*12f080e7Smrj 	return (DDI_SUCCESS);
1884*12f080e7Smrj }
1885*12f080e7Smrj 
1886*12f080e7Smrj 
1887*12f080e7Smrj /*
1888*12f080e7Smrj  * rootnex_dma_freehdl()
1889*12f080e7Smrj  *    called from ddi_dma_free_handle().
1890*12f080e7Smrj  */
1891*12f080e7Smrj /*ARGSUSED*/
1892*12f080e7Smrj static int
1893*12f080e7Smrj rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
1894*12f080e7Smrj {
1895*12f080e7Smrj 	ddi_dma_impl_t *hp;
1896*12f080e7Smrj 	rootnex_dma_t *dma;
1897*12f080e7Smrj 
1898*12f080e7Smrj 
1899*12f080e7Smrj 	hp = (ddi_dma_impl_t *)handle;
1900*12f080e7Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
1901*12f080e7Smrj 
1902*12f080e7Smrj 	/* unbind should have been called first */
1903*12f080e7Smrj 	ASSERT(!dma->dp_inuse);
1904*12f080e7Smrj 
1905*12f080e7Smrj 	mutex_destroy(&dma->dp_mutex);
1906*12f080e7Smrj 	kmem_cache_free(rootnex_state->r_dmahdl_cache, hp);
1907*12f080e7Smrj 
1908*12f080e7Smrj 	ROOTNEX_PROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1909*12f080e7Smrj 	DTRACE_PROBE1(rootnex__free__handle, uint64_t,
1910*12f080e7Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1911*12f080e7Smrj 
1912*12f080e7Smrj 	if (rootnex_state->r_dvma_call_list_id)
1913*12f080e7Smrj 		ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
1914*12f080e7Smrj 
1915*12f080e7Smrj 	return (DDI_SUCCESS);
1916*12f080e7Smrj }
1917*12f080e7Smrj 
1918*12f080e7Smrj 
1919*12f080e7Smrj /*
1920*12f080e7Smrj  * rootnex_dma_bindhdl()
1921*12f080e7Smrj  *    called from ddi_dma_addr_bind_handle() and ddi_dma_buf_bind_handle().
1922*12f080e7Smrj  */
1923*12f080e7Smrj /*ARGSUSED*/
1924*12f080e7Smrj static int
1925*12f080e7Smrj rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
1926*12f080e7Smrj     struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
1927*12f080e7Smrj {
1928*12f080e7Smrj 	rootnex_sglinfo_t *sinfo;
1929*12f080e7Smrj 	ddi_dma_attr_t *attr;
1930*12f080e7Smrj 	ddi_dma_impl_t *hp;
1931*12f080e7Smrj 	rootnex_dma_t *dma;
1932*12f080e7Smrj 	int kmflag;
1933*12f080e7Smrj 	int e;
1934*12f080e7Smrj 
1935*12f080e7Smrj 
1936*12f080e7Smrj 	hp = (ddi_dma_impl_t *)handle;
1937*12f080e7Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
1938*12f080e7Smrj 	sinfo = &dma->dp_sglinfo;
1939*12f080e7Smrj 	attr = &hp->dmai_attr;
1940*12f080e7Smrj 
1941*12f080e7Smrj 	hp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS;
1942*12f080e7Smrj 
1943*12f080e7Smrj 	/*
1944*12f080e7Smrj 	 * This is useful for debugging a driver. Not as useful in a production
1945*12f080e7Smrj 	 * system. The only time this will fail is if you have a driver bug.
1946*12f080e7Smrj 	 */
1947*12f080e7Smrj 	if (rootnex_bind_check_inuse) {
1948*12f080e7Smrj 		/*
1949*12f080e7Smrj 		 * No one else should ever have this lock unless someone else
1950*12f080e7Smrj 		 * is trying to use this handle. So contention on the lock
1951*12f080e7Smrj 		 * is the same as inuse being set.
1952*12f080e7Smrj 		 */
1953*12f080e7Smrj 		e = mutex_tryenter(&dma->dp_mutex);
1954*12f080e7Smrj 		if (e == 0) {
1955*12f080e7Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
1956*12f080e7Smrj 			return (DDI_DMA_INUSE);
1957*12f080e7Smrj 		}
1958*12f080e7Smrj 		if (dma->dp_inuse) {
1959*12f080e7Smrj 			mutex_exit(&dma->dp_mutex);
1960*12f080e7Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
1961*12f080e7Smrj 			return (DDI_DMA_INUSE);
1962*12f080e7Smrj 		}
1963*12f080e7Smrj 		dma->dp_inuse = B_TRUE;
1964*12f080e7Smrj 		mutex_exit(&dma->dp_mutex);
1965*12f080e7Smrj 	}
1966*12f080e7Smrj 
1967*12f080e7Smrj 	/* check the ddi_dma_attr arg to make sure it makes a little sense */
1968*12f080e7Smrj 	if (rootnex_bind_check_parms) {
1969*12f080e7Smrj 		e = rootnex_valid_bind_parms(dmareq, attr);
1970*12f080e7Smrj 		if (e != DDI_SUCCESS) {
1971*12f080e7Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
1972*12f080e7Smrj 			rootnex_clean_dmahdl(hp);
1973*12f080e7Smrj 			return (e);
1974*12f080e7Smrj 		}
1975*12f080e7Smrj 	}
1976*12f080e7Smrj 
1977*12f080e7Smrj 	/* save away the original bind info */
1978*12f080e7Smrj 	dma->dp_dma = dmareq->dmar_object;
1979*12f080e7Smrj 
1980*12f080e7Smrj 	/*
1981*12f080e7Smrj 	 * Figure out a rough estimate of what maximum number of pages this
1982*12f080e7Smrj 	 * buffer could use (a high estimate of course).
1983*12f080e7Smrj 	 */
1984*12f080e7Smrj 	sinfo->si_max_pages = mmu_btopr(dma->dp_dma.dmao_size) + 1;
1985*12f080e7Smrj 
1986*12f080e7Smrj 	/*
1987*12f080e7Smrj 	 * We'll use the pre-allocated cookies for any bind that will *always*
1988*12f080e7Smrj 	 * fit (more important to be consistent, we don't want to create
1989*12f080e7Smrj 	 * additional degenerate cases).
1990*12f080e7Smrj 	 */
1991*12f080e7Smrj 	if (sinfo->si_max_pages <= rootnex_state->r_prealloc_cookies) {
1992*12f080e7Smrj 		dma->dp_cookies = (ddi_dma_cookie_t *)dma->dp_prealloc_buffer;
1993*12f080e7Smrj 		dma->dp_need_to_free_cookie = B_FALSE;
1994*12f080e7Smrj 		DTRACE_PROBE2(rootnex__bind__prealloc, dev_info_t *, rdip,
1995*12f080e7Smrj 		    uint_t, sinfo->si_max_pages);
1996*12f080e7Smrj 
1997*12f080e7Smrj 	/*
1998*12f080e7Smrj 	 * For anything larger than that, we'll go ahead and allocate the
1999*12f080e7Smrj 	 * maximum number of pages we expect to see. Hopefuly, we won't be
2000*12f080e7Smrj 	 * seeing this path in the fast path for high performance devices very
2001*12f080e7Smrj 	 * frequently.
2002*12f080e7Smrj 	 *
2003*12f080e7Smrj 	 * a ddi bind interface that allowed the driver to provide storage to
2004*12f080e7Smrj 	 * the bind interface would speed this case up.
2005*12f080e7Smrj 	 */
2006*12f080e7Smrj 	} else {
2007*12f080e7Smrj 		/* convert the sleep flags */
2008*12f080e7Smrj 		if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
2009*12f080e7Smrj 			kmflag =  KM_SLEEP;
2010*12f080e7Smrj 		} else {
2011*12f080e7Smrj 			kmflag =  KM_NOSLEEP;
2012*12f080e7Smrj 		}
2013*12f080e7Smrj 
2014*12f080e7Smrj 		/*
2015*12f080e7Smrj 		 * Save away how much memory we allocated. If we're doing a
2016*12f080e7Smrj 		 * nosleep, the alloc could fail...
2017*12f080e7Smrj 		 */
2018*12f080e7Smrj 		dma->dp_cookie_size = sinfo->si_max_pages *
2019*12f080e7Smrj 		    sizeof (ddi_dma_cookie_t);
2020*12f080e7Smrj 		dma->dp_cookies = kmem_alloc(dma->dp_cookie_size, kmflag);
2021*12f080e7Smrj 		if (dma->dp_cookies == NULL) {
2022*12f080e7Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
2023*12f080e7Smrj 			rootnex_clean_dmahdl(hp);
2024*12f080e7Smrj 			return (DDI_DMA_NORESOURCES);
2025*12f080e7Smrj 		}
2026*12f080e7Smrj 		dma->dp_need_to_free_cookie = B_TRUE;
2027*12f080e7Smrj 		DTRACE_PROBE2(rootnex__bind__alloc, dev_info_t *, rdip, uint_t,
2028*12f080e7Smrj 		    sinfo->si_max_pages);
2029*12f080e7Smrj 	}
2030*12f080e7Smrj 	hp->dmai_cookie = dma->dp_cookies;
2031*12f080e7Smrj 
2032*12f080e7Smrj 	/*
2033*12f080e7Smrj 	 * Get the real sgl. rootnex_get_sgl will fill in cookie array while
2034*12f080e7Smrj 	 * looking at the contraints in the dma structure. It will then put some
2035*12f080e7Smrj 	 * additional state about the sgl in the dma struct (i.e. is the sgl
2036*12f080e7Smrj 	 * clean, or do we need to do some munging; how many pages need to be
2037*12f080e7Smrj 	 * copied, etc.)
2038*12f080e7Smrj 	 */
2039*12f080e7Smrj 	rootnex_get_sgl(&dmareq->dmar_object, dma->dp_cookies,
2040*12f080e7Smrj 	    &dma->dp_sglinfo);
2041*12f080e7Smrj 	ASSERT(sinfo->si_sgl_size <= sinfo->si_max_pages);
2042*12f080e7Smrj 
2043*12f080e7Smrj 	/* if we don't need a copy buffer, we don't need to sync */
2044*12f080e7Smrj 	if (sinfo->si_copybuf_req == 0) {
2045*12f080e7Smrj 		hp->dmai_rflags |= DMP_NOSYNC;
2046*12f080e7Smrj 	}
2047*12f080e7Smrj 
2048*12f080e7Smrj 	/*
2049*12f080e7Smrj 	 * if we don't need the copybuf and we don't need to do a partial,  we
2050*12f080e7Smrj 	 * hit the fast path. All the high performance devices should be trying
2051*12f080e7Smrj 	 * to hit this path. To hit this path, a device should be able to reach
2052*12f080e7Smrj 	 * all of memory, shouldn't try to bind more than it can transfer, and
2053*12f080e7Smrj 	 * the buffer shouldn't require more cookies than the driver/device can
2054*12f080e7Smrj 	 * handle [sgllen]).
2055*12f080e7Smrj 	 */
2056*12f080e7Smrj 	if ((sinfo->si_copybuf_req == 0) &&
2057*12f080e7Smrj 	    (sinfo->si_sgl_size <= attr->dma_attr_sgllen) &&
2058*12f080e7Smrj 	    (dma->dp_dma.dmao_size < dma->dp_maxxfer)) {
2059*12f080e7Smrj 		/*
2060*12f080e7Smrj 		 * copy out the first cookie and ccountp, set the cookie
2061*12f080e7Smrj 		 * pointer to the second cookie. The first cookie is passed
2062*12f080e7Smrj 		 * back on the stack. Additional cookies are accessed via
2063*12f080e7Smrj 		 * ddi_dma_nextcookie()
2064*12f080e7Smrj 		 */
2065*12f080e7Smrj 		*cookiep = dma->dp_cookies[0];
2066*12f080e7Smrj 		*ccountp = sinfo->si_sgl_size;
2067*12f080e7Smrj 		hp->dmai_cookie++;
2068*12f080e7Smrj 		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
2069*12f080e7Smrj 		hp->dmai_nwin = 1;
2070*12f080e7Smrj 		ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
2071*12f080e7Smrj 		DTRACE_PROBE3(rootnex__bind__fast, dev_info_t *, rdip, uint64_t,
2072*12f080e7Smrj 		    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t,
2073*12f080e7Smrj 		    dma->dp_dma.dmao_size);
2074*12f080e7Smrj 		return (DDI_DMA_MAPPED);
2075*12f080e7Smrj 	}
2076*12f080e7Smrj 
2077*12f080e7Smrj 	/*
2078*12f080e7Smrj 	 * go to the slow path, we may need to alloc more memory, create
2079*12f080e7Smrj 	 * multiple windows, and munge up a sgl to make the device happy.
2080*12f080e7Smrj 	 */
2081*12f080e7Smrj 	e = rootnex_bind_slowpath(hp, dmareq, dma, attr, kmflag);
2082*12f080e7Smrj 	if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) {
2083*12f080e7Smrj 		if (dma->dp_need_to_free_cookie) {
2084*12f080e7Smrj 			kmem_free(dma->dp_cookies, dma->dp_cookie_size);
2085*12f080e7Smrj 		}
2086*12f080e7Smrj 		ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
2087*12f080e7Smrj 		rootnex_clean_dmahdl(hp); /* must be after free cookie */
2088*12f080e7Smrj 		return (e);
2089*12f080e7Smrj 	}
2090*12f080e7Smrj 
2091*12f080e7Smrj 	/* if the first window uses the copy buffer, sync it for the device */
2092*12f080e7Smrj 	if ((dma->dp_window[dma->dp_current_win].wd_dosync) &&
2093*12f080e7Smrj 	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
2094*12f080e7Smrj 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
2095*12f080e7Smrj 		    DDI_DMA_SYNC_FORDEV);
2096*12f080e7Smrj 	}
2097*12f080e7Smrj 
2098*12f080e7Smrj 	/*
2099*12f080e7Smrj 	 * copy out the first cookie and ccountp, set the cookie pointer to the
2100*12f080e7Smrj 	 * second cookie. Make sure the partial flag is set/cleared correctly.
2101*12f080e7Smrj 	 * If we have a partial map (i.e. multiple windows), the number of
2102*12f080e7Smrj 	 * cookies we return is the number of cookies in the first window.
2103*12f080e7Smrj 	 */
2104*12f080e7Smrj 	if (e == DDI_DMA_MAPPED) {
2105*12f080e7Smrj 		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
2106*12f080e7Smrj 		*ccountp = sinfo->si_sgl_size;
2107*12f080e7Smrj 	} else {
2108*12f080e7Smrj 		hp->dmai_rflags |= DDI_DMA_PARTIAL;
2109*12f080e7Smrj 		*ccountp = dma->dp_window[dma->dp_current_win].wd_cookie_cnt;
2110*12f080e7Smrj 		ASSERT(hp->dmai_nwin <= dma->dp_max_win);
2111*12f080e7Smrj 	}
2112*12f080e7Smrj 	*cookiep = dma->dp_cookies[0];
2113*12f080e7Smrj 	hp->dmai_cookie++;
2114*12f080e7Smrj 
2115*12f080e7Smrj 	ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
2116*12f080e7Smrj 	DTRACE_PROBE3(rootnex__bind__slow, dev_info_t *, rdip, uint64_t,
2117*12f080e7Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t,
2118*12f080e7Smrj 	    dma->dp_dma.dmao_size);
2119*12f080e7Smrj 	return (e);
2120*12f080e7Smrj }
2121*12f080e7Smrj 
2122*12f080e7Smrj 
2123*12f080e7Smrj /*
2124*12f080e7Smrj  * rootnex_dma_unbindhdl()
2125*12f080e7Smrj  *    called from ddi_dma_unbind_handle()
2126*12f080e7Smrj  */
2127*12f080e7Smrj /*ARGSUSED*/
2128*12f080e7Smrj static int
2129*12f080e7Smrj rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
2130*12f080e7Smrj     ddi_dma_handle_t handle)
2131*12f080e7Smrj {
2132*12f080e7Smrj 	ddi_dma_impl_t *hp;
2133*12f080e7Smrj 	rootnex_dma_t *dma;
2134*12f080e7Smrj 	int e;
2135*12f080e7Smrj 
2136*12f080e7Smrj 
2137*12f080e7Smrj 	hp = (ddi_dma_impl_t *)handle;
2138*12f080e7Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
2139*12f080e7Smrj 
2140*12f080e7Smrj 	/* make sure the buffer wasn't free'd before calling unbind */
2141*12f080e7Smrj 	if (rootnex_unbind_verify_buffer) {
2142*12f080e7Smrj 		e = rootnex_verify_buffer(dma);
2143*12f080e7Smrj 		if (e != DDI_SUCCESS) {
2144*12f080e7Smrj 			ASSERT(0);
2145*12f080e7Smrj 			return (DDI_FAILURE);
2146*12f080e7Smrj 		}
2147*12f080e7Smrj 	}
2148*12f080e7Smrj 
2149*12f080e7Smrj 	/* sync the current window before unbinding the buffer */
2150*12f080e7Smrj 	if (dma->dp_window && dma->dp_window[dma->dp_current_win].wd_dosync &&
2151*12f080e7Smrj 	    (hp->dmai_rflags & DDI_DMA_READ)) {
2152*12f080e7Smrj 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
2153*12f080e7Smrj 		    DDI_DMA_SYNC_FORCPU);
2154*12f080e7Smrj 	}
2155*12f080e7Smrj 
2156*12f080e7Smrj 	/*
2157*12f080e7Smrj 	 * cleanup and copy buffer or window state. if we didn't use the copy
2158*12f080e7Smrj 	 * buffer or windows, there won't be much to do :-)
2159*12f080e7Smrj 	 */
2160*12f080e7Smrj 	rootnex_teardown_copybuf(dma);
2161*12f080e7Smrj 	rootnex_teardown_windows(dma);
2162*12f080e7Smrj 
2163*12f080e7Smrj 	/*
2164*12f080e7Smrj 	 * If we had to allocate space to for the worse case sgl (it didn't
2165*12f080e7Smrj 	 * fit into our pre-allocate buffer), free that up now
2166*12f080e7Smrj 	 */
2167*12f080e7Smrj 	if (dma->dp_need_to_free_cookie) {
2168*12f080e7Smrj 		kmem_free(dma->dp_cookies, dma->dp_cookie_size);
2169*12f080e7Smrj 	}
2170*12f080e7Smrj 
2171*12f080e7Smrj 	/*
2172*12f080e7Smrj 	 * clean up the handle so it's ready for the next bind (i.e. if the
2173*12f080e7Smrj 	 * handle is reused).
2174*12f080e7Smrj 	 */
2175*12f080e7Smrj 	rootnex_clean_dmahdl(hp);
2176*12f080e7Smrj 
2177*12f080e7Smrj 	if (rootnex_state->r_dvma_call_list_id)
2178*12f080e7Smrj 		ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
2179*12f080e7Smrj 
2180*12f080e7Smrj 	ROOTNEX_PROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
2181*12f080e7Smrj 	DTRACE_PROBE1(rootnex__unbind, uint64_t,
2182*12f080e7Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
2183*12f080e7Smrj 
2184*12f080e7Smrj 	return (DDI_SUCCESS);
2185*12f080e7Smrj }
2186*12f080e7Smrj 
2187*12f080e7Smrj 
2188*12f080e7Smrj /*
2189*12f080e7Smrj  * rootnex_verify_buffer()
2190*12f080e7Smrj  *   verify buffer wasn't free'd
2191*12f080e7Smrj  */
2192*12f080e7Smrj static int
2193*12f080e7Smrj rootnex_verify_buffer(rootnex_dma_t *dma)
2194*12f080e7Smrj {
2195*12f080e7Smrj 	peekpoke_ctlops_t peek;
2196*12f080e7Smrj 	page_t **pplist;
2197*12f080e7Smrj 	caddr_t vaddr;
2198*12f080e7Smrj 	uint_t pcnt;
2199*12f080e7Smrj 	uint_t poff;
2200*12f080e7Smrj 	page_t *pp;
2201*12f080e7Smrj 	uint8_t b;
2202*12f080e7Smrj 	int i;
2203*12f080e7Smrj 	int e;
2204*12f080e7Smrj 
2205*12f080e7Smrj 
2206*12f080e7Smrj 	/* Figure out how many pages this buffer occupies */
2207*12f080e7Smrj 	if (dma->dp_dma.dmao_type == DMA_OTYP_PAGES) {
2208*12f080e7Smrj 		poff = dma->dp_dma.dmao_obj.pp_obj.pp_offset & MMU_PAGEOFFSET;
2209*12f080e7Smrj 	} else {
2210*12f080e7Smrj 		vaddr = dma->dp_dma.dmao_obj.virt_obj.v_addr;
2211*12f080e7Smrj 		poff = (uintptr_t)vaddr & MMU_PAGEOFFSET;
2212*12f080e7Smrj 	}
2213*12f080e7Smrj 	pcnt = mmu_btopr(dma->dp_dma.dmao_size + poff);
2214*12f080e7Smrj 
2215*12f080e7Smrj 	switch (dma->dp_dma.dmao_type) {
2216*12f080e7Smrj 	case DMA_OTYP_PAGES:
2217*12f080e7Smrj 		/*
2218*12f080e7Smrj 		 * for a linked list of pp's walk through them to make sure
2219*12f080e7Smrj 		 * they're locked and not free.
2220*12f080e7Smrj 		 */
2221*12f080e7Smrj 		pp = dma->dp_dma.dmao_obj.pp_obj.pp_pp;
2222*12f080e7Smrj 		for (i = 0; i < pcnt; i++) {
2223*12f080e7Smrj 			if (PP_ISFREE(pp) || !PAGE_LOCKED(pp)) {
2224*12f080e7Smrj 				return (DDI_FAILURE);
2225*12f080e7Smrj 			}
22267c478bd9Sstevel@tonic-gate 			pp = pp->p_next;
22277c478bd9Sstevel@tonic-gate 		}
22287c478bd9Sstevel@tonic-gate 		break;
2229*12f080e7Smrj 
22307c478bd9Sstevel@tonic-gate 	case DMA_OTYP_VADDR:
22317c478bd9Sstevel@tonic-gate 	case DMA_OTYP_BUFVADDR:
2232*12f080e7Smrj 		pplist = dma->dp_dma.dmao_obj.virt_obj.v_priv;
2233*12f080e7Smrj 		/*
2234*12f080e7Smrj 		 * for an array of pp's walk through them to make sure they're
2235*12f080e7Smrj 		 * not free. It's possible that they may not be locked.
2236*12f080e7Smrj 		 */
2237*12f080e7Smrj 		if (pplist) {
2238*12f080e7Smrj 			for (i = 0; i < pcnt; i++) {
2239*12f080e7Smrj 				if (PP_ISFREE(pplist[i])) {
2240*12f080e7Smrj 					return (DDI_FAILURE);
2241*12f080e7Smrj 				}
2242*12f080e7Smrj 			}
2243*12f080e7Smrj 
2244*12f080e7Smrj 		/* For a virtual address, try to peek at each page */
2245*12f080e7Smrj 		} else {
2246*12f080e7Smrj 			if (dma->dp_sglinfo.si_asp == &kas) {
2247*12f080e7Smrj 				bzero(&peek, sizeof (peekpoke_ctlops_t));
2248*12f080e7Smrj 				peek.host_addr = (uintptr_t)&b;
2249*12f080e7Smrj 				peek.size = sizeof (uint8_t);
2250*12f080e7Smrj 				peek.dev_addr = (uintptr_t)vaddr;
2251*12f080e7Smrj 				for (i = 0; i < pcnt; i++) {
2252*12f080e7Smrj 					e = rootnex_ctlops_peek(&peek, &b);
2253*12f080e7Smrj 					if (e != DDI_SUCCESS) {
2254*12f080e7Smrj 						return (DDI_FAILURE);
2255*12f080e7Smrj 					}
2256*12f080e7Smrj 					peek.dev_addr += MMU_PAGESIZE;
2257*12f080e7Smrj 				}
2258*12f080e7Smrj 			}
2259*12f080e7Smrj 		}
2260*12f080e7Smrj 		break;
2261*12f080e7Smrj 
2262*12f080e7Smrj 	default:
2263*12f080e7Smrj 		ASSERT(0);
2264*12f080e7Smrj 		break;
2265*12f080e7Smrj 	}
2266*12f080e7Smrj 
2267*12f080e7Smrj 	return (DDI_SUCCESS);
2268*12f080e7Smrj }
2269*12f080e7Smrj 
2270*12f080e7Smrj 
2271*12f080e7Smrj /*
2272*12f080e7Smrj  * rootnex_clean_dmahdl()
2273*12f080e7Smrj  *    Clean the dma handle. This should be called on a handle alloc and an
2274*12f080e7Smrj  *    unbind handle. Set the handle state to the default settings.
2275*12f080e7Smrj  */
2276*12f080e7Smrj static void
2277*12f080e7Smrj rootnex_clean_dmahdl(ddi_dma_impl_t *hp)
2278*12f080e7Smrj {
2279*12f080e7Smrj 	rootnex_dma_t *dma;
2280*12f080e7Smrj 
2281*12f080e7Smrj 
2282*12f080e7Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
2283*12f080e7Smrj 
2284*12f080e7Smrj 	hp->dmai_nwin = 0;
2285*12f080e7Smrj 	dma->dp_current_cookie = 0;
2286*12f080e7Smrj 	dma->dp_copybuf_size = 0;
2287*12f080e7Smrj 	dma->dp_window = NULL;
2288*12f080e7Smrj 	dma->dp_cbaddr = NULL;
2289*12f080e7Smrj 	dma->dp_inuse = B_FALSE;
2290*12f080e7Smrj 	dma->dp_need_to_free_cookie = B_FALSE;
2291*12f080e7Smrj 	dma->dp_need_to_free_window = B_FALSE;
2292*12f080e7Smrj 	dma->dp_partial_required = B_FALSE;
2293*12f080e7Smrj 	dma->dp_trim_required = B_FALSE;
2294*12f080e7Smrj 	dma->dp_sglinfo.si_copybuf_req = 0;
2295*12f080e7Smrj #if !defined(__amd64)
2296*12f080e7Smrj 	dma->dp_cb_remaping = B_FALSE;
2297*12f080e7Smrj 	dma->dp_kva = NULL;
2298*12f080e7Smrj #endif
2299*12f080e7Smrj 
2300*12f080e7Smrj 	/* FMA related initialization */
2301*12f080e7Smrj 	hp->dmai_fault = 0;
2302*12f080e7Smrj 	hp->dmai_fault_check = NULL;
2303*12f080e7Smrj 	hp->dmai_fault_notify = NULL;
2304*12f080e7Smrj 	hp->dmai_error.err_ena = 0;
2305*12f080e7Smrj 	hp->dmai_error.err_status = DDI_FM_OK;
2306*12f080e7Smrj 	hp->dmai_error.err_expected = DDI_FM_ERR_UNEXPECTED;
2307*12f080e7Smrj 	hp->dmai_error.err_ontrap = NULL;
2308*12f080e7Smrj 	hp->dmai_error.err_fep = NULL;
2309*12f080e7Smrj }
2310*12f080e7Smrj 
2311*12f080e7Smrj 
2312*12f080e7Smrj /*
2313*12f080e7Smrj  * rootnex_valid_alloc_parms()
2314*12f080e7Smrj  *    Called in ddi_dma_alloc_handle path to validate its parameters.
2315*12f080e7Smrj  */
2316*12f080e7Smrj static int
2317*12f080e7Smrj rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegmentsize)
2318*12f080e7Smrj {
2319*12f080e7Smrj 	if ((attr->dma_attr_seg < MMU_PAGEOFFSET) ||
2320*12f080e7Smrj 	    (attr->dma_attr_count_max < MMU_PAGEOFFSET) ||
2321*12f080e7Smrj 	    (attr->dma_attr_granular > MMU_PAGESIZE) ||
2322*12f080e7Smrj 	    (attr->dma_attr_maxxfer < MMU_PAGESIZE)) {
2323*12f080e7Smrj 		return (DDI_DMA_BADATTR);
2324*12f080e7Smrj 	}
2325*12f080e7Smrj 
2326*12f080e7Smrj 	if (attr->dma_attr_addr_hi <= attr->dma_attr_addr_lo) {
2327*12f080e7Smrj 		return (DDI_DMA_BADATTR);
2328*12f080e7Smrj 	}
2329*12f080e7Smrj 
2330*12f080e7Smrj 	if ((attr->dma_attr_seg & MMU_PAGEOFFSET) != MMU_PAGEOFFSET ||
2331*12f080e7Smrj 	    MMU_PAGESIZE & (attr->dma_attr_granular - 1) ||
2332*12f080e7Smrj 	    attr->dma_attr_sgllen <= 0) {
2333*12f080e7Smrj 		return (DDI_DMA_BADATTR);
2334*12f080e7Smrj 	}
2335*12f080e7Smrj 
2336*12f080e7Smrj 	/* We should be able to DMA into every byte offset in a page */
2337*12f080e7Smrj 	if (maxsegmentsize < MMU_PAGESIZE) {
2338*12f080e7Smrj 		return (DDI_DMA_BADATTR);
2339*12f080e7Smrj 	}
2340*12f080e7Smrj 
2341*12f080e7Smrj 	return (DDI_SUCCESS);
2342*12f080e7Smrj }
2343*12f080e7Smrj 
2344*12f080e7Smrj 
2345*12f080e7Smrj /*
2346*12f080e7Smrj  * rootnex_valid_bind_parms()
2347*12f080e7Smrj  *    Called in ddi_dma_*_bind_handle path to validate its parameters.
2348*12f080e7Smrj  */
2349*12f080e7Smrj /* ARGSUSED */
2350*12f080e7Smrj static int
2351*12f080e7Smrj rootnex_valid_bind_parms(ddi_dma_req_t *dmareq, ddi_dma_attr_t *attr)
2352*12f080e7Smrj {
2353*12f080e7Smrj #if !defined(__amd64)
2354*12f080e7Smrj 	/*
2355*12f080e7Smrj 	 * we only support up to a 2G-1 transfer size on 32-bit kernels so
2356*12f080e7Smrj 	 * we can track the offset for the obsoleted interfaces.
2357*12f080e7Smrj 	 */
2358*12f080e7Smrj 	if (dmareq->dmar_object.dmao_size > 0x7FFFFFFF) {
2359*12f080e7Smrj 		return (DDI_DMA_TOOBIG);
2360*12f080e7Smrj 	}
2361*12f080e7Smrj #endif
2362*12f080e7Smrj 
2363*12f080e7Smrj 	return (DDI_SUCCESS);
2364*12f080e7Smrj }
2365*12f080e7Smrj 
2366*12f080e7Smrj 
2367*12f080e7Smrj /*
2368*12f080e7Smrj  * rootnex_get_sgl()
2369*12f080e7Smrj  *    Called in bind fastpath to get the sgl. Most of this will be replaced
2370*12f080e7Smrj  *    with a call to the vm layer when vm2.0 comes around...
2371*12f080e7Smrj  */
2372*12f080e7Smrj static void
2373*12f080e7Smrj rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
2374*12f080e7Smrj     rootnex_sglinfo_t *sglinfo)
2375*12f080e7Smrj {
2376*12f080e7Smrj 	ddi_dma_atyp_t buftype;
2377*12f080e7Smrj 	uint64_t last_page;
2378*12f080e7Smrj 	uint64_t offset;
2379*12f080e7Smrj 	uint64_t addrhi;
2380*12f080e7Smrj 	uint64_t addrlo;
2381*12f080e7Smrj 	uint64_t maxseg;
2382*12f080e7Smrj 	page_t **pplist;
2383*12f080e7Smrj 	uint64_t paddr;
2384*12f080e7Smrj 	uint32_t psize;
2385*12f080e7Smrj 	uint32_t size;
2386*12f080e7Smrj 	caddr_t vaddr;
2387*12f080e7Smrj 	uint_t pcnt;
2388*12f080e7Smrj 	page_t *pp;
2389*12f080e7Smrj 	uint_t cnt;
2390*12f080e7Smrj 
2391*12f080e7Smrj 
2392*12f080e7Smrj 	/* shortcuts */
2393*12f080e7Smrj 	pplist = dmar_object->dmao_obj.virt_obj.v_priv;
2394*12f080e7Smrj 	vaddr = dmar_object->dmao_obj.virt_obj.v_addr;
2395*12f080e7Smrj 	maxseg = sglinfo->si_max_cookie_size;
2396*12f080e7Smrj 	buftype = dmar_object->dmao_type;
2397*12f080e7Smrj 	addrhi = sglinfo->si_max_addr;
2398*12f080e7Smrj 	addrlo = sglinfo->si_min_addr;
2399*12f080e7Smrj 	size = dmar_object->dmao_size;
2400*12f080e7Smrj 
2401*12f080e7Smrj 	pcnt = 0;
2402*12f080e7Smrj 	cnt = 0;
2403*12f080e7Smrj 
2404*12f080e7Smrj 	/*
2405*12f080e7Smrj 	 * if we were passed down a linked list of pages, i.e. pointer to
2406*12f080e7Smrj 	 * page_t, use this to get our physical address and buf offset.
2407*12f080e7Smrj 	 */
2408*12f080e7Smrj 	if (buftype == DMA_OTYP_PAGES) {
2409*12f080e7Smrj 		pp = dmar_object->dmao_obj.pp_obj.pp_pp;
2410*12f080e7Smrj 		ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
2411*12f080e7Smrj 		offset =  dmar_object->dmao_obj.pp_obj.pp_offset &
2412*12f080e7Smrj 		    MMU_PAGEOFFSET;
2413*12f080e7Smrj 		paddr = ptob64(pp->p_pagenum) + offset;
2414*12f080e7Smrj 		psize = MIN(size, (MMU_PAGESIZE - offset));
2415*12f080e7Smrj 		pp = pp->p_next;
2416*12f080e7Smrj 		sglinfo->si_asp = NULL;
2417*12f080e7Smrj 
2418*12f080e7Smrj 	/*
2419*12f080e7Smrj 	 * We weren't passed down a linked list of pages, but if we were passed
2420*12f080e7Smrj 	 * down an array of pages, use this to get our physical address and buf
2421*12f080e7Smrj 	 * offset.
2422*12f080e7Smrj 	 */
2423*12f080e7Smrj 	} else if (pplist != NULL) {
2424*12f080e7Smrj 		ASSERT((buftype == DMA_OTYP_VADDR) ||
2425*12f080e7Smrj 		    (buftype == DMA_OTYP_BUFVADDR));
2426*12f080e7Smrj 
2427*12f080e7Smrj 		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
2428*12f080e7Smrj 		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
2429*12f080e7Smrj 		if (sglinfo->si_asp == NULL) {
2430*12f080e7Smrj 			sglinfo->si_asp = &kas;
2431*12f080e7Smrj 		}
2432*12f080e7Smrj 
2433*12f080e7Smrj 		ASSERT(!PP_ISFREE(pplist[pcnt]));
2434*12f080e7Smrj 		paddr = ptob64(pplist[pcnt]->p_pagenum);
2435*12f080e7Smrj 		paddr += offset;
2436*12f080e7Smrj 		psize = MIN(size, (MMU_PAGESIZE - offset));
2437*12f080e7Smrj 		pcnt++;
2438*12f080e7Smrj 
2439*12f080e7Smrj 	/*
2440*12f080e7Smrj 	 * All we have is a virtual address, we'll need to call into the VM
2441*12f080e7Smrj 	 * to get the physical address.
2442*12f080e7Smrj 	 */
2443*12f080e7Smrj 	} else {
2444*12f080e7Smrj 		ASSERT((buftype == DMA_OTYP_VADDR) ||
2445*12f080e7Smrj 		    (buftype == DMA_OTYP_BUFVADDR));
2446*12f080e7Smrj 
2447*12f080e7Smrj 		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
2448*12f080e7Smrj 		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
2449*12f080e7Smrj 		if (sglinfo->si_asp == NULL) {
2450*12f080e7Smrj 			sglinfo->si_asp = &kas;
2451*12f080e7Smrj 		}
2452*12f080e7Smrj 
2453*12f080e7Smrj 		paddr = ptob64(hat_getpfnum(sglinfo->si_asp->a_hat, vaddr));
2454*12f080e7Smrj 		paddr += offset;
2455*12f080e7Smrj 		psize = MIN(size, (MMU_PAGESIZE - offset));
2456*12f080e7Smrj 		vaddr += psize;
2457*12f080e7Smrj 	}
2458*12f080e7Smrj 
2459*12f080e7Smrj 	/*
2460*12f080e7Smrj 	 * Setup the first cookie with the physical address of the page and the
2461*12f080e7Smrj 	 * size of the page (which takes into account the initial offset into
2462*12f080e7Smrj 	 * the page.
2463*12f080e7Smrj 	 */
2464*12f080e7Smrj 	sgl[cnt].dmac_laddress = paddr;
2465*12f080e7Smrj 	sgl[cnt].dmac_size = psize;
2466*12f080e7Smrj 	sgl[cnt].dmac_type = 0;
2467*12f080e7Smrj 
2468*12f080e7Smrj 	/*
2469*12f080e7Smrj 	 * Save away the buffer offset into the page. We'll need this later in
2470*12f080e7Smrj 	 * the copy buffer code to help figure out the page index within the
2471*12f080e7Smrj 	 * buffer and the offset into the current page.
2472*12f080e7Smrj 	 */
2473*12f080e7Smrj 	sglinfo->si_buf_offset = offset;
2474*12f080e7Smrj 
2475*12f080e7Smrj 	/*
2476*12f080e7Smrj 	 * If the DMA engine can't reach the physical address, increase how
2477*12f080e7Smrj 	 * much copy buffer we need. We always increase by pagesize so we don't
2478*12f080e7Smrj 	 * have to worry about converting offsets. Set a flag in the cookies
2479*12f080e7Smrj 	 * dmac_type to indicate that it uses the copy buffer. If this isn't the
2480*12f080e7Smrj 	 * last cookie, go to the next cookie (since we separate each page which
2481*12f080e7Smrj 	 * uses the copy buffer in case the copy buffer is not physically
2482*12f080e7Smrj 	 * contiguous.
2483*12f080e7Smrj 	 */
2484*12f080e7Smrj 	if ((paddr < addrlo) || ((paddr + psize) > addrhi)) {
2485*12f080e7Smrj 		sglinfo->si_copybuf_req += MMU_PAGESIZE;
2486*12f080e7Smrj 		sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
2487*12f080e7Smrj 		if ((cnt + 1) < sglinfo->si_max_pages) {
2488*12f080e7Smrj 			cnt++;
2489*12f080e7Smrj 			sgl[cnt].dmac_laddress = 0;
2490*12f080e7Smrj 			sgl[cnt].dmac_size = 0;
2491*12f080e7Smrj 			sgl[cnt].dmac_type = 0;
2492*12f080e7Smrj 		}
2493*12f080e7Smrj 	}
2494*12f080e7Smrj 
2495*12f080e7Smrj 	/*
2496*12f080e7Smrj 	 * save this page's physical address so we can figure out if the next
2497*12f080e7Smrj 	 * page is physically contiguous. Keep decrementing size until we are
2498*12f080e7Smrj 	 * done with the buffer.
2499*12f080e7Smrj 	 */
2500*12f080e7Smrj 	last_page = paddr & MMU_PAGEMASK;
2501*12f080e7Smrj 	size -= psize;
2502*12f080e7Smrj 
2503*12f080e7Smrj 	while (size > 0) {
2504*12f080e7Smrj 		/* Get the size for this page (i.e. partial or full page) */
2505*12f080e7Smrj 		psize = MIN(size, MMU_PAGESIZE);
2506*12f080e7Smrj 
2507*12f080e7Smrj 		if (buftype == DMA_OTYP_PAGES) {
2508*12f080e7Smrj 			/* get the paddr from the page_t */
2509*12f080e7Smrj 			ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
2510*12f080e7Smrj 			paddr = ptob64(pp->p_pagenum);
2511*12f080e7Smrj 			pp = pp->p_next;
2512*12f080e7Smrj 		} else if (pplist != NULL) {
2513*12f080e7Smrj 			/* index into the array of page_t's to get the paddr */
2514*12f080e7Smrj 			ASSERT(!PP_ISFREE(pplist[pcnt]));
2515*12f080e7Smrj 			paddr = ptob64(pplist[pcnt]->p_pagenum);
2516*12f080e7Smrj 			pcnt++;
2517*12f080e7Smrj 		} else {
2518*12f080e7Smrj 			/* call into the VM to get the paddr */
2519*12f080e7Smrj 			paddr =  ptob64(hat_getpfnum(sglinfo->si_asp->a_hat,
2520*12f080e7Smrj 			    vaddr));
2521*12f080e7Smrj 			vaddr += psize;
2522*12f080e7Smrj 		}
2523*12f080e7Smrj 
2524*12f080e7Smrj 		/* check to see if this page needs the copy buffer */
2525*12f080e7Smrj 		if ((paddr < addrlo) || ((paddr + psize) > addrhi)) {
2526*12f080e7Smrj 			sglinfo->si_copybuf_req += MMU_PAGESIZE;
2527*12f080e7Smrj 
2528*12f080e7Smrj 			/*
2529*12f080e7Smrj 			 * if there is something in the current cookie, go to
2530*12f080e7Smrj 			 * the next one. We only want one page in a cookie which
2531*12f080e7Smrj 			 * uses the copybuf since the copybuf doesn't have to
2532*12f080e7Smrj 			 * be physically contiguous.
2533*12f080e7Smrj 			 */
2534*12f080e7Smrj 			if (sgl[cnt].dmac_size != 0) {
2535*12f080e7Smrj 				cnt++;
2536*12f080e7Smrj 			}
2537*12f080e7Smrj 			sgl[cnt].dmac_laddress = paddr;
2538*12f080e7Smrj 			sgl[cnt].dmac_size = psize;
2539*12f080e7Smrj #if defined(__amd64)
2540*12f080e7Smrj 			sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
2541*12f080e7Smrj #else
2542*12f080e7Smrj 			/*
2543*12f080e7Smrj 			 * save the buf offset for 32-bit kernel. used in the
2544*12f080e7Smrj 			 * obsoleted interfaces.
2545*12f080e7Smrj 			 */
2546*12f080e7Smrj 			sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF |
2547*12f080e7Smrj 			    (dmar_object->dmao_size - size);
2548*12f080e7Smrj #endif
2549*12f080e7Smrj 			/* if this isn't the last cookie, go to the next one */
2550*12f080e7Smrj 			if ((cnt + 1) < sglinfo->si_max_pages) {
2551*12f080e7Smrj 				cnt++;
2552*12f080e7Smrj 				sgl[cnt].dmac_laddress = 0;
2553*12f080e7Smrj 				sgl[cnt].dmac_size = 0;
2554*12f080e7Smrj 				sgl[cnt].dmac_type = 0;
2555*12f080e7Smrj 			}
2556*12f080e7Smrj 
2557*12f080e7Smrj 		/*
2558*12f080e7Smrj 		 * this page didn't need the copy buffer, if it's not physically
2559*12f080e7Smrj 		 * contiguous, or it would put us over a segment boundary, or it
2560*12f080e7Smrj 		 * puts us over the max cookie size, or the current sgl doesn't
2561*12f080e7Smrj 		 * have anything in it.
2562*12f080e7Smrj 		 */
2563*12f080e7Smrj 		} else if (((last_page + MMU_PAGESIZE) != paddr) ||
2564*12f080e7Smrj 		    !(paddr & sglinfo->si_segmask) ||
2565*12f080e7Smrj 		    ((sgl[cnt].dmac_size + psize) > maxseg) ||
2566*12f080e7Smrj 		    (sgl[cnt].dmac_size == 0)) {
2567*12f080e7Smrj 			/*
2568*12f080e7Smrj 			 * if we're not already in a new cookie, go to the next
2569*12f080e7Smrj 			 * cookie.
2570*12f080e7Smrj 			 */
2571*12f080e7Smrj 			if (sgl[cnt].dmac_size != 0) {
2572*12f080e7Smrj 				cnt++;
2573*12f080e7Smrj 			}
2574*12f080e7Smrj 
2575*12f080e7Smrj 			/* save the cookie information */
2576*12f080e7Smrj 			sgl[cnt].dmac_laddress = paddr;
2577*12f080e7Smrj 			sgl[cnt].dmac_size = psize;
2578*12f080e7Smrj #if defined(__amd64)
2579*12f080e7Smrj 			sgl[cnt].dmac_type = 0;
2580*12f080e7Smrj #else
2581*12f080e7Smrj 			/*
2582*12f080e7Smrj 			 * save the buf offset for 32-bit kernel. used in the
2583*12f080e7Smrj 			 * obsoleted interfaces.
2584*12f080e7Smrj 			 */
2585*12f080e7Smrj 			sgl[cnt].dmac_type = dmar_object->dmao_size - size;
2586*12f080e7Smrj #endif
2587*12f080e7Smrj 
2588*12f080e7Smrj 		/*
2589*12f080e7Smrj 		 * this page didn't need the copy buffer, it is physically
2590*12f080e7Smrj 		 * contiguous with the last page, and it's <= the max cookie
2591*12f080e7Smrj 		 * size.
2592*12f080e7Smrj 		 */
2593*12f080e7Smrj 		} else {
2594*12f080e7Smrj 			sgl[cnt].dmac_size += psize;
2595*12f080e7Smrj 
2596*12f080e7Smrj 			/*
2597*12f080e7Smrj 			 * if this exactly ==  the maximum cookie size, and
2598*12f080e7Smrj 			 * it isn't the last cookie, go to the next cookie.
2599*12f080e7Smrj 			 */
2600*12f080e7Smrj 			if (((sgl[cnt].dmac_size + psize) == maxseg) &&
2601*12f080e7Smrj 			    ((cnt + 1) < sglinfo->si_max_pages)) {
2602*12f080e7Smrj 				cnt++;
2603*12f080e7Smrj 				sgl[cnt].dmac_laddress = 0;
2604*12f080e7Smrj 				sgl[cnt].dmac_size = 0;
2605*12f080e7Smrj 				sgl[cnt].dmac_type = 0;
2606*12f080e7Smrj 			}
2607*12f080e7Smrj 		}
2608*12f080e7Smrj 
2609*12f080e7Smrj 		/*
2610*12f080e7Smrj 		 * save this page's physical address so we can figure out if the
2611*12f080e7Smrj 		 * next page is physically contiguous. Keep decrementing size
2612*12f080e7Smrj 		 * until we are done with the buffer.
2613*12f080e7Smrj 		 */
2614*12f080e7Smrj 		last_page = paddr;
2615*12f080e7Smrj 		size -= psize;
2616*12f080e7Smrj 	}
2617*12f080e7Smrj 
2618*12f080e7Smrj 	/* we're done, save away how many cookies the sgl has */
2619*12f080e7Smrj 	if (sgl[cnt].dmac_size == 0) {
2620*12f080e7Smrj 		ASSERT(cnt < sglinfo->si_max_pages);
2621*12f080e7Smrj 		sglinfo->si_sgl_size = cnt;
2622*12f080e7Smrj 	} else {
2623*12f080e7Smrj 		sglinfo->si_sgl_size = cnt + 1;
2624*12f080e7Smrj 	}
2625*12f080e7Smrj }
2626*12f080e7Smrj 
2627*12f080e7Smrj 
2628*12f080e7Smrj /*
2629*12f080e7Smrj  * rootnex_bind_slowpath()
2630*12f080e7Smrj  *    Call in the bind path if the calling driver can't use the sgl without
2631*12f080e7Smrj  *    modifying it. We either need to use the copy buffer and/or we will end up
2632*12f080e7Smrj  *    with a partial bind.
2633*12f080e7Smrj  */
2634*12f080e7Smrj static int
2635*12f080e7Smrj rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
2636*12f080e7Smrj     rootnex_dma_t *dma, ddi_dma_attr_t *attr, int kmflag)
2637*12f080e7Smrj {
2638*12f080e7Smrj 	rootnex_sglinfo_t *sinfo;
2639*12f080e7Smrj 	rootnex_window_t *window;
2640*12f080e7Smrj 	ddi_dma_cookie_t *cookie;
2641*12f080e7Smrj 	size_t copybuf_used;
2642*12f080e7Smrj 	size_t dmac_size;
2643*12f080e7Smrj 	boolean_t partial;
2644*12f080e7Smrj 	off_t cur_offset;
2645*12f080e7Smrj 	page_t *cur_pp;
2646*12f080e7Smrj 	major_t mnum;
2647*12f080e7Smrj 	int e;
2648*12f080e7Smrj 	int i;
2649*12f080e7Smrj 
2650*12f080e7Smrj 
2651*12f080e7Smrj 	sinfo = &dma->dp_sglinfo;
2652*12f080e7Smrj 	copybuf_used = 0;
2653*12f080e7Smrj 	partial = B_FALSE;
2654*12f080e7Smrj 
2655*12f080e7Smrj 	/*
2656*12f080e7Smrj 	 * If we're using the copybuf, set the copybuf state in dma struct.
2657*12f080e7Smrj 	 * Needs to be first since it sets the copy buffer size.
2658*12f080e7Smrj 	 */
2659*12f080e7Smrj 	if (sinfo->si_copybuf_req != 0) {
2660*12f080e7Smrj 		e = rootnex_setup_copybuf(hp, dmareq, dma, attr);
2661*12f080e7Smrj 		if (e != DDI_SUCCESS) {
2662*12f080e7Smrj 			return (e);
2663*12f080e7Smrj 		}
2664*12f080e7Smrj 	} else {
2665*12f080e7Smrj 		dma->dp_copybuf_size = 0;
2666*12f080e7Smrj 	}
2667*12f080e7Smrj 
2668*12f080e7Smrj 	/*
2669*12f080e7Smrj 	 * Figure out if we need to do a partial mapping. If so, figure out
2670*12f080e7Smrj 	 * if we need to trim the buffers when we munge the sgl.
2671*12f080e7Smrj 	 */
2672*12f080e7Smrj 	if ((dma->dp_copybuf_size < sinfo->si_copybuf_req) ||
2673*12f080e7Smrj 	    (dma->dp_dma.dmao_size > dma->dp_maxxfer) ||
2674*12f080e7Smrj 	    (attr->dma_attr_sgllen < sinfo->si_sgl_size)) {
2675*12f080e7Smrj 		dma->dp_partial_required = B_TRUE;
2676*12f080e7Smrj 		if (attr->dma_attr_granular != 1) {
2677*12f080e7Smrj 			dma->dp_trim_required = B_TRUE;
2678*12f080e7Smrj 		}
2679*12f080e7Smrj 	} else {
2680*12f080e7Smrj 		dma->dp_partial_required = B_FALSE;
2681*12f080e7Smrj 		dma->dp_trim_required = B_FALSE;
2682*12f080e7Smrj 	}
2683*12f080e7Smrj 
2684*12f080e7Smrj 	/* If we need to do a partial bind, make sure the driver supports it */
2685*12f080e7Smrj 	if (dma->dp_partial_required &&
2686*12f080e7Smrj 	    !(dmareq->dmar_flags & DDI_DMA_PARTIAL)) {
2687*12f080e7Smrj 
2688*12f080e7Smrj 		mnum = ddi_driver_major(dma->dp_dip);
2689*12f080e7Smrj 		/*
2690*12f080e7Smrj 		 * patchable which allows us to print one warning per major
2691*12f080e7Smrj 		 * number.
2692*12f080e7Smrj 		 */
2693*12f080e7Smrj 		if ((rootnex_bind_warn) &&
2694*12f080e7Smrj 		    ((rootnex_warn_list[mnum] & ROOTNEX_BIND_WARNING) == 0)) {
2695*12f080e7Smrj 			rootnex_warn_list[mnum] |= ROOTNEX_BIND_WARNING;
2696*12f080e7Smrj 			cmn_err(CE_WARN, "!%s: coding error detected, the "
2697*12f080e7Smrj 			    "driver is using ddi_dma_attr(9S) incorrectly. "
2698*12f080e7Smrj 			    "There is a small risk of data corruption in "
2699*12f080e7Smrj 			    "particular with large I/Os. The driver should be "
2700*12f080e7Smrj 			    "replaced with a corrected version for proper "
2701*12f080e7Smrj 			    "system operation. To disable this warning, add "
2702*12f080e7Smrj 			    "'set rootnex:rootnex_bind_warn=0' to "
2703*12f080e7Smrj 			    "/etc/system(4).", ddi_driver_name(dma->dp_dip));
2704*12f080e7Smrj 		}
2705*12f080e7Smrj 		return (DDI_DMA_TOOBIG);
2706*12f080e7Smrj 	}
2707*12f080e7Smrj 
2708*12f080e7Smrj 	/*
2709*12f080e7Smrj 	 * we might need multiple windows, setup state to handle them. In this
2710*12f080e7Smrj 	 * code path, we will have at least one window.
2711*12f080e7Smrj 	 */
2712*12f080e7Smrj 	e = rootnex_setup_windows(hp, dma, attr, kmflag);
2713*12f080e7Smrj 	if (e != DDI_SUCCESS) {
2714*12f080e7Smrj 		rootnex_teardown_copybuf(dma);
2715*12f080e7Smrj 		return (e);
2716*12f080e7Smrj 	}
2717*12f080e7Smrj 
2718*12f080e7Smrj 	window = &dma->dp_window[0];
2719*12f080e7Smrj 	cookie = &dma->dp_cookies[0];
2720*12f080e7Smrj 	cur_offset = 0;
2721*12f080e7Smrj 	rootnex_init_win(hp, dma, window, cookie, cur_offset);
2722*12f080e7Smrj 	if (dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) {
2723*12f080e7Smrj 		cur_pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp;
2724*12f080e7Smrj 	}
2725*12f080e7Smrj 
2726*12f080e7Smrj 	/* loop though all the cookies we got back from get_sgl() */
2727*12f080e7Smrj 	for (i = 0; i < sinfo->si_sgl_size; i++) {
2728*12f080e7Smrj 		/*
2729*12f080e7Smrj 		 * If we're using the copy buffer, check this cookie and setup
2730*12f080e7Smrj 		 * its associated copy buffer state. If this cookie uses the
2731*12f080e7Smrj 		 * copy buffer, make sure we sync this window during dma_sync.
2732*12f080e7Smrj 		 */
2733*12f080e7Smrj 		if (dma->dp_copybuf_size > 0) {
2734*12f080e7Smrj 			rootnex_setup_cookie(&dmareq->dmar_object, dma, cookie,
2735*12f080e7Smrj 			    cur_offset, &copybuf_used, &cur_pp);
2736*12f080e7Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
2737*12f080e7Smrj 				window->wd_dosync = B_TRUE;
2738*12f080e7Smrj 			}
2739*12f080e7Smrj 		}
2740*12f080e7Smrj 
2741*12f080e7Smrj 		/*
2742*12f080e7Smrj 		 * save away the cookie size, since it could be modified in
2743*12f080e7Smrj 		 * the windowing code.
2744*12f080e7Smrj 		 */
2745*12f080e7Smrj 		dmac_size = cookie->dmac_size;
2746*12f080e7Smrj 
2747*12f080e7Smrj 		/* if we went over max copybuf size */
2748*12f080e7Smrj 		if (dma->dp_copybuf_size &&
2749*12f080e7Smrj 		    (copybuf_used > dma->dp_copybuf_size)) {
2750*12f080e7Smrj 			partial = B_TRUE;
2751*12f080e7Smrj 			e = rootnex_copybuf_window_boundary(hp, dma, &window,
2752*12f080e7Smrj 			    cookie, cur_offset, &copybuf_used);
2753*12f080e7Smrj 			if (e != DDI_SUCCESS) {
2754*12f080e7Smrj 				rootnex_teardown_copybuf(dma);
2755*12f080e7Smrj 				rootnex_teardown_windows(dma);
2756*12f080e7Smrj 				return (e);
2757*12f080e7Smrj 			}
2758*12f080e7Smrj 
2759*12f080e7Smrj 			/*
2760*12f080e7Smrj 			 * if the coookie uses the copy buffer, make sure the
2761*12f080e7Smrj 			 * new window we just moved to is set to sync.
2762*12f080e7Smrj 			 */
2763*12f080e7Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
2764*12f080e7Smrj 				window->wd_dosync = B_TRUE;
2765*12f080e7Smrj 			}
2766*12f080e7Smrj 			DTRACE_PROBE1(rootnex__copybuf__window, dev_info_t *,
2767*12f080e7Smrj 			    dma->dp_dip);
2768*12f080e7Smrj 
2769*12f080e7Smrj 		/* if the cookie cnt == max sgllen, move to the next window */
2770*12f080e7Smrj 		} else if (window->wd_cookie_cnt >= attr->dma_attr_sgllen) {
2771*12f080e7Smrj 			partial = B_TRUE;
2772*12f080e7Smrj 			ASSERT(window->wd_cookie_cnt == attr->dma_attr_sgllen);
2773*12f080e7Smrj 			e = rootnex_sgllen_window_boundary(hp, dma, &window,
2774*12f080e7Smrj 			    cookie, attr, cur_offset);
2775*12f080e7Smrj 			if (e != DDI_SUCCESS) {
2776*12f080e7Smrj 				rootnex_teardown_copybuf(dma);
2777*12f080e7Smrj 				rootnex_teardown_windows(dma);
2778*12f080e7Smrj 				return (e);
2779*12f080e7Smrj 			}
2780*12f080e7Smrj 
2781*12f080e7Smrj 			/*
2782*12f080e7Smrj 			 * if the coookie uses the copy buffer, make sure the
2783*12f080e7Smrj 			 * new window we just moved to is set to sync.
2784*12f080e7Smrj 			 */
2785*12f080e7Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
2786*12f080e7Smrj 				window->wd_dosync = B_TRUE;
2787*12f080e7Smrj 			}
2788*12f080e7Smrj 			DTRACE_PROBE1(rootnex__sgllen__window, dev_info_t *,
2789*12f080e7Smrj 			    dma->dp_dip);
2790*12f080e7Smrj 
2791*12f080e7Smrj 		/* else if we will be over maxxfer */
2792*12f080e7Smrj 		} else if ((window->wd_size + dmac_size) >
2793*12f080e7Smrj 		    dma->dp_maxxfer) {
2794*12f080e7Smrj 			partial = B_TRUE;
2795*12f080e7Smrj 			e = rootnex_maxxfer_window_boundary(hp, dma, &window,
2796*12f080e7Smrj 			    cookie);
2797*12f080e7Smrj 			if (e != DDI_SUCCESS) {
2798*12f080e7Smrj 				rootnex_teardown_copybuf(dma);
2799*12f080e7Smrj 				rootnex_teardown_windows(dma);
2800*12f080e7Smrj 				return (e);
2801*12f080e7Smrj 			}
2802*12f080e7Smrj 
2803*12f080e7Smrj 			/*
2804*12f080e7Smrj 			 * if the coookie uses the copy buffer, make sure the
2805*12f080e7Smrj 			 * new window we just moved to is set to sync.
2806*12f080e7Smrj 			 */
2807*12f080e7Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
2808*12f080e7Smrj 				window->wd_dosync = B_TRUE;
2809*12f080e7Smrj 			}
2810*12f080e7Smrj 			DTRACE_PROBE1(rootnex__maxxfer__window, dev_info_t *,
2811*12f080e7Smrj 			    dma->dp_dip);
2812*12f080e7Smrj 
2813*12f080e7Smrj 		/* else this cookie fits in the current window */
2814*12f080e7Smrj 		} else {
2815*12f080e7Smrj 			window->wd_cookie_cnt++;
2816*12f080e7Smrj 			window->wd_size += dmac_size;
2817*12f080e7Smrj 		}
2818*12f080e7Smrj 
2819*12f080e7Smrj 		/* track our offset into the buffer, go to the next cookie */
2820*12f080e7Smrj 		ASSERT(dmac_size <= dma->dp_dma.dmao_size);
2821*12f080e7Smrj 		ASSERT(cookie->dmac_size <= dmac_size);
2822*12f080e7Smrj 		cur_offset += dmac_size;
2823*12f080e7Smrj 		cookie++;
2824*12f080e7Smrj 	}
2825*12f080e7Smrj 
2826*12f080e7Smrj 	/* if we ended up with a zero sized window in the end, clean it up */
2827*12f080e7Smrj 	if (window->wd_size == 0) {
2828*12f080e7Smrj 		hp->dmai_nwin--;
2829*12f080e7Smrj 		window--;
2830*12f080e7Smrj 	}
2831*12f080e7Smrj 
2832*12f080e7Smrj 	ASSERT(window->wd_trim.tr_trim_last == B_FALSE);
2833*12f080e7Smrj 
2834*12f080e7Smrj 	if (!partial) {
2835*12f080e7Smrj 		return (DDI_DMA_MAPPED);
2836*12f080e7Smrj 	}
2837*12f080e7Smrj 
2838*12f080e7Smrj 	ASSERT(dma->dp_partial_required);
2839*12f080e7Smrj 	return (DDI_DMA_PARTIAL_MAP);
2840*12f080e7Smrj }
2841*12f080e7Smrj 
2842*12f080e7Smrj 
2843*12f080e7Smrj /*
2844*12f080e7Smrj  * rootnex_setup_copybuf()
2845*12f080e7Smrj  *    Called in bind slowpath. Figures out if we're going to use the copy
2846*12f080e7Smrj  *    buffer, and if we do, sets up the basic state to handle it.
2847*12f080e7Smrj  */
2848*12f080e7Smrj static int
2849*12f080e7Smrj rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
2850*12f080e7Smrj     rootnex_dma_t *dma, ddi_dma_attr_t *attr)
2851*12f080e7Smrj {
2852*12f080e7Smrj 	rootnex_sglinfo_t *sinfo;
2853*12f080e7Smrj 	ddi_dma_attr_t lattr;
2854*12f080e7Smrj 	size_t max_copybuf;
2855*12f080e7Smrj 	int cansleep;
2856*12f080e7Smrj 	int e;
2857*12f080e7Smrj #if !defined(__amd64)
2858*12f080e7Smrj 	int vmflag;
2859*12f080e7Smrj #endif
2860*12f080e7Smrj 
2861*12f080e7Smrj 
2862*12f080e7Smrj 	sinfo = &dma->dp_sglinfo;
2863*12f080e7Smrj 
2864*12f080e7Smrj 	/*
2865*12f080e7Smrj 	 * read this first so it's consistent through the routine so we can
2866*12f080e7Smrj 	 * patch it on the fly.
2867*12f080e7Smrj 	 */
2868*12f080e7Smrj 	max_copybuf = rootnex_max_copybuf_size & MMU_PAGEMASK;
2869*12f080e7Smrj 
2870*12f080e7Smrj 	/* We need to call into the rootnex on ddi_dma_sync() */
2871*12f080e7Smrj 	hp->dmai_rflags &= ~DMP_NOSYNC;
2872*12f080e7Smrj 
2873*12f080e7Smrj 	/* make sure the copybuf size <= the max size */
2874*12f080e7Smrj 	dma->dp_copybuf_size = MIN(sinfo->si_copybuf_req, max_copybuf);
2875*12f080e7Smrj 	ASSERT((dma->dp_copybuf_size & MMU_PAGEOFFSET) == 0);
2876*12f080e7Smrj 
2877*12f080e7Smrj #if !defined(__amd64)
2878*12f080e7Smrj 	/*
2879*12f080e7Smrj 	 * if we don't have kva space to copy to/from, allocate the KVA space
2880*12f080e7Smrj 	 * now. We only do this for the 32-bit kernel. We use seg kpm space for
2881*12f080e7Smrj 	 * the 64-bit kernel.
2882*12f080e7Smrj 	 */
2883*12f080e7Smrj 	if ((dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) ||
2884*12f080e7Smrj 	    (dmareq->dmar_object.dmao_obj.virt_obj.v_as != NULL)) {
2885*12f080e7Smrj 
2886*12f080e7Smrj 		/* convert the sleep flags */
2887*12f080e7Smrj 		if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
2888*12f080e7Smrj 			vmflag = VM_SLEEP;
2889*12f080e7Smrj 		} else {
2890*12f080e7Smrj 			vmflag = VM_NOSLEEP;
2891*12f080e7Smrj 		}
2892*12f080e7Smrj 
2893*12f080e7Smrj 		/* allocate Kernel VA space that we can bcopy to/from */
2894*12f080e7Smrj 		dma->dp_kva = vmem_alloc(heap_arena, dma->dp_copybuf_size,
2895*12f080e7Smrj 		    vmflag);
2896*12f080e7Smrj 		if (dma->dp_kva == NULL) {
2897*12f080e7Smrj 			return (DDI_DMA_NORESOURCES);
2898*12f080e7Smrj 		}
2899*12f080e7Smrj 	}
2900*12f080e7Smrj #endif
2901*12f080e7Smrj 
2902*12f080e7Smrj 	/* convert the sleep flags */
2903*12f080e7Smrj 	if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
2904*12f080e7Smrj 		cansleep = 1;
2905*12f080e7Smrj 	} else {
2906*12f080e7Smrj 		cansleep = 0;
2907*12f080e7Smrj 	}
2908*12f080e7Smrj 
2909*12f080e7Smrj 	/*
2910*12f080e7Smrj 	 * Allocated the actual copy buffer. This needs to fit within the DMA
2911*12f080e7Smrj 	 * engines limits, so we can't use kmem_alloc...
2912*12f080e7Smrj 	 */
2913*12f080e7Smrj 	lattr = *attr;
2914*12f080e7Smrj 	lattr.dma_attr_align = MMU_PAGESIZE;
2915*12f080e7Smrj 	e = i_ddi_mem_alloc(dma->dp_dip, &lattr, dma->dp_copybuf_size, cansleep,
2916*12f080e7Smrj 	    0, NULL, &dma->dp_cbaddr, &dma->dp_cbsize, NULL);
2917*12f080e7Smrj 	if (e != DDI_SUCCESS) {
2918*12f080e7Smrj #if !defined(__amd64)
2919*12f080e7Smrj 		if (dma->dp_kva != NULL) {
2920*12f080e7Smrj 			vmem_free(heap_arena, dma->dp_kva,
2921*12f080e7Smrj 			    dma->dp_copybuf_size);
2922*12f080e7Smrj 		}
2923*12f080e7Smrj #endif
2924*12f080e7Smrj 		return (DDI_DMA_NORESOURCES);
2925*12f080e7Smrj 	}
2926*12f080e7Smrj 
2927*12f080e7Smrj 	DTRACE_PROBE2(rootnex__alloc__copybuf, dev_info_t *, dma->dp_dip,
2928*12f080e7Smrj 	    size_t, dma->dp_copybuf_size);
2929*12f080e7Smrj 
2930*12f080e7Smrj 	return (DDI_SUCCESS);
2931*12f080e7Smrj }
2932*12f080e7Smrj 
2933*12f080e7Smrj 
2934*12f080e7Smrj /*
2935*12f080e7Smrj  * rootnex_setup_windows()
2936*12f080e7Smrj  *    Called in bind slowpath to setup the window state. We always have windows
2937*12f080e7Smrj  *    in the slowpath. Even if the window count = 1.
2938*12f080e7Smrj  */
2939*12f080e7Smrj static int
2940*12f080e7Smrj rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
2941*12f080e7Smrj     ddi_dma_attr_t *attr, int kmflag)
2942*12f080e7Smrj {
2943*12f080e7Smrj 	rootnex_window_t *windowp;
2944*12f080e7Smrj 	rootnex_sglinfo_t *sinfo;
2945*12f080e7Smrj 	size_t copy_state_size;
2946*12f080e7Smrj 	size_t win_state_size;
2947*12f080e7Smrj 	size_t state_available;
2948*12f080e7Smrj 	size_t space_needed;
2949*12f080e7Smrj 	uint_t copybuf_win;
2950*12f080e7Smrj 	uint_t maxxfer_win;
2951*12f080e7Smrj 	size_t space_used;
2952*12f080e7Smrj 	uint_t sglwin;
2953*12f080e7Smrj 
2954*12f080e7Smrj 
2955*12f080e7Smrj 	sinfo = &dma->dp_sglinfo;
2956*12f080e7Smrj 
2957*12f080e7Smrj 	dma->dp_current_win = 0;
2958*12f080e7Smrj 	hp->dmai_nwin = 0;
2959*12f080e7Smrj 
2960*12f080e7Smrj 	/* If we don't need to do a partial, we only have one window */
2961*12f080e7Smrj 	if (!dma->dp_partial_required) {
2962*12f080e7Smrj 		dma->dp_max_win = 1;
2963*12f080e7Smrj 
2964*12f080e7Smrj 	/*
2965*12f080e7Smrj 	 * we need multiple windows, need to figure out the worse case number
2966*12f080e7Smrj 	 * of windows.
2967*12f080e7Smrj 	 */
29687c478bd9Sstevel@tonic-gate 	} else {
29697c478bd9Sstevel@tonic-gate 		/*
2970*12f080e7Smrj 		 * if we need windows because we need more copy buffer that
2971*12f080e7Smrj 		 * we allow, the worse case number of windows we could need
2972*12f080e7Smrj 		 * here would be (copybuf space required / copybuf space that
2973*12f080e7Smrj 		 * we have) plus one for remainder, and plus 2 to handle the
2974*12f080e7Smrj 		 * extra pages on the trim for the first and last pages of the
2975*12f080e7Smrj 		 * buffer (a page is the minimum window size so under the right
2976*12f080e7Smrj 		 * attr settings, you could have a window for each page).
2977*12f080e7Smrj 		 * The last page will only be hit here if the size is not a
2978*12f080e7Smrj 		 * multiple of the granularity (which theoretically shouldn't
2979*12f080e7Smrj 		 * be the case but never has been enforced, so we could have
2980*12f080e7Smrj 		 * broken things without it).
29817c478bd9Sstevel@tonic-gate 		 */
2982*12f080e7Smrj 		if (sinfo->si_copybuf_req > dma->dp_copybuf_size) {
2983*12f080e7Smrj 			ASSERT(dma->dp_copybuf_size > 0);
2984*12f080e7Smrj 			copybuf_win = (sinfo->si_copybuf_req /
2985*12f080e7Smrj 			    dma->dp_copybuf_size) + 1 + 2;
29867c478bd9Sstevel@tonic-gate 		} else {
2987*12f080e7Smrj 			copybuf_win = 0;
29887c478bd9Sstevel@tonic-gate 		}
2989*12f080e7Smrj 
2990*12f080e7Smrj 		/*
2991*12f080e7Smrj 		 * if we need windows because we have more cookies than the H/W
2992*12f080e7Smrj 		 * can handle, the number of windows we would need here would
2993*12f080e7Smrj 		 * be (cookie count / cookies count H/W supports) plus one for
2994*12f080e7Smrj 		 * remainder, and plus 2 to handle the extra pages on the trim
2995*12f080e7Smrj 		 * (see above comment about trim)
2996*12f080e7Smrj 		 */
2997*12f080e7Smrj 		if (attr->dma_attr_sgllen < sinfo->si_sgl_size) {
2998*12f080e7Smrj 			sglwin = ((sinfo->si_sgl_size / attr->dma_attr_sgllen)
2999*12f080e7Smrj 			    + 1) + 2;
30007c478bd9Sstevel@tonic-gate 		} else {
3001*12f080e7Smrj 			sglwin = 0;
30027c478bd9Sstevel@tonic-gate 		}
3003*12f080e7Smrj 
3004*12f080e7Smrj 		/*
3005*12f080e7Smrj 		 * if we need windows because we're binding more memory than the
3006*12f080e7Smrj 		 * H/W can transfer at once, the number of windows we would need
3007*12f080e7Smrj 		 * here would be (xfer count / max xfer H/W supports) plus one
3008*12f080e7Smrj 		 * for remainder, and plus 2 to handle the extra pages on the
3009*12f080e7Smrj 		 * trim (see above comment about trim)
3010*12f080e7Smrj 		 */
3011*12f080e7Smrj 		if (dma->dp_dma.dmao_size > dma->dp_maxxfer) {
3012*12f080e7Smrj 			maxxfer_win = (dma->dp_dma.dmao_size /
3013*12f080e7Smrj 			    dma->dp_maxxfer) + 1 + 2;
3014*12f080e7Smrj 		} else {
3015*12f080e7Smrj 			maxxfer_win = 0;
30167c478bd9Sstevel@tonic-gate 		}
3017*12f080e7Smrj 		dma->dp_max_win =  copybuf_win + sglwin + maxxfer_win;
3018*12f080e7Smrj 		ASSERT(dma->dp_max_win > 0);
3019*12f080e7Smrj 	}
3020*12f080e7Smrj 	win_state_size = dma->dp_max_win * sizeof (rootnex_window_t);
3021*12f080e7Smrj 
3022*12f080e7Smrj 	/*
3023*12f080e7Smrj 	 * Get space for window and potential copy buffer state. Before we
3024*12f080e7Smrj 	 * go and allocate memory, see if we can get away with using what's
3025*12f080e7Smrj 	 * left in the pre-allocted state or the dynamically allocated sgl.
3026*12f080e7Smrj 	 */
3027*12f080e7Smrj 	space_used = (uintptr_t)(sinfo->si_sgl_size *
3028*12f080e7Smrj 	    sizeof (ddi_dma_cookie_t));
3029*12f080e7Smrj 
3030*12f080e7Smrj 	/* if we dynamically allocated space for the cookies */
3031*12f080e7Smrj 	if (dma->dp_need_to_free_cookie) {
3032*12f080e7Smrj 		/* if we have more space in the pre-allocted buffer, use it */
3033*12f080e7Smrj 		ASSERT(space_used <= dma->dp_cookie_size);
3034*12f080e7Smrj 		if ((dma->dp_cookie_size - space_used) <=
3035*12f080e7Smrj 		    rootnex_state->r_prealloc_size) {
3036*12f080e7Smrj 			state_available = rootnex_state->r_prealloc_size;
3037*12f080e7Smrj 			windowp = (rootnex_window_t *)dma->dp_prealloc_buffer;
3038*12f080e7Smrj 
3039*12f080e7Smrj 		/*
3040*12f080e7Smrj 		 * else, we have more free space in the dynamically allocated
3041*12f080e7Smrj 		 * buffer, i.e. the buffer wasn't worse case fragmented so we
3042*12f080e7Smrj 		 * didn't need a lot of cookies.
3043*12f080e7Smrj 		 */
3044*12f080e7Smrj 		} else {
3045*12f080e7Smrj 			state_available = dma->dp_cookie_size - space_used;
3046*12f080e7Smrj 			windowp = (rootnex_window_t *)
3047*12f080e7Smrj 			    &dma->dp_cookies[sinfo->si_sgl_size];
3048*12f080e7Smrj 		}
3049*12f080e7Smrj 
3050*12f080e7Smrj 	/* we used the pre-alloced buffer */
3051*12f080e7Smrj 	} else {
3052*12f080e7Smrj 		ASSERT(space_used <= rootnex_state->r_prealloc_size);
3053*12f080e7Smrj 		state_available = rootnex_state->r_prealloc_size - space_used;
3054*12f080e7Smrj 		windowp = (rootnex_window_t *)
3055*12f080e7Smrj 		    &dma->dp_cookies[sinfo->si_sgl_size];
3056*12f080e7Smrj 	}
3057*12f080e7Smrj 
3058*12f080e7Smrj 	/*
3059*12f080e7Smrj 	 * figure out how much state we need to track the copy buffer. Add an
3060*12f080e7Smrj 	 * addition 8 bytes for pointer alignemnt later.
3061*12f080e7Smrj 	 */
3062*12f080e7Smrj 	if (dma->dp_copybuf_size > 0) {
3063*12f080e7Smrj 		copy_state_size = sinfo->si_max_pages *
3064*12f080e7Smrj 		    sizeof (rootnex_pgmap_t);
3065*12f080e7Smrj 	} else {
3066*12f080e7Smrj 		copy_state_size = 0;
3067*12f080e7Smrj 	}
3068*12f080e7Smrj 	/* add an additional 8 bytes for pointer alignment */
3069*12f080e7Smrj 	space_needed = win_state_size + copy_state_size + 0x8;
3070*12f080e7Smrj 
3071*12f080e7Smrj 	/* if we have enough space already, use it */
3072*12f080e7Smrj 	if (state_available >= space_needed) {
3073*12f080e7Smrj 		dma->dp_window = windowp;
3074*12f080e7Smrj 		dma->dp_need_to_free_window = B_FALSE;
3075*12f080e7Smrj 
3076*12f080e7Smrj 	/* not enough space, need to allocate more. */
3077*12f080e7Smrj 	} else {
3078*12f080e7Smrj 		dma->dp_window = kmem_alloc(space_needed, kmflag);
3079*12f080e7Smrj 		if (dma->dp_window == NULL) {
3080*12f080e7Smrj 			return (DDI_DMA_NORESOURCES);
3081*12f080e7Smrj 		}
3082*12f080e7Smrj 		dma->dp_need_to_free_window = B_TRUE;
3083*12f080e7Smrj 		dma->dp_window_size = space_needed;
3084*12f080e7Smrj 		DTRACE_PROBE2(rootnex__bind__sp__alloc, dev_info_t *,
3085*12f080e7Smrj 		    dma->dp_dip, size_t, space_needed);
3086*12f080e7Smrj 	}
3087*12f080e7Smrj 
3088*12f080e7Smrj 	/*
3089*12f080e7Smrj 	 * we allocate copy buffer state and window state at the same time.
3090*12f080e7Smrj 	 * setup our copy buffer state pointers. Make sure it's aligned.
3091*12f080e7Smrj 	 */
3092*12f080e7Smrj 	if (dma->dp_copybuf_size > 0) {
3093*12f080e7Smrj 		dma->dp_pgmap = (rootnex_pgmap_t *)(((uintptr_t)
3094*12f080e7Smrj 		    &dma->dp_window[dma->dp_max_win] + 0x7) & ~0x7);
3095*12f080e7Smrj 
3096*12f080e7Smrj #if !defined(__amd64)
3097*12f080e7Smrj 		/*
3098*12f080e7Smrj 		 * make sure all pm_mapped, pm_vaddr, and pm_pp are set to
3099*12f080e7Smrj 		 * false/NULL. Should be quicker to bzero vs loop and set.
3100*12f080e7Smrj 		 */
3101*12f080e7Smrj 		bzero(dma->dp_pgmap, copy_state_size);
3102*12f080e7Smrj #endif
3103*12f080e7Smrj 	} else {
3104*12f080e7Smrj 		dma->dp_pgmap = NULL;
3105*12f080e7Smrj 	}
3106*12f080e7Smrj 
3107*12f080e7Smrj 	return (DDI_SUCCESS);
3108*12f080e7Smrj }
3109*12f080e7Smrj 
3110*12f080e7Smrj 
3111*12f080e7Smrj /*
3112*12f080e7Smrj  * rootnex_teardown_copybuf()
3113*12f080e7Smrj  *    cleans up after rootnex_setup_copybuf()
3114*12f080e7Smrj  */
3115*12f080e7Smrj static void
3116*12f080e7Smrj rootnex_teardown_copybuf(rootnex_dma_t *dma)
3117*12f080e7Smrj {
3118*12f080e7Smrj #if !defined(__amd64)
3119*12f080e7Smrj 	int i;
3120*12f080e7Smrj 
3121*12f080e7Smrj 	/*
3122*12f080e7Smrj 	 * if we allocated kernel heap VMEM space, go through all the pages and
3123*12f080e7Smrj 	 * map out any of the ones that we're mapped into the kernel heap VMEM
3124*12f080e7Smrj 	 * arena. Then free the VMEM space.
3125*12f080e7Smrj 	 */
3126*12f080e7Smrj 	if (dma->dp_kva != NULL) {
3127*12f080e7Smrj 		for (i = 0; i < dma->dp_sglinfo.si_max_pages; i++) {
3128*12f080e7Smrj 			if (dma->dp_pgmap[i].pm_mapped) {
3129*12f080e7Smrj 				hat_unload(kas.a_hat, dma->dp_pgmap[i].pm_kaddr,
3130*12f080e7Smrj 				    MMU_PAGESIZE, HAT_UNLOAD);
3131*12f080e7Smrj 				dma->dp_pgmap[i].pm_mapped = B_FALSE;
3132*12f080e7Smrj 			}
3133*12f080e7Smrj 		}
3134*12f080e7Smrj 
3135*12f080e7Smrj 		vmem_free(heap_arena, dma->dp_kva, dma->dp_copybuf_size);
3136*12f080e7Smrj 	}
3137*12f080e7Smrj 
3138*12f080e7Smrj #endif
3139*12f080e7Smrj 
3140*12f080e7Smrj 	/* if we allocated a copy buffer, free it */
3141*12f080e7Smrj 	if (dma->dp_cbaddr != NULL) {
3142*12f080e7Smrj 		i_ddi_mem_free(dma->dp_cbaddr, 0);
3143*12f080e7Smrj 	}
3144*12f080e7Smrj }
3145*12f080e7Smrj 
3146*12f080e7Smrj 
3147*12f080e7Smrj /*
3148*12f080e7Smrj  * rootnex_teardown_windows()
3149*12f080e7Smrj  *    cleans up after rootnex_setup_windows()
3150*12f080e7Smrj  */
3151*12f080e7Smrj static void
3152*12f080e7Smrj rootnex_teardown_windows(rootnex_dma_t *dma)
3153*12f080e7Smrj {
3154*12f080e7Smrj 	/*
3155*12f080e7Smrj 	 * if we had to allocate window state on the last bind (because we
3156*12f080e7Smrj 	 * didn't have enough pre-allocated space in the handle), free it.
3157*12f080e7Smrj 	 */
3158*12f080e7Smrj 	if (dma->dp_need_to_free_window) {
3159*12f080e7Smrj 		kmem_free(dma->dp_window, dma->dp_window_size);
3160*12f080e7Smrj 	}
3161*12f080e7Smrj }
3162*12f080e7Smrj 
3163*12f080e7Smrj 
3164*12f080e7Smrj /*
3165*12f080e7Smrj  * rootnex_init_win()
3166*12f080e7Smrj  *    Called in bind slow path during creation of a new window. Initializes
3167*12f080e7Smrj  *    window state to default values.
3168*12f080e7Smrj  */
3169*12f080e7Smrj /*ARGSUSED*/
3170*12f080e7Smrj static void
3171*12f080e7Smrj rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
3172*12f080e7Smrj     rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset)
3173*12f080e7Smrj {
3174*12f080e7Smrj 	hp->dmai_nwin++;
3175*12f080e7Smrj 	window->wd_dosync = B_FALSE;
3176*12f080e7Smrj 	window->wd_offset = cur_offset;
3177*12f080e7Smrj 	window->wd_size = 0;
3178*12f080e7Smrj 	window->wd_first_cookie = cookie;
3179*12f080e7Smrj 	window->wd_cookie_cnt = 0;
3180*12f080e7Smrj 	window->wd_trim.tr_trim_first = B_FALSE;
3181*12f080e7Smrj 	window->wd_trim.tr_trim_last = B_FALSE;
3182*12f080e7Smrj 	window->wd_trim.tr_first_copybuf_win = B_FALSE;
3183*12f080e7Smrj 	window->wd_trim.tr_last_copybuf_win = B_FALSE;
3184*12f080e7Smrj #if !defined(__amd64)
3185*12f080e7Smrj 	window->wd_remap_copybuf = dma->dp_cb_remaping;
3186*12f080e7Smrj #endif
3187*12f080e7Smrj }
3188*12f080e7Smrj 
3189*12f080e7Smrj 
3190*12f080e7Smrj /*
3191*12f080e7Smrj  * rootnex_setup_cookie()
3192*12f080e7Smrj  *    Called in the bind slow path when the sgl uses the copy buffer. If any of
3193*12f080e7Smrj  *    the sgl uses the copy buffer, we need to go through each cookie, figure
3194*12f080e7Smrj  *    out if it uses the copy buffer, and if it does, save away everything we'll
3195*12f080e7Smrj  *    need during sync.
3196*12f080e7Smrj  */
3197*12f080e7Smrj static void
3198*12f080e7Smrj rootnex_setup_cookie(ddi_dma_obj_t *dmar_object, rootnex_dma_t *dma,
3199*12f080e7Smrj     ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used,
3200*12f080e7Smrj     page_t **cur_pp)
3201*12f080e7Smrj {
3202*12f080e7Smrj 	boolean_t copybuf_sz_power_2;
3203*12f080e7Smrj 	rootnex_sglinfo_t *sinfo;
3204*12f080e7Smrj 	uint_t pidx;
3205*12f080e7Smrj 	uint_t pcnt;
3206*12f080e7Smrj 	off_t poff;
3207*12f080e7Smrj #if defined(__amd64)
3208*12f080e7Smrj 	pfn_t pfn;
3209*12f080e7Smrj #else
3210*12f080e7Smrj 	page_t **pplist;
3211*12f080e7Smrj #endif
3212*12f080e7Smrj 
3213*12f080e7Smrj 	sinfo = &dma->dp_sglinfo;
3214*12f080e7Smrj 
3215*12f080e7Smrj 	/*
3216*12f080e7Smrj 	 * Calculate the page index relative to the start of the buffer. The
3217*12f080e7Smrj 	 * index to the current page for our buffer is the offset into the
3218*12f080e7Smrj 	 * first page of the buffer plus our current offset into the buffer
3219*12f080e7Smrj 	 * itself, shifted of course...
3220*12f080e7Smrj 	 */
3221*12f080e7Smrj 	pidx = (sinfo->si_buf_offset + cur_offset) >> MMU_PAGESHIFT;
3222*12f080e7Smrj 	ASSERT(pidx < sinfo->si_max_pages);
3223*12f080e7Smrj 
3224*12f080e7Smrj 	/* if this cookie uses the copy buffer */
3225*12f080e7Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3226*12f080e7Smrj 		/*
3227*12f080e7Smrj 		 * NOTE: we know that since this cookie uses the copy buffer, it
3228*12f080e7Smrj 		 * is <= MMU_PAGESIZE.
3229*12f080e7Smrj 		 */
3230*12f080e7Smrj 
3231*12f080e7Smrj 		/*
3232*12f080e7Smrj 		 * get the offset into the page. For the 64-bit kernel, get the
3233*12f080e7Smrj 		 * pfn which we'll use with seg kpm.
3234*12f080e7Smrj 		 */
3235*12f080e7Smrj 		poff = cookie->_dmu._dmac_ll & MMU_PAGEOFFSET;
3236*12f080e7Smrj #if defined(__amd64)
3237*12f080e7Smrj 		pfn = cookie->_dmu._dmac_ll >> MMU_PAGESHIFT;
3238*12f080e7Smrj #endif
3239*12f080e7Smrj 
3240*12f080e7Smrj 		/* figure out if the copybuf size is a power of 2 */
3241*12f080e7Smrj 		if (dma->dp_copybuf_size & (dma->dp_copybuf_size - 1)) {
3242*12f080e7Smrj 			copybuf_sz_power_2 = B_FALSE;
3243*12f080e7Smrj 		} else {
3244*12f080e7Smrj 			copybuf_sz_power_2 = B_TRUE;
3245*12f080e7Smrj 		}
3246*12f080e7Smrj 
3247*12f080e7Smrj 		/* This page uses the copy buffer */
3248*12f080e7Smrj 		dma->dp_pgmap[pidx].pm_uses_copybuf = B_TRUE;
3249*12f080e7Smrj 
3250*12f080e7Smrj 		/*
3251*12f080e7Smrj 		 * save the copy buffer KVA that we'll use with this page.
3252*12f080e7Smrj 		 * if we still fit within the copybuf, it's a simple add.
3253*12f080e7Smrj 		 * otherwise, we need to wrap over using & or % accordingly.
3254*12f080e7Smrj 		 */
3255*12f080e7Smrj 		if ((*copybuf_used + MMU_PAGESIZE) <= dma->dp_copybuf_size) {
3256*12f080e7Smrj 			dma->dp_pgmap[pidx].pm_cbaddr = dma->dp_cbaddr +
3257*12f080e7Smrj 			    *copybuf_used;
3258*12f080e7Smrj 		} else {
3259*12f080e7Smrj 			if (copybuf_sz_power_2) {
3260*12f080e7Smrj 				dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
3261*12f080e7Smrj 				    (uintptr_t)dma->dp_cbaddr +
3262*12f080e7Smrj 				    (*copybuf_used &
3263*12f080e7Smrj 				    (dma->dp_copybuf_size - 1)));
3264*12f080e7Smrj 			} else {
3265*12f080e7Smrj 				dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
3266*12f080e7Smrj 				    (uintptr_t)dma->dp_cbaddr +
3267*12f080e7Smrj 				    (*copybuf_used % dma->dp_copybuf_size));
3268*12f080e7Smrj 			}
3269*12f080e7Smrj 		}
3270*12f080e7Smrj 
3271*12f080e7Smrj 		/*
3272*12f080e7Smrj 		 * over write the cookie physical address with the address of
3273*12f080e7Smrj 		 * the physical address of the copy buffer page that we will
3274*12f080e7Smrj 		 * use.
3275*12f080e7Smrj 		 */
3276*12f080e7Smrj 		cookie->_dmu._dmac_ll = ptob64(hat_getpfnum(kas.a_hat,
3277*12f080e7Smrj 		    dma->dp_pgmap[pidx].pm_cbaddr)) + poff;
3278*12f080e7Smrj 
3279*12f080e7Smrj 		/* if we have a kernel VA, it's easy, just save that address */
3280*12f080e7Smrj 		if ((dmar_object->dmao_type != DMA_OTYP_PAGES) &&
3281*12f080e7Smrj 		    (sinfo->si_asp == &kas)) {
3282*12f080e7Smrj 			/*
3283*12f080e7Smrj 			 * save away the page aligned virtual address of the
3284*12f080e7Smrj 			 * driver buffer. Offsets are handled in the sync code.
3285*12f080e7Smrj 			 */
3286*12f080e7Smrj 			dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)(((uintptr_t)
3287*12f080e7Smrj 			    dmar_object->dmao_obj.virt_obj.v_addr + cur_offset)
3288*12f080e7Smrj 			    & MMU_PAGEMASK);
3289*12f080e7Smrj #if !defined(__amd64)
3290*12f080e7Smrj 			/*
3291*12f080e7Smrj 			 * we didn't need to, and will never need to map this
3292*12f080e7Smrj 			 * page.
3293*12f080e7Smrj 			 */
3294*12f080e7Smrj 			dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
3295*12f080e7Smrj #endif
3296*12f080e7Smrj 
3297*12f080e7Smrj 		/* we don't have a kernel VA. We need one for the bcopy. */
3298*12f080e7Smrj 		} else {
3299*12f080e7Smrj #if defined(__amd64)
3300*12f080e7Smrj 			/*
3301*12f080e7Smrj 			 * for the 64-bit kernel, it's easy. We use seg kpm to
3302*12f080e7Smrj 			 * get a Kernel VA for the corresponding pfn.
3303*12f080e7Smrj 			 */
3304*12f080e7Smrj 			dma->dp_pgmap[pidx].pm_kaddr = hat_kpm_pfn2va(pfn);
3305*12f080e7Smrj #else
3306*12f080e7Smrj 			/*
3307*12f080e7Smrj 			 * for the 32-bit kernel, this is a pain. First we'll
3308*12f080e7Smrj 			 * save away the page_t or user VA for this page. This
3309*12f080e7Smrj 			 * is needed in rootnex_dma_win() when we switch to a
3310*12f080e7Smrj 			 * new window which requires us to re-map the copy
3311*12f080e7Smrj 			 * buffer.
3312*12f080e7Smrj 			 */
3313*12f080e7Smrj 			pplist = dmar_object->dmao_obj.virt_obj.v_priv;
3314*12f080e7Smrj 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
3315*12f080e7Smrj 				dma->dp_pgmap[pidx].pm_pp = *cur_pp;
3316*12f080e7Smrj 				dma->dp_pgmap[pidx].pm_vaddr = NULL;
3317*12f080e7Smrj 			} else if (pplist != NULL) {
3318*12f080e7Smrj 				dma->dp_pgmap[pidx].pm_pp = pplist[pidx];
3319*12f080e7Smrj 				dma->dp_pgmap[pidx].pm_vaddr = NULL;
3320*12f080e7Smrj 			} else {
3321*12f080e7Smrj 				dma->dp_pgmap[pidx].pm_pp = NULL;
3322*12f080e7Smrj 				dma->dp_pgmap[pidx].pm_vaddr = (caddr_t)
3323*12f080e7Smrj 				    (((uintptr_t)
3324*12f080e7Smrj 				    dmar_object->dmao_obj.virt_obj.v_addr +
3325*12f080e7Smrj 				    cur_offset) & MMU_PAGEMASK);
3326*12f080e7Smrj 			}
3327*12f080e7Smrj 
3328*12f080e7Smrj 			/*
3329*12f080e7Smrj 			 * save away the page aligned virtual address which was
3330*12f080e7Smrj 			 * allocated from the kernel heap arena (taking into
3331*12f080e7Smrj 			 * account if we need more copy buffer than we alloced
3332*12f080e7Smrj 			 * and use multiple windows to handle this, i.e. &,%).
3333*12f080e7Smrj 			 * NOTE: there isn't and physical memory backing up this
3334*12f080e7Smrj 			 * virtual address space currently.
3335*12f080e7Smrj 			 */
3336*12f080e7Smrj 			if ((*copybuf_used + MMU_PAGESIZE) <=
3337*12f080e7Smrj 			    dma->dp_copybuf_size) {
3338*12f080e7Smrj 				dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
3339*12f080e7Smrj 				    (((uintptr_t)dma->dp_kva + *copybuf_used) &
3340*12f080e7Smrj 				    MMU_PAGEMASK);
3341*12f080e7Smrj 			} else {
3342*12f080e7Smrj 				if (copybuf_sz_power_2) {
3343*12f080e7Smrj 					dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
3344*12f080e7Smrj 					    (((uintptr_t)dma->dp_kva +
3345*12f080e7Smrj 					    (*copybuf_used &
3346*12f080e7Smrj 					    (dma->dp_copybuf_size - 1))) &
3347*12f080e7Smrj 					    MMU_PAGEMASK);
3348*12f080e7Smrj 				} else {
3349*12f080e7Smrj 					dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
3350*12f080e7Smrj 					    (((uintptr_t)dma->dp_kva +
3351*12f080e7Smrj 					    (*copybuf_used %
3352*12f080e7Smrj 					    dma->dp_copybuf_size)) &
3353*12f080e7Smrj 					    MMU_PAGEMASK);
3354*12f080e7Smrj 				}
3355*12f080e7Smrj 			}
3356*12f080e7Smrj 
3357*12f080e7Smrj 			/*
3358*12f080e7Smrj 			 * if we haven't used up the available copy buffer yet,
3359*12f080e7Smrj 			 * map the kva to the physical page.
3360*12f080e7Smrj 			 */
3361*12f080e7Smrj 			if (!dma->dp_cb_remaping && ((*copybuf_used +
3362*12f080e7Smrj 			    MMU_PAGESIZE) <= dma->dp_copybuf_size)) {
3363*12f080e7Smrj 				dma->dp_pgmap[pidx].pm_mapped = B_TRUE;
3364*12f080e7Smrj 				if (dma->dp_pgmap[pidx].pm_pp != NULL) {
3365*12f080e7Smrj 					i86_pp_map(dma->dp_pgmap[pidx].pm_pp,
3366*12f080e7Smrj 					    dma->dp_pgmap[pidx].pm_kaddr);
3367*12f080e7Smrj 				} else {
3368*12f080e7Smrj 					i86_va_map(dma->dp_pgmap[pidx].pm_vaddr,
3369*12f080e7Smrj 					    sinfo->si_asp,
3370*12f080e7Smrj 					    dma->dp_pgmap[pidx].pm_kaddr);
3371*12f080e7Smrj 				}
3372*12f080e7Smrj 
3373*12f080e7Smrj 			/*
3374*12f080e7Smrj 			 * we've used up the available copy buffer, this page
3375*12f080e7Smrj 			 * will have to be mapped during rootnex_dma_win() when
3376*12f080e7Smrj 			 * we switch to a new window which requires a re-map
3377*12f080e7Smrj 			 * the copy buffer. (32-bit kernel only)
3378*12f080e7Smrj 			 */
3379*12f080e7Smrj 			} else {
3380*12f080e7Smrj 				dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
3381*12f080e7Smrj 			}
3382*12f080e7Smrj #endif
3383*12f080e7Smrj 			/* go to the next page_t */
3384*12f080e7Smrj 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
3385*12f080e7Smrj 				*cur_pp = (*cur_pp)->p_next;
3386*12f080e7Smrj 			}
3387*12f080e7Smrj 		}
3388*12f080e7Smrj 
3389*12f080e7Smrj 		/* add to the copy buffer count */
3390*12f080e7Smrj 		*copybuf_used += MMU_PAGESIZE;
3391*12f080e7Smrj 
3392*12f080e7Smrj 	/*
3393*12f080e7Smrj 	 * This cookie doesn't use the copy buffer. Walk through the pages this
3394*12f080e7Smrj 	 * cookie occupies to reflect this.
3395*12f080e7Smrj 	 */
3396*12f080e7Smrj 	} else {
3397*12f080e7Smrj 		/*
3398*12f080e7Smrj 		 * figure out how many pages the cookie occupies. We need to
3399*12f080e7Smrj 		 * use the original page offset of the buffer and the cookies
3400*12f080e7Smrj 		 * offset in the buffer to do this.
3401*12f080e7Smrj 		 */
3402*12f080e7Smrj 		poff = (sinfo->si_buf_offset + cur_offset) & MMU_PAGEOFFSET;
3403*12f080e7Smrj 		pcnt = mmu_btopr(cookie->dmac_size + poff);
3404*12f080e7Smrj 
3405*12f080e7Smrj 		while (pcnt > 0) {
3406*12f080e7Smrj #if !defined(__amd64)
3407*12f080e7Smrj 			/*
3408*12f080e7Smrj 			 * the 32-bit kernel doesn't have seg kpm, so we need
3409*12f080e7Smrj 			 * to map in the driver buffer (if it didn't come down
3410*12f080e7Smrj 			 * with a kernel VA) on the fly. Since this page doesn't
3411*12f080e7Smrj 			 * use the copy buffer, it's not, or will it ever, have
3412*12f080e7Smrj 			 * to be mapped in.
3413*12f080e7Smrj 			 */
3414*12f080e7Smrj 			dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
3415*12f080e7Smrj #endif
3416*12f080e7Smrj 			dma->dp_pgmap[pidx].pm_uses_copybuf = B_FALSE;
3417*12f080e7Smrj 
3418*12f080e7Smrj 			/*
3419*12f080e7Smrj 			 * we need to update pidx and cur_pp or we'll loose
3420*12f080e7Smrj 			 * track of where we are.
3421*12f080e7Smrj 			 */
3422*12f080e7Smrj 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
3423*12f080e7Smrj 				*cur_pp = (*cur_pp)->p_next;
3424*12f080e7Smrj 			}
3425*12f080e7Smrj 			pidx++;
3426*12f080e7Smrj 			pcnt--;
3427*12f080e7Smrj 		}
3428*12f080e7Smrj 	}
3429*12f080e7Smrj }
3430*12f080e7Smrj 
3431*12f080e7Smrj 
3432*12f080e7Smrj /*
3433*12f080e7Smrj  * rootnex_sgllen_window_boundary()
3434*12f080e7Smrj  *    Called in the bind slow path when the next cookie causes us to exceed (in
3435*12f080e7Smrj  *    this case == since we start at 0 and sgllen starts at 1) the maximum sgl
3436*12f080e7Smrj  *    length supported by the DMA H/W.
3437*12f080e7Smrj  */
3438*12f080e7Smrj static int
3439*12f080e7Smrj rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
3440*12f080e7Smrj     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, ddi_dma_attr_t *attr,
3441*12f080e7Smrj     off_t cur_offset)
3442*12f080e7Smrj {
3443*12f080e7Smrj 	off_t new_offset;
3444*12f080e7Smrj 	size_t trim_sz;
3445*12f080e7Smrj 	off_t coffset;
3446*12f080e7Smrj 
3447*12f080e7Smrj 
3448*12f080e7Smrj 	/*
3449*12f080e7Smrj 	 * if we know we'll never have to trim, it's pretty easy. Just move to
3450*12f080e7Smrj 	 * the next window and init it. We're done.
3451*12f080e7Smrj 	 */
3452*12f080e7Smrj 	if (!dma->dp_trim_required) {
3453*12f080e7Smrj 		(*windowp)++;
3454*12f080e7Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
3455*12f080e7Smrj 		(*windowp)->wd_cookie_cnt++;
3456*12f080e7Smrj 		(*windowp)->wd_size = cookie->dmac_size;
3457*12f080e7Smrj 		return (DDI_SUCCESS);
3458*12f080e7Smrj 	}
3459*12f080e7Smrj 
3460*12f080e7Smrj 	/* figure out how much we need to trim from the window */
3461*12f080e7Smrj 	ASSERT(attr->dma_attr_granular != 0);
3462*12f080e7Smrj 	if (dma->dp_granularity_power_2) {
3463*12f080e7Smrj 		trim_sz = (*windowp)->wd_size & (attr->dma_attr_granular - 1);
3464*12f080e7Smrj 	} else {
3465*12f080e7Smrj 		trim_sz = (*windowp)->wd_size % attr->dma_attr_granular;
3466*12f080e7Smrj 	}
3467*12f080e7Smrj 
3468*12f080e7Smrj 	/* The window's a whole multiple of granularity. We're done */
3469*12f080e7Smrj 	if (trim_sz == 0) {
3470*12f080e7Smrj 		(*windowp)++;
3471*12f080e7Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
3472*12f080e7Smrj 		(*windowp)->wd_cookie_cnt++;
3473*12f080e7Smrj 		(*windowp)->wd_size = cookie->dmac_size;
3474*12f080e7Smrj 		return (DDI_SUCCESS);
3475*12f080e7Smrj 	}
3476*12f080e7Smrj 
3477*12f080e7Smrj 	/*
3478*12f080e7Smrj 	 * The window's not a whole multiple of granularity, since we know this
3479*12f080e7Smrj 	 * is due to the sgllen, we need to go back to the last cookie and trim
3480*12f080e7Smrj 	 * that one, add the left over part of the old cookie into the new
3481*12f080e7Smrj 	 * window, and then add in the new cookie into the new window.
3482*12f080e7Smrj 	 */
3483*12f080e7Smrj 
3484*12f080e7Smrj 	/*
3485*12f080e7Smrj 	 * make sure the driver isn't making us do something bad... Trimming and
3486*12f080e7Smrj 	 * sgllen == 1 don't go together.
3487*12f080e7Smrj 	 */
3488*12f080e7Smrj 	if (attr->dma_attr_sgllen == 1) {
3489*12f080e7Smrj 		return (DDI_DMA_NOMAPPING);
3490*12f080e7Smrj 	}
3491*12f080e7Smrj 
3492*12f080e7Smrj 	/*
3493*12f080e7Smrj 	 * first, setup the current window to account for the trim. Need to go
3494*12f080e7Smrj 	 * back to the last cookie for this.
3495*12f080e7Smrj 	 */
3496*12f080e7Smrj 	cookie--;
3497*12f080e7Smrj 	(*windowp)->wd_trim.tr_trim_last = B_TRUE;
3498*12f080e7Smrj 	(*windowp)->wd_trim.tr_last_cookie = cookie;
3499*12f080e7Smrj 	(*windowp)->wd_trim.tr_last_paddr = cookie->_dmu._dmac_ll;
3500*12f080e7Smrj 	ASSERT(cookie->dmac_size > trim_sz);
3501*12f080e7Smrj 	(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
3502*12f080e7Smrj 	(*windowp)->wd_size -= trim_sz;
3503*12f080e7Smrj 
3504*12f080e7Smrj 	/* save the buffer offsets for the next window */
3505*12f080e7Smrj 	coffset = cookie->dmac_size - trim_sz;
3506*12f080e7Smrj 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
3507*12f080e7Smrj 
3508*12f080e7Smrj 	/*
3509*12f080e7Smrj 	 * set this now in case this is the first window. all other cases are
3510*12f080e7Smrj 	 * set in dma_win()
3511*12f080e7Smrj 	 */
3512*12f080e7Smrj 	cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
3513*12f080e7Smrj 
3514*12f080e7Smrj 	/*
3515*12f080e7Smrj 	 * initialize the next window using what's left over in the previous
3516*12f080e7Smrj 	 * cookie.
3517*12f080e7Smrj 	 */
3518*12f080e7Smrj 	(*windowp)++;
3519*12f080e7Smrj 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
3520*12f080e7Smrj 	(*windowp)->wd_cookie_cnt++;
3521*12f080e7Smrj 	(*windowp)->wd_trim.tr_trim_first = B_TRUE;
3522*12f080e7Smrj 	(*windowp)->wd_trim.tr_first_paddr = cookie->_dmu._dmac_ll + coffset;
3523*12f080e7Smrj 	(*windowp)->wd_trim.tr_first_size = trim_sz;
3524*12f080e7Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3525*12f080e7Smrj 		(*windowp)->wd_dosync = B_TRUE;
3526*12f080e7Smrj 	}
3527*12f080e7Smrj 
3528*12f080e7Smrj 	/*
3529*12f080e7Smrj 	 * now go back to the current cookie and add it to the new window. set
3530*12f080e7Smrj 	 * the new window size to the what was left over from the previous
3531*12f080e7Smrj 	 * cookie and what's in the current cookie.
3532*12f080e7Smrj 	 */
3533*12f080e7Smrj 	cookie++;
3534*12f080e7Smrj 	(*windowp)->wd_cookie_cnt++;
3535*12f080e7Smrj 	(*windowp)->wd_size = trim_sz + cookie->dmac_size;
3536*12f080e7Smrj 
3537*12f080e7Smrj 	/*
3538*12f080e7Smrj 	 * trim plus the next cookie could put us over maxxfer (a cookie can be
3539*12f080e7Smrj 	 * a max size of maxxfer). Handle that case.
3540*12f080e7Smrj 	 */
3541*12f080e7Smrj 	if ((*windowp)->wd_size > dma->dp_maxxfer) {
3542*12f080e7Smrj 		/*
3543*12f080e7Smrj 		 * maxxfer is already a whole multiple of granularity, and this
3544*12f080e7Smrj 		 * trim will be <= the previous trim (since a cookie can't be
3545*12f080e7Smrj 		 * larger than maxxfer). Make things simple here.
3546*12f080e7Smrj 		 */
3547*12f080e7Smrj 		trim_sz = (*windowp)->wd_size - dma->dp_maxxfer;
3548*12f080e7Smrj 		(*windowp)->wd_trim.tr_trim_last = B_TRUE;
3549*12f080e7Smrj 		(*windowp)->wd_trim.tr_last_cookie = cookie;
3550*12f080e7Smrj 		(*windowp)->wd_trim.tr_last_paddr = cookie->_dmu._dmac_ll;
3551*12f080e7Smrj 		(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
3552*12f080e7Smrj 		(*windowp)->wd_size -= trim_sz;
3553*12f080e7Smrj 		ASSERT((*windowp)->wd_size == dma->dp_maxxfer);
3554*12f080e7Smrj 
3555*12f080e7Smrj 		/* save the buffer offsets for the next window */
3556*12f080e7Smrj 		coffset = cookie->dmac_size - trim_sz;
3557*12f080e7Smrj 		new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
3558*12f080e7Smrj 
3559*12f080e7Smrj 		/* setup the next window */
3560*12f080e7Smrj 		(*windowp)++;
3561*12f080e7Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
3562*12f080e7Smrj 		(*windowp)->wd_cookie_cnt++;
3563*12f080e7Smrj 		(*windowp)->wd_trim.tr_trim_first = B_TRUE;
3564*12f080e7Smrj 		(*windowp)->wd_trim.tr_first_paddr = cookie->_dmu._dmac_ll +
3565*12f080e7Smrj 		    coffset;
3566*12f080e7Smrj 		(*windowp)->wd_trim.tr_first_size = trim_sz;
3567*12f080e7Smrj 	}
3568*12f080e7Smrj 
3569*12f080e7Smrj 	return (DDI_SUCCESS);
3570*12f080e7Smrj }
3571*12f080e7Smrj 
3572*12f080e7Smrj 
3573*12f080e7Smrj /*
3574*12f080e7Smrj  * rootnex_copybuf_window_boundary()
3575*12f080e7Smrj  *    Called in bind slowpath when we get to a window boundary because we used
3576*12f080e7Smrj  *    up all the copy buffer that we have.
3577*12f080e7Smrj  */
3578*12f080e7Smrj static int
3579*12f080e7Smrj rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
3580*12f080e7Smrj     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, off_t cur_offset,
3581*12f080e7Smrj     size_t *copybuf_used)
3582*12f080e7Smrj {
3583*12f080e7Smrj 	rootnex_sglinfo_t *sinfo;
3584*12f080e7Smrj 	off_t new_offset;
3585*12f080e7Smrj 	size_t trim_sz;
3586*12f080e7Smrj 	off_t coffset;
3587*12f080e7Smrj 	uint_t pidx;
3588*12f080e7Smrj 	off_t poff;
3589*12f080e7Smrj 
3590*12f080e7Smrj 
3591*12f080e7Smrj 	sinfo = &dma->dp_sglinfo;
3592*12f080e7Smrj 
3593*12f080e7Smrj 	/*
3594*12f080e7Smrj 	 * the copy buffer should be a whole multiple of page size. We know that
3595*12f080e7Smrj 	 * this cookie is <= MMU_PAGESIZE.
3596*12f080e7Smrj 	 */
3597*12f080e7Smrj 	ASSERT(cookie->dmac_size <= MMU_PAGESIZE);
3598*12f080e7Smrj 
3599*12f080e7Smrj 	/*
3600*12f080e7Smrj 	 * from now on, all new windows in this bind need to be re-mapped during
3601*12f080e7Smrj 	 * ddi_dma_getwin() (32-bit kernel only). i.e. we ran out out copybuf
3602*12f080e7Smrj 	 * space...
3603*12f080e7Smrj 	 */
3604*12f080e7Smrj #if !defined(__amd64)
3605*12f080e7Smrj 	dma->dp_cb_remaping = B_TRUE;
3606*12f080e7Smrj #endif
3607*12f080e7Smrj 
3608*12f080e7Smrj 	/* reset copybuf used */
3609*12f080e7Smrj 	*copybuf_used = 0;
3610*12f080e7Smrj 
3611*12f080e7Smrj 	/*
3612*12f080e7Smrj 	 * if we don't have to trim (since granularity is set to 1), go to the
3613*12f080e7Smrj 	 * next window and add the current cookie to it. We know the current
3614*12f080e7Smrj 	 * cookie uses the copy buffer since we're in this code path.
3615*12f080e7Smrj 	 */
3616*12f080e7Smrj 	if (!dma->dp_trim_required) {
3617*12f080e7Smrj 		(*windowp)++;
3618*12f080e7Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
3619*12f080e7Smrj 
3620*12f080e7Smrj 		/* Add this cookie to the new window */
3621*12f080e7Smrj 		(*windowp)->wd_cookie_cnt++;
3622*12f080e7Smrj 		(*windowp)->wd_size += cookie->dmac_size;
3623*12f080e7Smrj 		*copybuf_used += MMU_PAGESIZE;
3624*12f080e7Smrj 		return (DDI_SUCCESS);
3625*12f080e7Smrj 	}
3626*12f080e7Smrj 
3627*12f080e7Smrj 	/*
3628*12f080e7Smrj 	 * *** may need to trim, figure it out.
3629*12f080e7Smrj 	 */
3630*12f080e7Smrj 
3631*12f080e7Smrj 	/* figure out how much we need to trim from the window */
3632*12f080e7Smrj 	if (dma->dp_granularity_power_2) {
3633*12f080e7Smrj 		trim_sz = (*windowp)->wd_size &
3634*12f080e7Smrj 		    (hp->dmai_attr.dma_attr_granular - 1);
3635*12f080e7Smrj 	} else {
3636*12f080e7Smrj 		trim_sz = (*windowp)->wd_size % hp->dmai_attr.dma_attr_granular;
3637*12f080e7Smrj 	}
3638*12f080e7Smrj 
3639*12f080e7Smrj 	/*
3640*12f080e7Smrj 	 * if the window's a whole multiple of granularity, go to the next
3641*12f080e7Smrj 	 * window, init it, then add in the current cookie. We know the current
3642*12f080e7Smrj 	 * cookie uses the copy buffer since we're in this code path.
3643*12f080e7Smrj 	 */
3644*12f080e7Smrj 	if (trim_sz == 0) {
3645*12f080e7Smrj 		(*windowp)++;
3646*12f080e7Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
3647*12f080e7Smrj 
3648*12f080e7Smrj 		/* Add this cookie to the new window */
3649*12f080e7Smrj 		(*windowp)->wd_cookie_cnt++;
3650*12f080e7Smrj 		(*windowp)->wd_size += cookie->dmac_size;
3651*12f080e7Smrj 		*copybuf_used += MMU_PAGESIZE;
3652*12f080e7Smrj 		return (DDI_SUCCESS);
3653*12f080e7Smrj 	}
3654*12f080e7Smrj 
3655*12f080e7Smrj 	/*
3656*12f080e7Smrj 	 * *** We figured it out, we definitly need to trim
3657*12f080e7Smrj 	 */
3658*12f080e7Smrj 
3659*12f080e7Smrj 	/*
3660*12f080e7Smrj 	 * make sure the driver isn't making us do something bad...
3661*12f080e7Smrj 	 * Trimming and sgllen == 1 don't go together.
3662*12f080e7Smrj 	 */
3663*12f080e7Smrj 	if (hp->dmai_attr.dma_attr_sgllen == 1) {
3664*12f080e7Smrj 		return (DDI_DMA_NOMAPPING);
3665*12f080e7Smrj 	}
3666*12f080e7Smrj 
3667*12f080e7Smrj 	/*
3668*12f080e7Smrj 	 * first, setup the current window to account for the trim. Need to go
3669*12f080e7Smrj 	 * back to the last cookie for this. Some of the last cookie will be in
3670*12f080e7Smrj 	 * the current window, and some of the last cookie will be in the new
3671*12f080e7Smrj 	 * window. All of the current cookie will be in the new window.
3672*12f080e7Smrj 	 */
3673*12f080e7Smrj 	cookie--;
3674*12f080e7Smrj 	(*windowp)->wd_trim.tr_trim_last = B_TRUE;
3675*12f080e7Smrj 	(*windowp)->wd_trim.tr_last_cookie = cookie;
3676*12f080e7Smrj 	(*windowp)->wd_trim.tr_last_paddr = cookie->_dmu._dmac_ll;
3677*12f080e7Smrj 	ASSERT(cookie->dmac_size > trim_sz);
3678*12f080e7Smrj 	(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
3679*12f080e7Smrj 	(*windowp)->wd_size -= trim_sz;
3680*12f080e7Smrj 
3681*12f080e7Smrj 	/*
3682*12f080e7Smrj 	 * we're trimming the last cookie (not the current cookie). So that
3683*12f080e7Smrj 	 * last cookie may have or may not have been using the copy buffer (
3684*12f080e7Smrj 	 * we know the cookie passed in uses the copy buffer since we're in
3685*12f080e7Smrj 	 * this code path).
3686*12f080e7Smrj 	 *
3687*12f080e7Smrj 	 * If the last cookie doesn't use the copy buffer, nothing special to
3688*12f080e7Smrj 	 * do. However, if it does uses the copy buffer, it will be both the
3689*12f080e7Smrj 	 * last page in the current window and the first page in the next
3690*12f080e7Smrj 	 * window. Since we are reusing the copy buffer (and KVA space on the
3691*12f080e7Smrj 	 * 32-bit kernel), this page will use the end of the copy buffer in the
3692*12f080e7Smrj 	 * current window, and the start of the copy buffer in the next window.
3693*12f080e7Smrj 	 * Track that info... The cookie physical address was already set to
3694*12f080e7Smrj 	 * the copy buffer physical address in setup_cookie..
3695*12f080e7Smrj 	 */
3696*12f080e7Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3697*12f080e7Smrj 		pidx = (sinfo->si_buf_offset + (*windowp)->wd_offset +
3698*12f080e7Smrj 		    (*windowp)->wd_size) >> MMU_PAGESHIFT;
3699*12f080e7Smrj 		(*windowp)->wd_trim.tr_last_copybuf_win = B_TRUE;
3700*12f080e7Smrj 		(*windowp)->wd_trim.tr_last_pidx = pidx;
3701*12f080e7Smrj 		(*windowp)->wd_trim.tr_last_cbaddr =
3702*12f080e7Smrj 		    dma->dp_pgmap[pidx].pm_cbaddr;
3703*12f080e7Smrj #if !defined(__amd64)
3704*12f080e7Smrj 		(*windowp)->wd_trim.tr_last_kaddr =
3705*12f080e7Smrj 		    dma->dp_pgmap[pidx].pm_kaddr;
3706*12f080e7Smrj #endif
3707*12f080e7Smrj 	}
3708*12f080e7Smrj 
3709*12f080e7Smrj 	/* save the buffer offsets for the next window */
3710*12f080e7Smrj 	coffset = cookie->dmac_size - trim_sz;
3711*12f080e7Smrj 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
3712*12f080e7Smrj 
3713*12f080e7Smrj 	/*
3714*12f080e7Smrj 	 * set this now in case this is the first window. all other cases are
3715*12f080e7Smrj 	 * set in dma_win()
3716*12f080e7Smrj 	 */
3717*12f080e7Smrj 	cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
3718*12f080e7Smrj 
3719*12f080e7Smrj 	/*
3720*12f080e7Smrj 	 * initialize the next window using what's left over in the previous
3721*12f080e7Smrj 	 * cookie.
3722*12f080e7Smrj 	 */
3723*12f080e7Smrj 	(*windowp)++;
3724*12f080e7Smrj 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
3725*12f080e7Smrj 	(*windowp)->wd_cookie_cnt++;
3726*12f080e7Smrj 	(*windowp)->wd_trim.tr_trim_first = B_TRUE;
3727*12f080e7Smrj 	(*windowp)->wd_trim.tr_first_paddr = cookie->_dmu._dmac_ll + coffset;
3728*12f080e7Smrj 	(*windowp)->wd_trim.tr_first_size = trim_sz;
3729*12f080e7Smrj 
3730*12f080e7Smrj 	/*
3731*12f080e7Smrj 	 * again, we're tracking if the last cookie uses the copy buffer.
3732*12f080e7Smrj 	 * read the comment above for more info on why we need to track
3733*12f080e7Smrj 	 * additional state.
3734*12f080e7Smrj 	 *
3735*12f080e7Smrj 	 * For the first cookie in the new window, we need reset the physical
3736*12f080e7Smrj 	 * address to DMA into to the start of the copy buffer plus any
3737*12f080e7Smrj 	 * initial page offset which may be present.
3738*12f080e7Smrj 	 */
3739*12f080e7Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3740*12f080e7Smrj 		(*windowp)->wd_dosync = B_TRUE;
3741*12f080e7Smrj 		(*windowp)->wd_trim.tr_first_copybuf_win = B_TRUE;
3742*12f080e7Smrj 		(*windowp)->wd_trim.tr_first_pidx = pidx;
3743*12f080e7Smrj 		(*windowp)->wd_trim.tr_first_cbaddr = dma->dp_cbaddr;
3744*12f080e7Smrj 		poff = (*windowp)->wd_trim.tr_first_paddr & MMU_PAGEOFFSET;
3745*12f080e7Smrj 		(*windowp)->wd_trim.tr_first_paddr = ptob64(hat_getpfnum(
3746*12f080e7Smrj 		    kas.a_hat, dma->dp_cbaddr)) + poff;
3747*12f080e7Smrj #if !defined(__amd64)
3748*12f080e7Smrj 		(*windowp)->wd_trim.tr_first_kaddr = dma->dp_kva;
3749*12f080e7Smrj #endif
3750*12f080e7Smrj 		/* account for the cookie copybuf usage in the new window */
3751*12f080e7Smrj 		*copybuf_used += MMU_PAGESIZE;
3752*12f080e7Smrj 
3753*12f080e7Smrj 		/*
3754*12f080e7Smrj 		 * every piece of code has to have a hack, and here is this
3755*12f080e7Smrj 		 * ones :-)
3756*12f080e7Smrj 		 *
3757*12f080e7Smrj 		 * There is a complex interaction between setup_cookie and the
3758*12f080e7Smrj 		 * copybuf window boundary. The complexity had to be in either
3759*12f080e7Smrj 		 * the maxxfer window, or the copybuf window, and I chose the
3760*12f080e7Smrj 		 * copybuf code.
3761*12f080e7Smrj 		 *
3762*12f080e7Smrj 		 * So in this code path, we have taken the last cookie,
3763*12f080e7Smrj 		 * virtually broken it in half due to the trim, and it happens
3764*12f080e7Smrj 		 * to use the copybuf which further complicates life. At the
3765*12f080e7Smrj 		 * same time, we have already setup the current cookie, which
3766*12f080e7Smrj 		 * is now wrong. More background info: the current cookie uses
3767*12f080e7Smrj 		 * the copybuf, so it is only a page long max. So we need to
3768*12f080e7Smrj 		 * fix the current cookies copy buffer address, physical
3769*12f080e7Smrj 		 * address, and kva for the 32-bit kernel. We due this by
3770*12f080e7Smrj 		 * bumping them by page size (of course, we can't due this on
3771*12f080e7Smrj 		 * the physical address since the copy buffer may not be
3772*12f080e7Smrj 		 * physically contiguous).
3773*12f080e7Smrj 		 */
3774*12f080e7Smrj 		cookie++;
3775*12f080e7Smrj 		dma->dp_pgmap[pidx + 1].pm_cbaddr += MMU_PAGESIZE;
3776*12f080e7Smrj 		poff = cookie->_dmu._dmac_ll & MMU_PAGEOFFSET;
3777*12f080e7Smrj 		cookie->_dmu._dmac_ll = ptob64(hat_getpfnum(kas.a_hat,
3778*12f080e7Smrj 		    dma->dp_pgmap[pidx + 1].pm_cbaddr)) + poff;
3779*12f080e7Smrj #if !defined(__amd64)
3780*12f080e7Smrj 		ASSERT(dma->dp_pgmap[pidx + 1].pm_mapped == B_FALSE);
3781*12f080e7Smrj 		dma->dp_pgmap[pidx + 1].pm_kaddr += MMU_PAGESIZE;
3782*12f080e7Smrj #endif
3783*12f080e7Smrj 	} else {
3784*12f080e7Smrj 		/* go back to the current cookie */
3785*12f080e7Smrj 		cookie++;
3786*12f080e7Smrj 	}
3787*12f080e7Smrj 
3788*12f080e7Smrj 	/*
3789*12f080e7Smrj 	 * add the current cookie to the new window. set the new window size to
3790*12f080e7Smrj 	 * the what was left over from the previous cookie and what's in the
3791*12f080e7Smrj 	 * current cookie.
3792*12f080e7Smrj 	 */
3793*12f080e7Smrj 	(*windowp)->wd_cookie_cnt++;
3794*12f080e7Smrj 	(*windowp)->wd_size = trim_sz + cookie->dmac_size;
3795*12f080e7Smrj 	ASSERT((*windowp)->wd_size < dma->dp_maxxfer);
3796*12f080e7Smrj 
3797*12f080e7Smrj 	/*
3798*12f080e7Smrj 	 * we know that the cookie passed in always uses the copy buffer. We
3799*12f080e7Smrj 	 * wouldn't be here if it didn't.
3800*12f080e7Smrj 	 */
3801*12f080e7Smrj 	*copybuf_used += MMU_PAGESIZE;
3802*12f080e7Smrj 
3803*12f080e7Smrj 	return (DDI_SUCCESS);
3804*12f080e7Smrj }
3805*12f080e7Smrj 
3806*12f080e7Smrj 
3807*12f080e7Smrj /*
3808*12f080e7Smrj  * rootnex_maxxfer_window_boundary()
3809*12f080e7Smrj  *    Called in bind slowpath when we get to a window boundary because we will
3810*12f080e7Smrj  *    go over maxxfer.
3811*12f080e7Smrj  */
3812*12f080e7Smrj static int
3813*12f080e7Smrj rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
3814*12f080e7Smrj     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie)
3815*12f080e7Smrj {
3816*12f080e7Smrj 	size_t dmac_size;
3817*12f080e7Smrj 	off_t new_offset;
3818*12f080e7Smrj 	size_t trim_sz;
3819*12f080e7Smrj 	off_t coffset;
3820*12f080e7Smrj 
3821*12f080e7Smrj 
3822*12f080e7Smrj 	/*
3823*12f080e7Smrj 	 * calculate how much we have to trim off of the current cookie to equal
3824*12f080e7Smrj 	 * maxxfer. We don't have to account for granularity here since our
3825*12f080e7Smrj 	 * maxxfer already takes that into account.
3826*12f080e7Smrj 	 */
3827*12f080e7Smrj 	trim_sz = ((*windowp)->wd_size + cookie->dmac_size) - dma->dp_maxxfer;
3828*12f080e7Smrj 	ASSERT(trim_sz <= cookie->dmac_size);
3829*12f080e7Smrj 	ASSERT(trim_sz <= dma->dp_maxxfer);
3830*12f080e7Smrj 
3831*12f080e7Smrj 	/* save cookie size since we need it later and we might change it */
3832*12f080e7Smrj 	dmac_size = cookie->dmac_size;
3833*12f080e7Smrj 
3834*12f080e7Smrj 	/*
3835*12f080e7Smrj 	 * if we're not trimming the entire cookie, setup the current window to
3836*12f080e7Smrj 	 * account for the trim.
3837*12f080e7Smrj 	 */
3838*12f080e7Smrj 	if (trim_sz < cookie->dmac_size) {
3839*12f080e7Smrj 		(*windowp)->wd_cookie_cnt++;
3840*12f080e7Smrj 		(*windowp)->wd_trim.tr_trim_last = B_TRUE;
3841*12f080e7Smrj 		(*windowp)->wd_trim.tr_last_cookie = cookie;
3842*12f080e7Smrj 		(*windowp)->wd_trim.tr_last_paddr = cookie->_dmu._dmac_ll;
3843*12f080e7Smrj 		(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
3844*12f080e7Smrj 		(*windowp)->wd_size = dma->dp_maxxfer;
3845*12f080e7Smrj 
3846*12f080e7Smrj 		/*
3847*12f080e7Smrj 		 * set the adjusted cookie size now in case this is the first
3848*12f080e7Smrj 		 * window. All other windows are taken care of in get win
3849*12f080e7Smrj 		 */
3850*12f080e7Smrj 		cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
3851*12f080e7Smrj 	}
3852*12f080e7Smrj 
3853*12f080e7Smrj 	/*
3854*12f080e7Smrj 	 * coffset is the current offset within the cookie, new_offset is the
3855*12f080e7Smrj 	 * current offset with the entire buffer.
3856*12f080e7Smrj 	 */
3857*12f080e7Smrj 	coffset = dmac_size - trim_sz;
3858*12f080e7Smrj 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
3859*12f080e7Smrj 
3860*12f080e7Smrj 	/* initialize the next window */
3861*12f080e7Smrj 	(*windowp)++;
3862*12f080e7Smrj 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
3863*12f080e7Smrj 	(*windowp)->wd_cookie_cnt++;
3864*12f080e7Smrj 	(*windowp)->wd_size = trim_sz;
3865*12f080e7Smrj 	if (trim_sz < dmac_size) {
3866*12f080e7Smrj 		(*windowp)->wd_trim.tr_trim_first = B_TRUE;
3867*12f080e7Smrj 		(*windowp)->wd_trim.tr_first_paddr = cookie->_dmu._dmac_ll +
3868*12f080e7Smrj 		    coffset;
3869*12f080e7Smrj 		(*windowp)->wd_trim.tr_first_size = trim_sz;
3870*12f080e7Smrj 	}
3871*12f080e7Smrj 
3872*12f080e7Smrj 	return (DDI_SUCCESS);
3873*12f080e7Smrj }
3874*12f080e7Smrj 
3875*12f080e7Smrj 
3876*12f080e7Smrj /*
3877*12f080e7Smrj  * rootnex_dma_sync()
3878*12f080e7Smrj  *    called from ddi_dma_sync() if DMP_NOSYNC is not set in hp->dmai_rflags.
3879*12f080e7Smrj  *    We set DMP_NOSYNC if we're not using the copy buffer. If DMP_NOSYNC
3880*12f080e7Smrj  *    is set, ddi_dma_sync() returns immediately passing back success.
3881*12f080e7Smrj  */
3882*12f080e7Smrj /*ARGSUSED*/
3883*12f080e7Smrj static int
3884*12f080e7Smrj rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
3885*12f080e7Smrj     off_t off, size_t len, uint_t cache_flags)
3886*12f080e7Smrj {
3887*12f080e7Smrj 	rootnex_sglinfo_t *sinfo;
3888*12f080e7Smrj 	rootnex_pgmap_t *cbpage;
3889*12f080e7Smrj 	rootnex_window_t *win;
3890*12f080e7Smrj 	ddi_dma_impl_t *hp;
3891*12f080e7Smrj 	rootnex_dma_t *dma;
3892*12f080e7Smrj 	caddr_t fromaddr;
3893*12f080e7Smrj 	caddr_t toaddr;
3894*12f080e7Smrj 	uint_t psize;
3895*12f080e7Smrj 	off_t offset;
3896*12f080e7Smrj 	uint_t pidx;
3897*12f080e7Smrj 	size_t size;
3898*12f080e7Smrj 	off_t poff;
3899*12f080e7Smrj 	int e;
3900*12f080e7Smrj 
3901*12f080e7Smrj 
3902*12f080e7Smrj 	hp = (ddi_dma_impl_t *)handle;
3903*12f080e7Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
3904*12f080e7Smrj 	sinfo = &dma->dp_sglinfo;
3905*12f080e7Smrj 
3906*12f080e7Smrj 	/*
3907*12f080e7Smrj 	 * if we don't have any windows, we don't need to sync. A copybuf
3908*12f080e7Smrj 	 * will cause us to have at least one window.
3909*12f080e7Smrj 	 */
3910*12f080e7Smrj 	if (dma->dp_window == NULL) {
3911*12f080e7Smrj 		return (DDI_SUCCESS);
3912*12f080e7Smrj 	}
3913*12f080e7Smrj 
3914*12f080e7Smrj 	/* This window may not need to be sync'd */
3915*12f080e7Smrj 	win = &dma->dp_window[dma->dp_current_win];
3916*12f080e7Smrj 	if (!win->wd_dosync) {
3917*12f080e7Smrj 		return (DDI_SUCCESS);
3918*12f080e7Smrj 	}
3919*12f080e7Smrj 
3920*12f080e7Smrj 	/* handle off and len special cases */
3921*12f080e7Smrj 	if ((off == 0) || (rootnex_sync_ignore_params)) {
3922*12f080e7Smrj 		offset = win->wd_offset;
3923*12f080e7Smrj 	} else {
3924*12f080e7Smrj 		offset = off;
3925*12f080e7Smrj 	}
3926*12f080e7Smrj 	if ((len == 0) || (rootnex_sync_ignore_params)) {
3927*12f080e7Smrj 		size = win->wd_size;
3928*12f080e7Smrj 	} else {
3929*12f080e7Smrj 		size = len;
3930*12f080e7Smrj 	}
3931*12f080e7Smrj 
3932*12f080e7Smrj 	/* check the sync args to make sure they make a little sense */
3933*12f080e7Smrj 	if (rootnex_sync_check_parms) {
3934*12f080e7Smrj 		e = rootnex_valid_sync_parms(hp, win, offset, size,
3935*12f080e7Smrj 		    cache_flags);
3936*12f080e7Smrj 		if (e != DDI_SUCCESS) {
3937*12f080e7Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_SYNC_FAIL]);
3938*12f080e7Smrj 			return (DDI_FAILURE);
3939*12f080e7Smrj 		}
3940*12f080e7Smrj 	}
3941*12f080e7Smrj 
3942*12f080e7Smrj 	/*
3943*12f080e7Smrj 	 * special case the first page to handle the offset into the page. The
3944*12f080e7Smrj 	 * offset to the current page for our buffer is the offset into the
3945*12f080e7Smrj 	 * first page of the buffer plus our current offset into the buffer
3946*12f080e7Smrj 	 * itself, masked of course.
3947*12f080e7Smrj 	 */
3948*12f080e7Smrj 	poff = (sinfo->si_buf_offset + offset) & MMU_PAGEOFFSET;
3949*12f080e7Smrj 	psize = MIN((MMU_PAGESIZE - poff), size);
3950*12f080e7Smrj 
3951*12f080e7Smrj 	/* go through all the pages that we want to sync */
3952*12f080e7Smrj 	while (size > 0) {
3953*12f080e7Smrj 		/*
3954*12f080e7Smrj 		 * Calculate the page index relative to the start of the buffer.
3955*12f080e7Smrj 		 * The index to the current page for our buffer is the offset
3956*12f080e7Smrj 		 * into the first page of the buffer plus our current offset
3957*12f080e7Smrj 		 * into the buffer itself, shifted of course...
3958*12f080e7Smrj 		 */
3959*12f080e7Smrj 		pidx = (sinfo->si_buf_offset + offset) >> MMU_PAGESHIFT;
3960*12f080e7Smrj 		ASSERT(pidx < sinfo->si_max_pages);
3961*12f080e7Smrj 
3962*12f080e7Smrj 		/*
3963*12f080e7Smrj 		 * if this page uses the copy buffer, we need to sync it,
3964*12f080e7Smrj 		 * otherwise, go on to the next page.
3965*12f080e7Smrj 		 */
3966*12f080e7Smrj 		cbpage = &dma->dp_pgmap[pidx];
3967*12f080e7Smrj 		ASSERT((cbpage->pm_uses_copybuf == B_TRUE) ||
3968*12f080e7Smrj 		    (cbpage->pm_uses_copybuf == B_FALSE));
3969*12f080e7Smrj 		if (cbpage->pm_uses_copybuf) {
3970*12f080e7Smrj 			/* cbaddr and kaddr should be page aligned */
3971*12f080e7Smrj 			ASSERT(((uintptr_t)cbpage->pm_cbaddr &
3972*12f080e7Smrj 			    MMU_PAGEOFFSET) == 0);
3973*12f080e7Smrj 			ASSERT(((uintptr_t)cbpage->pm_kaddr &
3974*12f080e7Smrj 			    MMU_PAGEOFFSET) == 0);
3975*12f080e7Smrj 
3976*12f080e7Smrj 			/*
3977*12f080e7Smrj 			 * if we're copying for the device, we are going to
3978*12f080e7Smrj 			 * copy from the drivers buffer and to the rootnex
3979*12f080e7Smrj 			 * allocated copy buffer.
3980*12f080e7Smrj 			 */
3981*12f080e7Smrj 			if (cache_flags == DDI_DMA_SYNC_FORDEV) {
3982*12f080e7Smrj 				fromaddr = cbpage->pm_kaddr + poff;
3983*12f080e7Smrj 				toaddr = cbpage->pm_cbaddr + poff;
3984*12f080e7Smrj 				DTRACE_PROBE2(rootnex__sync__dev,
3985*12f080e7Smrj 				    dev_info_t *, dma->dp_dip, size_t, psize);
3986*12f080e7Smrj 
3987*12f080e7Smrj 			/*
3988*12f080e7Smrj 			 * if we're copying for the cpu/kernel, we are going to
3989*12f080e7Smrj 			 * copy from the rootnex allocated copy buffer to the
3990*12f080e7Smrj 			 * drivers buffer.
3991*12f080e7Smrj 			 */
3992*12f080e7Smrj 			} else {
3993*12f080e7Smrj 				fromaddr = cbpage->pm_cbaddr + poff;
3994*12f080e7Smrj 				toaddr = cbpage->pm_kaddr + poff;
3995*12f080e7Smrj 				DTRACE_PROBE2(rootnex__sync__cpu,
3996*12f080e7Smrj 				    dev_info_t *, dma->dp_dip, size_t, psize);
3997*12f080e7Smrj 			}
3998*12f080e7Smrj 
3999*12f080e7Smrj 			bcopy(fromaddr, toaddr, psize);
4000*12f080e7Smrj 		}
4001*12f080e7Smrj 
4002*12f080e7Smrj 		/*
4003*12f080e7Smrj 		 * decrement size until we're done, update our offset into the
4004*12f080e7Smrj 		 * buffer, and get the next page size.
4005*12f080e7Smrj 		 */
4006*12f080e7Smrj 		size -= psize;
4007*12f080e7Smrj 		offset += psize;
4008*12f080e7Smrj 		psize = MIN(MMU_PAGESIZE, size);
4009*12f080e7Smrj 
4010*12f080e7Smrj 		/* page offset is zero for the rest of this loop */
4011*12f080e7Smrj 		poff = 0;
4012*12f080e7Smrj 	}
4013*12f080e7Smrj 
4014*12f080e7Smrj 	return (DDI_SUCCESS);
4015*12f080e7Smrj }
4016*12f080e7Smrj 
4017*12f080e7Smrj 
4018*12f080e7Smrj /*
4019*12f080e7Smrj  * rootnex_valid_sync_parms()
4020*12f080e7Smrj  *    checks the parameters passed to sync to verify they are correct.
4021*12f080e7Smrj  */
4022*12f080e7Smrj static int
4023*12f080e7Smrj rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win,
4024*12f080e7Smrj     off_t offset, size_t size, uint_t cache_flags)
4025*12f080e7Smrj {
4026*12f080e7Smrj 	off_t woffset;
4027*12f080e7Smrj 
4028*12f080e7Smrj 
4029*12f080e7Smrj 	/*
4030*12f080e7Smrj 	 * the first part of the test to make sure the offset passed in is
4031*12f080e7Smrj 	 * within the window.
4032*12f080e7Smrj 	 */
4033*12f080e7Smrj 	if (offset < win->wd_offset) {
4034*12f080e7Smrj 		return (DDI_FAILURE);
4035*12f080e7Smrj 	}
4036*12f080e7Smrj 
4037*12f080e7Smrj 	/*
4038*12f080e7Smrj 	 * second and last part of the test to make sure the offset and length
4039*12f080e7Smrj 	 * passed in is within the window.
4040*12f080e7Smrj 	 */
4041*12f080e7Smrj 	woffset = offset - win->wd_offset;
4042*12f080e7Smrj 	if ((woffset + size) > win->wd_size) {
4043*12f080e7Smrj 		return (DDI_FAILURE);
4044*12f080e7Smrj 	}
4045*12f080e7Smrj 
4046*12f080e7Smrj 	/*
4047*12f080e7Smrj 	 * if we are sync'ing for the device, the DDI_DMA_WRITE flag should
4048*12f080e7Smrj 	 * be set too.
4049*12f080e7Smrj 	 */
4050*12f080e7Smrj 	if ((cache_flags == DDI_DMA_SYNC_FORDEV) &&
4051*12f080e7Smrj 	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
4052*12f080e7Smrj 		return (DDI_SUCCESS);
4053*12f080e7Smrj 	}
4054*12f080e7Smrj 
4055*12f080e7Smrj 	/*
4056*12f080e7Smrj 	 * at this point, either DDI_DMA_SYNC_FORCPU or DDI_DMA_SYNC_FORKERNEL
4057*12f080e7Smrj 	 * should be set. Also DDI_DMA_READ should be set in the flags.
4058*12f080e7Smrj 	 */
4059*12f080e7Smrj 	if (((cache_flags == DDI_DMA_SYNC_FORCPU) ||
4060*12f080e7Smrj 	    (cache_flags == DDI_DMA_SYNC_FORKERNEL)) &&
4061*12f080e7Smrj 	    (hp->dmai_rflags & DDI_DMA_READ)) {
4062*12f080e7Smrj 		return (DDI_SUCCESS);
4063*12f080e7Smrj 	}
4064*12f080e7Smrj 
4065*12f080e7Smrj 	return (DDI_FAILURE);
4066*12f080e7Smrj }
4067*12f080e7Smrj 
4068*12f080e7Smrj 
4069*12f080e7Smrj /*
4070*12f080e7Smrj  * rootnex_dma_win()
4071*12f080e7Smrj  *    called from ddi_dma_getwin()
4072*12f080e7Smrj  */
4073*12f080e7Smrj /*ARGSUSED*/
4074*12f080e7Smrj static int
4075*12f080e7Smrj rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
4076*12f080e7Smrj     uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
4077*12f080e7Smrj     uint_t *ccountp)
4078*12f080e7Smrj {
4079*12f080e7Smrj 	rootnex_window_t *window;
4080*12f080e7Smrj 	rootnex_trim_t *trim;
4081*12f080e7Smrj 	ddi_dma_impl_t *hp;
4082*12f080e7Smrj 	rootnex_dma_t *dma;
4083*12f080e7Smrj #if !defined(__amd64)
4084*12f080e7Smrj 	rootnex_sglinfo_t *sinfo;
4085*12f080e7Smrj 	rootnex_pgmap_t *pmap;
4086*12f080e7Smrj 	uint_t pidx;
4087*12f080e7Smrj 	uint_t pcnt;
4088*12f080e7Smrj 	off_t poff;
4089*12f080e7Smrj 	int i;
4090*12f080e7Smrj #endif
4091*12f080e7Smrj 
4092*12f080e7Smrj 
4093*12f080e7Smrj 	hp = (ddi_dma_impl_t *)handle;
4094*12f080e7Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
4095*12f080e7Smrj #if !defined(__amd64)
4096*12f080e7Smrj 	sinfo = &dma->dp_sglinfo;
4097*12f080e7Smrj #endif
4098*12f080e7Smrj 
4099*12f080e7Smrj 	/* If we try and get a window which doesn't exist, return failure */
4100*12f080e7Smrj 	if (win >= hp->dmai_nwin) {
4101*12f080e7Smrj 		ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
4102*12f080e7Smrj 		return (DDI_FAILURE);
4103*12f080e7Smrj 	}
4104*12f080e7Smrj 
4105*12f080e7Smrj 	/*
4106*12f080e7Smrj 	 * if we don't have any windows, and they're asking for the first
4107*12f080e7Smrj 	 * window, setup the cookie pointer to the first cookie in the bind.
4108*12f080e7Smrj 	 * setup our return values, then increment the cookie since we return
4109*12f080e7Smrj 	 * the first cookie on the stack.
4110*12f080e7Smrj 	 */
4111*12f080e7Smrj 	if (dma->dp_window == NULL) {
4112*12f080e7Smrj 		if (win != 0) {
4113*12f080e7Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
4114*12f080e7Smrj 			return (DDI_FAILURE);
4115*12f080e7Smrj 		}
4116*12f080e7Smrj 		hp->dmai_cookie = dma->dp_cookies;
4117*12f080e7Smrj 		*offp = 0;
4118*12f080e7Smrj 		*lenp = dma->dp_dma.dmao_size;
4119*12f080e7Smrj 		*ccountp = dma->dp_sglinfo.si_sgl_size;
4120*12f080e7Smrj 		*cookiep = hp->dmai_cookie[0];
4121*12f080e7Smrj 		hp->dmai_cookie++;
4122*12f080e7Smrj 		return (DDI_SUCCESS);
4123*12f080e7Smrj 	}
4124*12f080e7Smrj 
4125*12f080e7Smrj 	/* sync the old window before moving on to the new one */
4126*12f080e7Smrj 	window = &dma->dp_window[dma->dp_current_win];
4127*12f080e7Smrj 	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_READ)) {
4128*12f080e7Smrj 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
4129*12f080e7Smrj 		    DDI_DMA_SYNC_FORCPU);
4130*12f080e7Smrj 	}
4131*12f080e7Smrj 
4132*12f080e7Smrj #if !defined(__amd64)
4133*12f080e7Smrj 	/*
4134*12f080e7Smrj 	 * before we move to the next window, if we need to re-map, unmap all
4135*12f080e7Smrj 	 * the pages in this window.
4136*12f080e7Smrj 	 */
4137*12f080e7Smrj 	if (dma->dp_cb_remaping) {
4138*12f080e7Smrj 		/*
4139*12f080e7Smrj 		 * If we switch to this window again, we'll need to map in
4140*12f080e7Smrj 		 * on the fly next time.
4141*12f080e7Smrj 		 */
4142*12f080e7Smrj 		window->wd_remap_copybuf = B_TRUE;
4143*12f080e7Smrj 
4144*12f080e7Smrj 		/*
4145*12f080e7Smrj 		 * calculate the page index into the buffer where this window
4146*12f080e7Smrj 		 * starts, and the number of pages this window takes up.
4147*12f080e7Smrj 		 */
4148*12f080e7Smrj 		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
4149*12f080e7Smrj 		    MMU_PAGESHIFT;
4150*12f080e7Smrj 		poff = (sinfo->si_buf_offset + window->wd_offset) &
4151*12f080e7Smrj 		    MMU_PAGEOFFSET;
4152*12f080e7Smrj 		pcnt = mmu_btopr(window->wd_size + poff);
4153*12f080e7Smrj 		ASSERT((pidx + pcnt) <= sinfo->si_max_pages);
4154*12f080e7Smrj 
4155*12f080e7Smrj 		/* unmap pages which are currently mapped in this window */
4156*12f080e7Smrj 		for (i = 0; i < pcnt; i++) {
4157*12f080e7Smrj 			if (dma->dp_pgmap[pidx].pm_mapped) {
4158*12f080e7Smrj 				hat_unload(kas.a_hat,
4159*12f080e7Smrj 				    dma->dp_pgmap[pidx].pm_kaddr, MMU_PAGESIZE,
4160*12f080e7Smrj 				    HAT_UNLOAD);
4161*12f080e7Smrj 				dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
4162*12f080e7Smrj 			}
4163*12f080e7Smrj 			pidx++;
4164*12f080e7Smrj 		}
4165*12f080e7Smrj 	}
4166*12f080e7Smrj #endif
4167*12f080e7Smrj 
4168*12f080e7Smrj 	/*
4169*12f080e7Smrj 	 * Move to the new window.
4170*12f080e7Smrj 	 * NOTE: current_win must be set for sync to work right
4171*12f080e7Smrj 	 */
4172*12f080e7Smrj 	dma->dp_current_win = win;
4173*12f080e7Smrj 	window = &dma->dp_window[win];
4174*12f080e7Smrj 
4175*12f080e7Smrj 	/* if needed, adjust the first and/or last cookies for trim */
4176*12f080e7Smrj 	trim = &window->wd_trim;
4177*12f080e7Smrj 	if (trim->tr_trim_first) {
4178*12f080e7Smrj 		window->wd_first_cookie->_dmu._dmac_ll = trim->tr_first_paddr;
4179*12f080e7Smrj 		window->wd_first_cookie->dmac_size = trim->tr_first_size;
4180*12f080e7Smrj #if !defined(__amd64)
4181*12f080e7Smrj 		window->wd_first_cookie->dmac_type =
4182*12f080e7Smrj 		    (window->wd_first_cookie->dmac_type &
4183*12f080e7Smrj 		    ROOTNEX_USES_COPYBUF) + window->wd_offset;
4184*12f080e7Smrj #endif
4185*12f080e7Smrj 		if (trim->tr_first_copybuf_win) {
4186*12f080e7Smrj 			dma->dp_pgmap[trim->tr_first_pidx].pm_cbaddr =
4187*12f080e7Smrj 			    trim->tr_first_cbaddr;
4188*12f080e7Smrj #if !defined(__amd64)
4189*12f080e7Smrj 			dma->dp_pgmap[trim->tr_first_pidx].pm_kaddr =
4190*12f080e7Smrj 			    trim->tr_first_kaddr;
4191*12f080e7Smrj #endif
4192*12f080e7Smrj 		}
4193*12f080e7Smrj 	}
4194*12f080e7Smrj 	if (trim->tr_trim_last) {
4195*12f080e7Smrj 		trim->tr_last_cookie->_dmu._dmac_ll = trim->tr_last_paddr;
4196*12f080e7Smrj 		trim->tr_last_cookie->dmac_size = trim->tr_last_size;
4197*12f080e7Smrj 		if (trim->tr_last_copybuf_win) {
4198*12f080e7Smrj 			dma->dp_pgmap[trim->tr_last_pidx].pm_cbaddr =
4199*12f080e7Smrj 			    trim->tr_last_cbaddr;
4200*12f080e7Smrj #if !defined(__amd64)
4201*12f080e7Smrj 			dma->dp_pgmap[trim->tr_last_pidx].pm_kaddr =
4202*12f080e7Smrj 			    trim->tr_last_kaddr;
4203*12f080e7Smrj #endif
4204*12f080e7Smrj 		}
4205*12f080e7Smrj 	}
4206*12f080e7Smrj 
4207*12f080e7Smrj 	/*
4208*12f080e7Smrj 	 * setup the cookie pointer to the first cookie in the window. setup
4209*12f080e7Smrj 	 * our return values, then increment the cookie since we return the
4210*12f080e7Smrj 	 * first cookie on the stack.
4211*12f080e7Smrj 	 */
4212*12f080e7Smrj 	hp->dmai_cookie = window->wd_first_cookie;
4213*12f080e7Smrj 	*offp = window->wd_offset;
4214*12f080e7Smrj 	*lenp = window->wd_size;
4215*12f080e7Smrj 	*ccountp = window->wd_cookie_cnt;
4216*12f080e7Smrj 	*cookiep = hp->dmai_cookie[0];
4217*12f080e7Smrj 	hp->dmai_cookie++;
4218*12f080e7Smrj 
4219*12f080e7Smrj #if !defined(__amd64)
4220*12f080e7Smrj 	/* re-map copybuf if required for this window */
4221*12f080e7Smrj 	if (dma->dp_cb_remaping) {
4222*12f080e7Smrj 		/*
4223*12f080e7Smrj 		 * calculate the page index into the buffer where this
4224*12f080e7Smrj 		 * window starts.
4225*12f080e7Smrj 		 */
4226*12f080e7Smrj 		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
4227*12f080e7Smrj 		    MMU_PAGESHIFT;
4228*12f080e7Smrj 		ASSERT(pidx < sinfo->si_max_pages);
4229*12f080e7Smrj 
4230*12f080e7Smrj 		/*
4231*12f080e7Smrj 		 * the first page can get unmapped if it's shared with the
4232*12f080e7Smrj 		 * previous window. Even if the rest of this window is already
4233*12f080e7Smrj 		 * mapped in, we need to still check this one.
4234*12f080e7Smrj 		 */
4235*12f080e7Smrj 		pmap = &dma->dp_pgmap[pidx];
4236*12f080e7Smrj 		if ((pmap->pm_uses_copybuf) && (pmap->pm_mapped == B_FALSE)) {
4237*12f080e7Smrj 			if (pmap->pm_pp != NULL) {
4238*12f080e7Smrj 				pmap->pm_mapped = B_TRUE;
4239*12f080e7Smrj 				i86_pp_map(pmap->pm_pp, pmap->pm_kaddr);
4240*12f080e7Smrj 			} else if (pmap->pm_vaddr != NULL) {
4241*12f080e7Smrj 				pmap->pm_mapped = B_TRUE;
4242*12f080e7Smrj 				i86_va_map(pmap->pm_vaddr, sinfo->si_asp,
4243*12f080e7Smrj 				    pmap->pm_kaddr);
4244*12f080e7Smrj 			}
4245*12f080e7Smrj 		}
4246*12f080e7Smrj 		pidx++;
4247*12f080e7Smrj 
4248*12f080e7Smrj 		/* map in the rest of the pages if required */
4249*12f080e7Smrj 		if (window->wd_remap_copybuf) {
4250*12f080e7Smrj 			window->wd_remap_copybuf = B_FALSE;
4251*12f080e7Smrj 
4252*12f080e7Smrj 			/* figure out many pages this window takes up */
4253*12f080e7Smrj 			poff = (sinfo->si_buf_offset + window->wd_offset) &
4254*12f080e7Smrj 			    MMU_PAGEOFFSET;
4255*12f080e7Smrj 			pcnt = mmu_btopr(window->wd_size + poff);
4256*12f080e7Smrj 			ASSERT(((pidx - 1) + pcnt) <= sinfo->si_max_pages);
4257*12f080e7Smrj 
4258*12f080e7Smrj 			/* map pages which require it */
4259*12f080e7Smrj 			for (i = 1; i < pcnt; i++) {
4260*12f080e7Smrj 				pmap = &dma->dp_pgmap[pidx];
4261*12f080e7Smrj 				if (pmap->pm_uses_copybuf) {
4262*12f080e7Smrj 					ASSERT(pmap->pm_mapped == B_FALSE);
4263*12f080e7Smrj 					if (pmap->pm_pp != NULL) {
4264*12f080e7Smrj 						pmap->pm_mapped = B_TRUE;
4265*12f080e7Smrj 						i86_pp_map(pmap->pm_pp,
4266*12f080e7Smrj 						    pmap->pm_kaddr);
4267*12f080e7Smrj 					} else if (pmap->pm_vaddr != NULL) {
4268*12f080e7Smrj 						pmap->pm_mapped = B_TRUE;
4269*12f080e7Smrj 						i86_va_map(pmap->pm_vaddr,
4270*12f080e7Smrj 						    sinfo->si_asp,
4271*12f080e7Smrj 						    pmap->pm_kaddr);
4272*12f080e7Smrj 					}
4273*12f080e7Smrj 				}
4274*12f080e7Smrj 				pidx++;
4275*12f080e7Smrj 			}
4276*12f080e7Smrj 		}
4277*12f080e7Smrj 	}
4278*12f080e7Smrj #endif
4279*12f080e7Smrj 
4280*12f080e7Smrj 	/* if the new window uses the copy buffer, sync it for the device */
4281*12f080e7Smrj 	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_WRITE)) {
4282*12f080e7Smrj 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
4283*12f080e7Smrj 		    DDI_DMA_SYNC_FORDEV);
4284*12f080e7Smrj 	}
4285*12f080e7Smrj 
4286*12f080e7Smrj 	return (DDI_SUCCESS);
4287*12f080e7Smrj }
4288*12f080e7Smrj 
4289*12f080e7Smrj 
4290*12f080e7Smrj 
4291*12f080e7Smrj /*
4292*12f080e7Smrj  * ************************
4293*12f080e7Smrj  *  obsoleted dma routines
4294*12f080e7Smrj  * ************************
4295*12f080e7Smrj  */
4296*12f080e7Smrj 
4297*12f080e7Smrj /*
4298*12f080e7Smrj  * rootnex_dma_map()
4299*12f080e7Smrj  *    called from ddi_dma_setup()
4300*12f080e7Smrj  */
4301*12f080e7Smrj /* ARGSUSED */
4302*12f080e7Smrj static int
4303*12f080e7Smrj rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip, struct ddi_dma_req *dmareq,
4304*12f080e7Smrj     ddi_dma_handle_t *handlep)
4305*12f080e7Smrj {
4306*12f080e7Smrj #if defined(__amd64)
4307*12f080e7Smrj 	/*
4308*12f080e7Smrj 	 * this interface is not supported in 64-bit x86 kernel. See comment in
4309*12f080e7Smrj 	 * rootnex_dma_mctl()
4310*12f080e7Smrj 	 */
4311*12f080e7Smrj 	ASSERT(0);
4312*12f080e7Smrj 	return (DDI_DMA_NORESOURCES);
4313*12f080e7Smrj 
4314*12f080e7Smrj #else /* 32-bit x86 kernel */
4315*12f080e7Smrj 	ddi_dma_handle_t *lhandlep;
4316*12f080e7Smrj 	ddi_dma_handle_t lhandle;
4317*12f080e7Smrj 	ddi_dma_cookie_t cookie;
4318*12f080e7Smrj 	ddi_dma_attr_t dma_attr;
4319*12f080e7Smrj 	ddi_dma_lim_t *dma_lim;
4320*12f080e7Smrj 	uint_t ccnt;
4321*12f080e7Smrj 	int e;
4322*12f080e7Smrj 
4323*12f080e7Smrj 
4324*12f080e7Smrj 	/*
4325*12f080e7Smrj 	 * if the driver is just testing to see if it's possible to do the bind,
4326*12f080e7Smrj 	 * we'll use local state. Otherwise, use the handle pointer passed in.
4327*12f080e7Smrj 	 */
4328*12f080e7Smrj 	if (handlep == NULL) {
4329*12f080e7Smrj 		lhandlep = &lhandle;
4330*12f080e7Smrj 	} else {
4331*12f080e7Smrj 		lhandlep = handlep;
4332*12f080e7Smrj 	}
4333*12f080e7Smrj 
4334*12f080e7Smrj 	/* convert the limit structure to a dma_attr one */
4335*12f080e7Smrj 	dma_lim = dmareq->dmar_limits;
4336*12f080e7Smrj 	dma_attr.dma_attr_version = DMA_ATTR_V0;
4337*12f080e7Smrj 	dma_attr.dma_attr_addr_lo = dma_lim->dlim_addr_lo;
4338*12f080e7Smrj 	dma_attr.dma_attr_addr_hi = dma_lim->dlim_addr_hi;
4339*12f080e7Smrj 	dma_attr.dma_attr_minxfer = dma_lim->dlim_minxfer;
4340*12f080e7Smrj 	dma_attr.dma_attr_seg = dma_lim->dlim_adreg_max;
4341*12f080e7Smrj 	dma_attr.dma_attr_count_max = dma_lim->dlim_ctreg_max;
4342*12f080e7Smrj 	dma_attr.dma_attr_granular = dma_lim->dlim_granular;
4343*12f080e7Smrj 	dma_attr.dma_attr_sgllen = dma_lim->dlim_sgllen;
4344*12f080e7Smrj 	dma_attr.dma_attr_maxxfer = dma_lim->dlim_reqsize;
4345*12f080e7Smrj 	dma_attr.dma_attr_burstsizes = dma_lim->dlim_burstsizes;
4346*12f080e7Smrj 	dma_attr.dma_attr_align = MMU_PAGESIZE;
4347*12f080e7Smrj 	dma_attr.dma_attr_flags = 0;
4348*12f080e7Smrj 
4349*12f080e7Smrj 	e = rootnex_dma_allochdl(dip, rdip, &dma_attr, dmareq->dmar_fp,
4350*12f080e7Smrj 	    dmareq->dmar_arg, lhandlep);
4351*12f080e7Smrj 	if (e != DDI_SUCCESS) {
4352*12f080e7Smrj 		return (e);
4353*12f080e7Smrj 	}
4354*12f080e7Smrj 
4355*12f080e7Smrj 	e = rootnex_dma_bindhdl(dip, rdip, *lhandlep, dmareq, &cookie, &ccnt);
4356*12f080e7Smrj 	if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) {
4357*12f080e7Smrj 		(void) rootnex_dma_freehdl(dip, rdip, *lhandlep);
4358*12f080e7Smrj 		return (e);
4359*12f080e7Smrj 	}
4360*12f080e7Smrj 
4361*12f080e7Smrj 	/*
4362*12f080e7Smrj 	 * if the driver is just testing to see if it's possible to do the bind,
4363*12f080e7Smrj 	 * free up the local state and return the result.
4364*12f080e7Smrj 	 */
4365*12f080e7Smrj 	if (handlep == NULL) {
4366*12f080e7Smrj 		(void) rootnex_dma_unbindhdl(dip, rdip, *lhandlep);
4367*12f080e7Smrj 		(void) rootnex_dma_freehdl(dip, rdip, *lhandlep);
4368*12f080e7Smrj 		if (e == DDI_DMA_MAPPED) {
4369*12f080e7Smrj 			return (DDI_DMA_MAPOK);
4370*12f080e7Smrj 		} else {
4371*12f080e7Smrj 			return (DDI_DMA_NOMAPPING);
4372*12f080e7Smrj 		}
4373*12f080e7Smrj 	}
4374*12f080e7Smrj 
4375*12f080e7Smrj 	return (e);
4376*12f080e7Smrj #endif /* defined(__amd64) */
4377*12f080e7Smrj }
4378*12f080e7Smrj 
4379*12f080e7Smrj 
4380*12f080e7Smrj /*
4381*12f080e7Smrj  * rootnex_dma_mctl()
4382*12f080e7Smrj  *
4383*12f080e7Smrj  */
4384*12f080e7Smrj /* ARGSUSED */
4385*12f080e7Smrj static int
4386*12f080e7Smrj rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
4387*12f080e7Smrj     enum ddi_dma_ctlops request, off_t *offp, size_t *lenp, caddr_t *objpp,
4388*12f080e7Smrj     uint_t cache_flags)
4389*12f080e7Smrj {
4390*12f080e7Smrj #if defined(__amd64)
4391*12f080e7Smrj 	/*
4392*12f080e7Smrj 	 * DDI_DMA_SMEM_ALLOC & DDI_DMA_IOPB_ALLOC we're changed to have a
4393*12f080e7Smrj 	 * common implementation in genunix, so they no longer have x86
4394*12f080e7Smrj 	 * specific functionality which called into dma_ctl.
4395*12f080e7Smrj 	 *
4396*12f080e7Smrj 	 * The rest of the obsoleted interfaces were never supported in the
4397*12f080e7Smrj 	 * 64-bit x86 kernel. For s10, the obsoleted DDI_DMA_SEGTOC interface
4398*12f080e7Smrj 	 * was not ported to the x86 64-bit kernel do to serious x86 rootnex
4399*12f080e7Smrj 	 * implementation issues.
4400*12f080e7Smrj 	 *
4401*12f080e7Smrj 	 * If you can't use DDI_DMA_SEGTOC; DDI_DMA_NEXTSEG, DDI_DMA_FREE, and
4402*12f080e7Smrj 	 * DDI_DMA_NEXTWIN are useless since you can get to the cookie, so we
4403*12f080e7Smrj 	 * reflect that now too...
4404*12f080e7Smrj 	 *
4405*12f080e7Smrj 	 * Even though we fixed the pointer problem in DDI_DMA_SEGTOC, we are
4406*12f080e7Smrj 	 * not going to put this functionality into the 64-bit x86 kernel now.
4407*12f080e7Smrj 	 * It wasn't ported to the 64-bit kernel for s10, no reason to change
4408*12f080e7Smrj 	 * that in a future release.
4409*12f080e7Smrj 	 */
4410*12f080e7Smrj 	ASSERT(0);
4411*12f080e7Smrj 	return (DDI_FAILURE);
4412*12f080e7Smrj 
4413*12f080e7Smrj #else /* 32-bit x86 kernel */
4414*12f080e7Smrj 	ddi_dma_cookie_t lcookie;
4415*12f080e7Smrj 	ddi_dma_cookie_t *cookie;
4416*12f080e7Smrj 	rootnex_window_t *window;
4417*12f080e7Smrj 	ddi_dma_impl_t *hp;
4418*12f080e7Smrj 	rootnex_dma_t *dma;
4419*12f080e7Smrj 	uint_t nwin;
4420*12f080e7Smrj 	uint_t ccnt;
4421*12f080e7Smrj 	size_t len;
4422*12f080e7Smrj 	off_t off;
4423*12f080e7Smrj 	int e;
4424*12f080e7Smrj 
4425*12f080e7Smrj 
4426*12f080e7Smrj 	/*
4427*12f080e7Smrj 	 * DDI_DMA_SEGTOC, DDI_DMA_NEXTSEG, and DDI_DMA_NEXTWIN are a little
4428*12f080e7Smrj 	 * hacky since were optimizing for the current interfaces and so we can
4429*12f080e7Smrj 	 * cleanup the mess in genunix. Hopefully we will remove the this
4430*12f080e7Smrj 	 * obsoleted routines someday soon.
4431*12f080e7Smrj 	 */
4432*12f080e7Smrj 
4433*12f080e7Smrj 	switch (request) {
4434*12f080e7Smrj 
4435*12f080e7Smrj 	case DDI_DMA_SEGTOC: /* ddi_dma_segtocookie() */
4436*12f080e7Smrj 		hp = (ddi_dma_impl_t *)handle;
4437*12f080e7Smrj 		cookie = (ddi_dma_cookie_t *)objpp;
4438*12f080e7Smrj 
4439*12f080e7Smrj 		/*
4440*12f080e7Smrj 		 * convert segment to cookie. We don't distinguish between the
4441*12f080e7Smrj 		 * two :-)
4442*12f080e7Smrj 		 */
4443*12f080e7Smrj 		*cookie = *hp->dmai_cookie;
4444*12f080e7Smrj 		*lenp = cookie->dmac_size;
4445*12f080e7Smrj 		*offp = cookie->dmac_type & ~ROOTNEX_USES_COPYBUF;
4446*12f080e7Smrj 		return (DDI_SUCCESS);
4447*12f080e7Smrj 
4448*12f080e7Smrj 	case DDI_DMA_NEXTSEG: /* ddi_dma_nextseg() */
4449*12f080e7Smrj 		hp = (ddi_dma_impl_t *)handle;
4450*12f080e7Smrj 		dma = (rootnex_dma_t *)hp->dmai_private;
4451*12f080e7Smrj 
4452*12f080e7Smrj 		if ((*lenp != NULL) && ((uintptr_t)*lenp != (uintptr_t)hp)) {
4453*12f080e7Smrj 			return (DDI_DMA_STALE);
4454*12f080e7Smrj 		}
4455*12f080e7Smrj 
4456*12f080e7Smrj 		/* handle the case where we don't have any windows */
4457*12f080e7Smrj 		if (dma->dp_window == NULL) {
4458*12f080e7Smrj 			/*
4459*12f080e7Smrj 			 * if seg == NULL, and we don't have any windows,
4460*12f080e7Smrj 			 * return the first cookie in the sgl.
4461*12f080e7Smrj 			 */
4462*12f080e7Smrj 			if (*lenp == NULL) {
4463*12f080e7Smrj 				dma->dp_current_cookie = 0;
4464*12f080e7Smrj 				hp->dmai_cookie = dma->dp_cookies;
4465*12f080e7Smrj 				*objpp = (caddr_t)handle;
4466*12f080e7Smrj 				return (DDI_SUCCESS);
4467*12f080e7Smrj 
4468*12f080e7Smrj 			/* if we have more cookies, go to the next cookie */
4469*12f080e7Smrj 			} else {
4470*12f080e7Smrj 				if ((dma->dp_current_cookie + 1) >=
4471*12f080e7Smrj 				    dma->dp_sglinfo.si_sgl_size) {
4472*12f080e7Smrj 					return (DDI_DMA_DONE);
4473*12f080e7Smrj 				}
4474*12f080e7Smrj 				dma->dp_current_cookie++;
4475*12f080e7Smrj 				hp->dmai_cookie++;
4476*12f080e7Smrj 				return (DDI_SUCCESS);
4477*12f080e7Smrj 			}
4478*12f080e7Smrj 		}
4479*12f080e7Smrj 
4480*12f080e7Smrj 		/* We have one or more windows */
4481*12f080e7Smrj 		window = &dma->dp_window[dma->dp_current_win];
4482*12f080e7Smrj 
4483*12f080e7Smrj 		/*
4484*12f080e7Smrj 		 * if seg == NULL, return the first cookie in the current
4485*12f080e7Smrj 		 * window
4486*12f080e7Smrj 		 */
4487*12f080e7Smrj 		if (*lenp == NULL) {
4488*12f080e7Smrj 			dma->dp_current_cookie = 0;
4489*12f080e7Smrj 			hp->dmai_cookie = dma->dp_cookies;
4490*12f080e7Smrj 
4491*12f080e7Smrj 		/*
4492*12f080e7Smrj 		 * go to the next cookie in the window then see if we done with
4493*12f080e7Smrj 		 * this window.
4494*12f080e7Smrj 		 */
4495*12f080e7Smrj 		} else {
4496*12f080e7Smrj 			if ((dma->dp_current_cookie + 1) >=
4497*12f080e7Smrj 			    window->wd_cookie_cnt) {
4498*12f080e7Smrj 				return (DDI_DMA_DONE);
4499*12f080e7Smrj 			}
4500*12f080e7Smrj 			dma->dp_current_cookie++;
4501*12f080e7Smrj 			hp->dmai_cookie++;
4502*12f080e7Smrj 		}
4503*12f080e7Smrj 		*objpp = (caddr_t)handle;
4504*12f080e7Smrj 		return (DDI_SUCCESS);
4505*12f080e7Smrj 
4506*12f080e7Smrj 	case DDI_DMA_NEXTWIN: /* ddi_dma_nextwin() */
4507*12f080e7Smrj 		hp = (ddi_dma_impl_t *)handle;
4508*12f080e7Smrj 		dma = (rootnex_dma_t *)hp->dmai_private;
4509*12f080e7Smrj 
4510*12f080e7Smrj 		if ((*offp != NULL) && ((uintptr_t)*offp != (uintptr_t)hp)) {
4511*12f080e7Smrj 			return (DDI_DMA_STALE);
4512*12f080e7Smrj 		}
4513*12f080e7Smrj 
4514*12f080e7Smrj 		/* if win == NULL, return the first window in the bind */
4515*12f080e7Smrj 		if (*offp == NULL) {
4516*12f080e7Smrj 			nwin = 0;
4517*12f080e7Smrj 
4518*12f080e7Smrj 		/*
4519*12f080e7Smrj 		 * else, go to the next window then see if we're done with all
4520*12f080e7Smrj 		 * the windows.
4521*12f080e7Smrj 		 */
4522*12f080e7Smrj 		} else {
4523*12f080e7Smrj 			nwin = dma->dp_current_win + 1;
4524*12f080e7Smrj 			if (nwin >= hp->dmai_nwin) {
4525*12f080e7Smrj 				return (DDI_DMA_DONE);
4526*12f080e7Smrj 			}
4527*12f080e7Smrj 		}
4528*12f080e7Smrj 
4529*12f080e7Smrj 		/* switch to the next window */
4530*12f080e7Smrj 		e = rootnex_dma_win(dip, rdip, handle, nwin, &off, &len,
4531*12f080e7Smrj 		    &lcookie, &ccnt);
4532*12f080e7Smrj 		ASSERT(e == DDI_SUCCESS);
4533*12f080e7Smrj 		if (e != DDI_SUCCESS) {
4534*12f080e7Smrj 			return (DDI_DMA_STALE);
4535*12f080e7Smrj 		}
4536*12f080e7Smrj 
4537*12f080e7Smrj 		/* reset the cookie back to the first cookie in the window */
4538*12f080e7Smrj 		if (dma->dp_window != NULL) {
4539*12f080e7Smrj 			window = &dma->dp_window[dma->dp_current_win];
4540*12f080e7Smrj 			hp->dmai_cookie = window->wd_first_cookie;
4541*12f080e7Smrj 		} else {
4542*12f080e7Smrj 			hp->dmai_cookie = dma->dp_cookies;
4543*12f080e7Smrj 		}
4544*12f080e7Smrj 
4545*12f080e7Smrj 		*objpp = (caddr_t)handle;
4546*12f080e7Smrj 		return (DDI_SUCCESS);
4547*12f080e7Smrj 
4548*12f080e7Smrj 	case DDI_DMA_FREE: /* ddi_dma_free() */
4549*12f080e7Smrj 		(void) rootnex_dma_unbindhdl(dip, rdip, handle);
4550*12f080e7Smrj 		(void) rootnex_dma_freehdl(dip, rdip, handle);
4551*12f080e7Smrj 		if (rootnex_state->r_dvma_call_list_id) {
4552*12f080e7Smrj 			ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
4553*12f080e7Smrj 		}
4554*12f080e7Smrj 		return (DDI_SUCCESS);
4555*12f080e7Smrj 
4556*12f080e7Smrj 	case DDI_DMA_IOPB_ALLOC:	/* get contiguous DMA-able memory */
4557*12f080e7Smrj 	case DDI_DMA_SMEM_ALLOC:	/* get contiguous DMA-able memory */
4558*12f080e7Smrj 		/* should never get here, handled in genunix */
4559*12f080e7Smrj 		ASSERT(0);
4560*12f080e7Smrj 		return (DDI_FAILURE);
4561*12f080e7Smrj 
4562*12f080e7Smrj 	case DDI_DMA_KVADDR:
4563*12f080e7Smrj 	case DDI_DMA_GETERR:
4564*12f080e7Smrj 	case DDI_DMA_COFF:
4565*12f080e7Smrj 		return (DDI_FAILURE);
4566*12f080e7Smrj 	}
4567*12f080e7Smrj 
4568*12f080e7Smrj 	return (DDI_FAILURE);
4569*12f080e7Smrj #endif /* defined(__amd64) */
45707c478bd9Sstevel@tonic-gate }
4571