/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * x86 root nexus driver
 */

#include <sys/sysmacros.h>
#include <sys/conf.h>
#include <sys/autoconf.h>
#include <sys/sysmacros.h>
#include <sys/debug.h>
#include <sys/psw.h>
#include <sys/ddidmareq.h>
#include <sys/promif.h>
#include <sys/devops.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <vm/seg.h>
#include <vm/seg_kmem.h>
#include <vm/seg_dev.h>
#include <sys/vmem.h>
#include <sys/mman.h>
#include <vm/hat.h>
#include <vm/as.h>
#include <vm/page.h>
#include <sys/avintr.h>
#include <sys/errno.h>
#include <sys/modctl.h>
#include <sys/ddi_impldefs.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/mach_intr.h>
#include <sys/psm.h>
#include <sys/ontrap.h>
#include <sys/atomic.h>
#include <sys/sdt.h>
#include <sys/rootnex.h>
#include <vm/hat_i86.h>
#include <sys/ddifm.h>
#include <sys/ddi_isa.h>

#ifdef __xpv
#include <sys/bootinfo.h>
#include <sys/hypervisor.h>
#include <sys/bootconf.h>
#include <vm/kboot_mmu.h>
#endif

/*
 * enable/disable extra checking of function parameters. Useful for debugging
 * drivers.
 */
#ifdef	DEBUG
int rootnex_alloc_check_parms = 1;
int rootnex_bind_check_parms = 1;
int rootnex_bind_check_inuse = 1;
int rootnex_unbind_verify_buffer = 0;
int rootnex_sync_check_parms = 1;
#else
int rootnex_alloc_check_parms = 0;
int rootnex_bind_check_parms = 0;
int rootnex_bind_check_inuse = 0;
int rootnex_unbind_verify_buffer = 0;
int rootnex_sync_check_parms = 0;
#endif
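
/*
 * These checks exist in both DEBUG and non-DEBUG kernels; only the defaults
 * above differ. As an illustrative sketch (not a recommendation), a flag can
 * be flipped on a live system with mdb -kw, e.g.
 *
 *	> rootnex_bind_check_parms/W 1
 *
 * or set at boot via /etc/system like the pre-allocation tunables below.
 */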

/* Master Abort and Target Abort panic flag */
int rootnex_fm_ma_ta_panic_flag = 0;

/* Semi-temporary patchables to phase in bug fixes, test drivers, etc. */
int rootnex_bind_fail = 1;
int rootnex_bind_warn = 1;
uint8_t *rootnex_warn_list;
/* bitmasks for rootnex_warn_list. Up to 8 different warnings with uint8_t */
#define	ROOTNEX_BIND_WARNING	(0x1 << 0)

/*
 * revert to the old broken behavior of always sync'ing the entire copy
 * buffer. This is useful if we have a buggy driver which doesn't correctly
 * pass the offset and size into ddi_dma_sync().
 */
int rootnex_sync_ignore_params = 0;

/*
 * For the 64-bit kernel, pre-alloc enough cookies for a 256K buffer plus 1
 * page for alignment. For the 32-bit kernel, pre-alloc enough cookies for a
 * 64K buffer plus 1 page for alignment (we have less kernel space in a 32-bit
 * kernel). Allocate enough windows to handle a 256K buffer with a DMA engine
 * whose sgllen is at least 65, and enough copybuf state pages to handle 2
 * pages (< 8K). We will still need to allocate the copy buffer during bind
 * though (if we need one). These can only be modified in /etc/system before
 * rootnex attach.
 */
#if defined(__amd64)
int rootnex_prealloc_cookies = 65;
int rootnex_prealloc_windows = 4;
int rootnex_prealloc_copybuf = 2;
#else
int rootnex_prealloc_cookies = 33;
int rootnex_prealloc_windows = 4;
int rootnex_prealloc_copybuf = 2;
#endif
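
/*
 * A minimal /etc/system sketch for the tunables above (values illustrative
 * only, not recommendations); as noted, they must be in place before the
 * rootnex attaches, i.e. set at boot:
 *
 *	set rootnex:rootnex_prealloc_cookies = 129
 *	set rootnex:rootnex_prealloc_windows = 8
 *	set rootnex:rootnex_prealloc_copybuf = 4
 */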

/* driver global state */
static rootnex_state_t *rootnex_state;

/* shortcut to rootnex counters */
static uint64_t *rootnex_cnt;

/*
 * XXX - does x86 even need these or are they left over from the SPARC days?
 */
/* statically defined integer/boolean properties for the root node */
static rootnex_intprop_t rootnex_intprp[] = {
	{ "PAGESIZE",			PAGESIZE },
	{ "MMU_PAGESIZE",		MMU_PAGESIZE },
	{ "MMU_PAGEOFFSET",		MMU_PAGEOFFSET },
	{ DDI_RELATIVE_ADDRESSING,	1 },
};
#define	NROOT_INTPROPS	(sizeof (rootnex_intprp) / sizeof (rootnex_intprop_t))
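
/*
 * These properties land on the root node (rootnex_add_props() below), so a
 * child driver can fetch them through the ordinary property interfaces. An
 * illustrative sketch, not code from this file ("dip" is assumed to be the
 * child's own dev_info pointer):
 *
 *	int pgsz;
 *
 *	pgsz = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
 *	    "MMU_PAGESIZE", MMU_PAGESIZE);
 *
 * The last argument is simply the fallback used if the lookup fails.
 */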

#ifdef __xpv
typedef maddr_t rootnex_addr_t;
#define	ROOTNEX_PADDR_TO_RBASE(xinfo, pa)	\
	(DOMAIN_IS_INITDOMAIN(xinfo) ? pa_to_ma(pa) : (pa))
#else
typedef paddr_t rootnex_addr_t;
#endif


static struct cb_ops rootnex_cb_ops = {
	nodev,		/* open */
	nodev,		/* close */
	nodev,		/* strategy */
	nodev,		/* print */
	nodev,		/* dump */
	nodev,		/* read */
	nodev,		/* write */
	nodev,		/* ioctl */
	nodev,		/* devmap */
	nodev,		/* mmap */
	nodev,		/* segmap */
	nochpoll,	/* chpoll */
	ddi_prop_op,	/* cb_prop_op */
	NULL,		/* struct streamtab */
	D_NEW | D_MP | D_HOTPLUG, /* compatibility flags */
	CB_REV,		/* Rev */
	nodev,		/* cb_aread */
	nodev		/* cb_awrite */
};

static int rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
    off_t offset, off_t len, caddr_t *vaddrp);
static int rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip,
    struct hat *hat, struct seg *seg, caddr_t addr,
    struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock);
static int rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep);
static int rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
    ddi_dma_handle_t *handlep);
static int rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle);
static int rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp);
static int rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle);
static int rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags);
static int rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp);
static int rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t cache_flags);
static int rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip,
    ddi_ctl_enum_t ctlop, void *arg, void *result);
static int rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap,
    ddi_iblock_cookie_t *ibc);
static int rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip,
    ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp, void *result);


static struct bus_ops rootnex_bus_ops = {
	BUSO_REV,
	rootnex_map,
	NULL,
	NULL,
	NULL,
	rootnex_map_fault,
	rootnex_dma_map,
	rootnex_dma_allochdl,
	rootnex_dma_freehdl,
	rootnex_dma_bindhdl,
	rootnex_dma_unbindhdl,
	rootnex_dma_sync,
	rootnex_dma_win,
	rootnex_dma_mctl,
	rootnex_ctlops,
	ddi_bus_prop_op,
	i_ddi_rootnex_get_eventcookie,
	i_ddi_rootnex_add_eventcall,
	i_ddi_rootnex_remove_eventcall,
	i_ddi_rootnex_post_event,
	0,			/* bus_intr_ctl */
	0,			/* bus_config */
	0,			/* bus_unconfig */
	rootnex_fm_init,	/* bus_fm_init */
	NULL,			/* bus_fm_fini */
	NULL,			/* bus_fm_access_enter */
	NULL,			/* bus_fm_access_exit */
	NULL,			/* bus_power */
	rootnex_intr_ops	/* bus_intr_op */
};

static int rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);

static struct dev_ops rootnex_ops = {
	DEVO_REV,
	0,
	ddi_no_info,
	nulldev,
	nulldev,
	rootnex_attach,
	rootnex_detach,
	nulldev,
	&rootnex_cb_ops,
	&rootnex_bus_ops
};

static struct modldrv rootnex_modldrv = {
	&mod_driverops,
	"i86pc root nexus %I%",
	&rootnex_ops
};

static struct modlinkage rootnex_modlinkage = {
	MODREV_1,
	(void *)&rootnex_modldrv,
	NULL
};


/*
 *  extern hacks
 */
extern struct seg_ops segdev_ops;
extern int ignore_hardware_nodes;	/* force flag from ddi_impl.c */
#ifdef	DDI_MAP_DEBUG
extern int ddi_map_debug_flag;
#define	ddi_map_debug	if (ddi_map_debug_flag) prom_printf
#endif
extern void i86_pp_map(page_t *pp, caddr_t kaddr);
extern void i86_va_map(caddr_t vaddr, struct as *asp, caddr_t kaddr);
extern int (*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *,
    psm_intr_op_t, int *);
extern int impl_ddi_sunbus_initchild(dev_info_t *dip);
extern void impl_ddi_sunbus_removechild(dev_info_t *dip);

/*
 * Use the device arena for device control register mappings.
 * Various kernel memory walkers (debugger, dtrace) need to know to
 * avoid this address range to prevent undesired device activity.
 */
extern void *device_arena_alloc(size_t size, int vm_flag);
extern void device_arena_free(void * vaddr, size_t size);


/*
 *  Internal functions
 */
static int rootnex_dma_init();
static void rootnex_add_props(dev_info_t *);
static int rootnex_ctl_reportdev(dev_info_t *dip);
static struct intrspec *rootnex_get_ispec(dev_info_t *rdip, int inum);
static int rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp);
static int rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp);
static int rootnex_map_handle(ddi_map_req_t *mp);
static void rootnex_clean_dmahdl(ddi_dma_impl_t *hp);
static int rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegsize);
static int rootnex_valid_bind_parms(ddi_dma_req_t *dmareq,
    ddi_dma_attr_t *attr);
static void rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
    rootnex_sglinfo_t *sglinfo);
static int rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
    rootnex_dma_t *dma, ddi_dma_attr_t *attr, int kmflag);
static int rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
    rootnex_dma_t *dma, ddi_dma_attr_t *attr);
static void rootnex_teardown_copybuf(rootnex_dma_t *dma);
static int rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
    ddi_dma_attr_t *attr, int kmflag);
static void rootnex_teardown_windows(rootnex_dma_t *dma);
static void rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
    rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset);
static void rootnex_setup_cookie(ddi_dma_obj_t *dmar_object,
    rootnex_dma_t *dma, ddi_dma_cookie_t *cookie, off_t cur_offset,
    size_t *copybuf_used, page_t **cur_pp);
static int rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp,
    rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie,
    ddi_dma_attr_t *attr, off_t cur_offset);
static int rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp,
    rootnex_dma_t *dma, rootnex_window_t **windowp,
    ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used);
static int rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp,
    rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie);
static int rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win,
    off_t offset, size_t size, uint_t cache_flags);
static int rootnex_verify_buffer(rootnex_dma_t *dma);
static int rootnex_dma_check(dev_info_t *dip, const void *handle,
    const void *comp_addr, const void *not_used);

/*
 * _init()
 *
 */
int
_init(void)
{

	rootnex_state = NULL;
	return (mod_install(&rootnex_modlinkage));
}


/*
 * _info()
 *
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&rootnex_modlinkage, modinfop));
}


/*
 * _fini()
 *
 */
int
_fini(void)
{
	return (EBUSY);
}


/*
 * rootnex_attach()
 *
 */
static int
rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int fmcap;
	int e;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	/*
	 * We should only have one instance of rootnex. Save it away since we
	 * don't have an easy way to get it back later.
	 */
	ASSERT(rootnex_state == NULL);
	rootnex_state = kmem_zalloc(sizeof (rootnex_state_t), KM_SLEEP);

	rootnex_state->r_dip = dip;
	rootnex_state->r_err_ibc = (ddi_iblock_cookie_t)ipltospl(15);
	rootnex_state->r_reserved_msg_printed = B_FALSE;
	rootnex_cnt = &rootnex_state->r_counters[0];

	/*
	 * Set minimum fm capability level for i86pc platforms and then
	 * initialize error handling. Since we're the rootnex, we don't
	 * care what's returned in the fmcap field.
	 */
	ddi_system_fmcap = DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE |
	    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE;
	fmcap = ddi_system_fmcap;
	ddi_fm_init(dip, &fmcap, &rootnex_state->r_err_ibc);

	/* initialize DMA related state */
	e = rootnex_dma_init();
	if (e != DDI_SUCCESS) {
		kmem_free(rootnex_state, sizeof (rootnex_state_t));
		return (DDI_FAILURE);
	}

	/* Add static root node properties */
	rootnex_add_props(dip);

	/* since we can't call ddi_report_dev() */
	cmn_err(CE_CONT, "?root nexus = %s\n", ddi_get_name(dip));

	/* Initialize rootnex event handle */
	i_ddi_rootnex_init_events(dip);

	return (DDI_SUCCESS);
}


/*
 * rootnex_detach()
 *
 */
/*ARGSUSED*/
static int
rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_SUSPEND:
		break;
	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}


/*
 * rootnex_dma_init()
 *
 */
/*ARGSUSED*/
static int
rootnex_dma_init()
{
	size_t bufsize;


	/*
	 * size of our cookie/window/copybuf state needed in dma bind that we
	 * pre-alloc in dma_alloc_handle
	 */
	rootnex_state->r_prealloc_cookies = rootnex_prealloc_cookies;
	rootnex_state->r_prealloc_size =
	    (rootnex_state->r_prealloc_cookies * sizeof (ddi_dma_cookie_t)) +
	    (rootnex_prealloc_windows * sizeof (rootnex_window_t)) +
	    (rootnex_prealloc_copybuf * sizeof (rootnex_pgmap_t));

	/*
	 * setup DDI DMA handle kmem cache, align each handle on 64 bytes,
	 * allocate 16 extra bytes for struct pointer alignment
	 * (p->dmai_private & dma->dp_prealloc_buffer)
	 */
	bufsize = sizeof (ddi_dma_impl_t) + sizeof (rootnex_dma_t) +
	    rootnex_state->r_prealloc_size + 0x10;
	rootnex_state->r_dmahdl_cache = kmem_cache_create("rootnex_dmahdl",
	    bufsize, 64, NULL, NULL, NULL, NULL, NULL, 0);
	if (rootnex_state->r_dmahdl_cache == NULL) {
		return (DDI_FAILURE);
	}

	/*
	 * allocate array to track which major numbers we have printed warnings
	 * for.
	 */
	rootnex_warn_list = kmem_zalloc(devcnt * sizeof (*rootnex_warn_list),
	    KM_SLEEP);

	return (DDI_SUCCESS);
}
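
/*
 * Note for context: the "rootnex_dmahdl" cache created above is what DMA
 * handle allocation later draws from, conceptually something like
 *
 *	hp = kmem_cache_alloc(rootnex_state->r_dmahdl_cache, kmflag);
 *
 * so the cookie/window/copybuf state sized by r_prealloc_size comes along
 * with every handle and the common bind path can avoid a separate
 * allocation. (Sketch only; see the alloc/bind routines for the real code.)
 */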


/*
 * rootnex_add_props()
 *
 */
static void
rootnex_add_props(dev_info_t *dip)
{
	rootnex_intprop_t *rpp;
	int i;

	/* Add static integer/boolean properties to the root node */
	rpp = rootnex_intprp;
	for (i = 0; i < NROOT_INTPROPS; i++) {
		(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip,
		    rpp[i].prop_name, rpp[i].prop_value);
	}
}



/*
 * *************************
 *  ctlops related routines
 * *************************
 */

/*
 * rootnex_ctlops()
 *
 */
/*ARGSUSED*/
static int
rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
    void *arg, void *result)
{
	int n, *ptr;
	struct ddi_parent_private_data *pdp;

	switch (ctlop) {
	case DDI_CTLOPS_DMAPMAPC:
		/*
		 * Return 'partial' to indicate that dma mapping
		 * has to be done in the main MMU.
		 */
		return (DDI_DMA_PARTIAL);

	case DDI_CTLOPS_BTOP:
		/*
		 * Convert byte count input to physical page units.
		 * (byte counts that are not a page-size multiple
		 * are rounded down)
		 */
		*(ulong_t *)result = btop(*(ulong_t *)arg);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_PTOB:
		/*
		 * Convert size in physical pages to bytes
		 */
		*(ulong_t *)result = ptob(*(ulong_t *)arg);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_BTOPR:
		/*
		 * Convert byte count input to physical page units
		 * (byte counts that are not a page-size multiple
		 * are rounded up)
		 */
		*(ulong_t *)result = btopr(*(ulong_t *)arg);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_INITCHILD:
		return (impl_ddi_sunbus_initchild(arg));

	case DDI_CTLOPS_UNINITCHILD:
		impl_ddi_sunbus_removechild(arg);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REPORTDEV:
		return (rootnex_ctl_reportdev(rdip));

	case DDI_CTLOPS_IOMIN:
		/*
		 * Nothing to do here but reflect back..
		 */
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REGSIZE:
	case DDI_CTLOPS_NREGS:
		break;

	case DDI_CTLOPS_SIDDEV:
		if (ndi_dev_is_prom_node(rdip))
			return (DDI_SUCCESS);
		if (ndi_dev_is_persistent_node(rdip))
			return (DDI_SUCCESS);
		return (DDI_FAILURE);

	case DDI_CTLOPS_POWER:
		return ((*pm_platform_power)((power_req_t *)arg));

	case DDI_CTLOPS_RESERVED0: /* Was DDI_CTLOPS_NINTRS, obsolete */
	case DDI_CTLOPS_RESERVED1: /* Was DDI_CTLOPS_POKE_INIT, obsolete */
	case DDI_CTLOPS_RESERVED2: /* Was DDI_CTLOPS_POKE_FLUSH, obsolete */
	case DDI_CTLOPS_RESERVED3: /* Was DDI_CTLOPS_POKE_FINI, obsolete */
	case DDI_CTLOPS_RESERVED4: /* Was DDI_CTLOPS_INTR_HILEVEL, obsolete */
	case DDI_CTLOPS_RESERVED5: /* Was DDI_CTLOPS_XLATE_INTRS, obsolete */
		if (!rootnex_state->r_reserved_msg_printed) {
			rootnex_state->r_reserved_msg_printed = B_TRUE;
			cmn_err(CE_WARN, "Failing ddi_ctlops call(s) for "
			    "1 or more reserved/obsolete operations.");
		}
		return (DDI_FAILURE);

	default:
		return (DDI_FAILURE);
	}
	/*
	 * The rest are for "hardware" properties
	 */
	if ((pdp = ddi_get_parent_data(rdip)) == NULL)
		return (DDI_FAILURE);

	if (ctlop == DDI_CTLOPS_NREGS) {
		ptr = (int *)result;
		*ptr = pdp->par_nreg;
	} else {
		off_t *size = (off_t *)result;

		ptr = (int *)arg;
		n = *ptr;
		if (n >= pdp->par_nreg) {
			return (DDI_FAILURE);
		}
		*size = (off_t)pdp->par_reg[n].regspec_size;
	}
	return (DDI_SUCCESS);
}
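
/*
 * For context (not part of this driver): the BTOP/PTOB/NREGS/REGSIZE cases
 * above are what service the generic DDI helpers a leaf driver calls, e.g.
 * something along these lines in a child's attach(9E):
 *
 *	int nregs;
 *	off_t regsize;
 *
 *	if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	if (ddi_dev_regsize(dip, 0, &regsize) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *
 * Both helpers are funneled up the tree as DDI_CTLOPS_NREGS/REGSIZE requests
 * and, for children of the root node, end up here where the answers come
 * from the parent private reg data.
 */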
6377c478bd9Sstevel@tonic-gate 
63812f080e7Smrj 
63912f080e7Smrj /*
64012f080e7Smrj  * rootnex_ctl_reportdev()
64112f080e7Smrj  *
64212f080e7Smrj  */
6437c478bd9Sstevel@tonic-gate static int
64412f080e7Smrj rootnex_ctl_reportdev(dev_info_t *dev)
64512f080e7Smrj {
64612f080e7Smrj 	int i, n, len, f_len = 0;
64712f080e7Smrj 	char *buf;
64812f080e7Smrj 
64912f080e7Smrj 	buf = kmem_alloc(REPORTDEV_BUFSIZE, KM_SLEEP);
65012f080e7Smrj 	f_len += snprintf(buf, REPORTDEV_BUFSIZE,
65112f080e7Smrj 	    "%s%d at root", ddi_driver_name(dev), ddi_get_instance(dev));
65212f080e7Smrj 	len = strlen(buf);
65312f080e7Smrj 
65412f080e7Smrj 	for (i = 0; i < sparc_pd_getnreg(dev); i++) {
65512f080e7Smrj 
65612f080e7Smrj 		struct regspec *rp = sparc_pd_getreg(dev, i);
65712f080e7Smrj 
65812f080e7Smrj 		if (i == 0)
65912f080e7Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
66012f080e7Smrj 			    ": ");
66112f080e7Smrj 		else
66212f080e7Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
66312f080e7Smrj 			    " and ");
66412f080e7Smrj 		len = strlen(buf);
66512f080e7Smrj 
66612f080e7Smrj 		switch (rp->regspec_bustype) {
66712f080e7Smrj 
66812f080e7Smrj 		case BTEISA:
66912f080e7Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
67012f080e7Smrj 			    "%s 0x%x", DEVI_EISA_NEXNAME, rp->regspec_addr);
67112f080e7Smrj 			break;
67212f080e7Smrj 
67312f080e7Smrj 		case BTISA:
67412f080e7Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
67512f080e7Smrj 			    "%s 0x%x", DEVI_ISA_NEXNAME, rp->regspec_addr);
67612f080e7Smrj 			break;
67712f080e7Smrj 
67812f080e7Smrj 		default:
67912f080e7Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
68012f080e7Smrj 			    "space %x offset %x",
68112f080e7Smrj 			    rp->regspec_bustype, rp->regspec_addr);
68212f080e7Smrj 			break;
68312f080e7Smrj 		}
68412f080e7Smrj 		len = strlen(buf);
68512f080e7Smrj 	}
68612f080e7Smrj 	for (i = 0, n = sparc_pd_getnintr(dev); i < n; i++) {
68712f080e7Smrj 		int pri;
68812f080e7Smrj 
68912f080e7Smrj 		if (i != 0) {
69012f080e7Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
69112f080e7Smrj 			    ",");
69212f080e7Smrj 			len = strlen(buf);
69312f080e7Smrj 		}
69412f080e7Smrj 		pri = INT_IPL(sparc_pd_getintr(dev, i)->intrspec_pri);
69512f080e7Smrj 		f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
69612f080e7Smrj 		    " sparc ipl %d", pri);
69712f080e7Smrj 		len = strlen(buf);
69812f080e7Smrj 	}
69912f080e7Smrj #ifdef DEBUG
70012f080e7Smrj 	if (f_len + 1 >= REPORTDEV_BUFSIZE) {
70112f080e7Smrj 		cmn_err(CE_NOTE, "next message is truncated: "
70212f080e7Smrj 		    "printed length 1024, real length %d", f_len);
70312f080e7Smrj 	}
70412f080e7Smrj #endif /* DEBUG */
70512f080e7Smrj 	cmn_err(CE_CONT, "?%s\n", buf);
70612f080e7Smrj 	kmem_free(buf, REPORTDEV_BUFSIZE);
70712f080e7Smrj 	return (DDI_SUCCESS);
70812f080e7Smrj }


/*
 * ******************
 *  map related code
 * ******************
 */

/*
 * rootnex_map()
 *
 */
static int
rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, off_t offset,
    off_t len, caddr_t *vaddrp)
{
	struct regspec *rp, tmp_reg;
	ddi_map_req_t mr = *mp;		/* Get private copy of request */
	int error;

	mp = &mr;

	switch (mp->map_op)  {
	case DDI_MO_MAP_LOCKED:
	case DDI_MO_UNMAP:
	case DDI_MO_MAP_HANDLE:
		break;
	default:
#ifdef	DDI_MAP_DEBUG
		cmn_err(CE_WARN, "rootnex_map: unimplemented map op %d.",
		    mp->map_op);
#endif	/* DDI_MAP_DEBUG */
		return (DDI_ME_UNIMPLEMENTED);
	}

	if (mp->map_flags & DDI_MF_USER_MAPPING)  {
#ifdef	DDI_MAP_DEBUG
		cmn_err(CE_WARN, "rootnex_map: unimplemented map type: user.");
#endif	/* DDI_MAP_DEBUG */
		return (DDI_ME_UNIMPLEMENTED);
	}

	/*
	 * First, if given an rnumber, convert it to a regspec...
	 * (Presumably, this is on behalf of a child of the root node?)
	 */

	if (mp->map_type == DDI_MT_RNUMBER)  {

		int rnumber = mp->map_obj.rnumber;
#ifdef	DDI_MAP_DEBUG
		static char *out_of_range =
		    "rootnex_map: Out of range rnumber <%d>, device <%s>";
#endif	/* DDI_MAP_DEBUG */

		rp = i_ddi_rnumber_to_regspec(rdip, rnumber);
		if (rp == NULL)  {
#ifdef	DDI_MAP_DEBUG
			cmn_err(CE_WARN, out_of_range, rnumber,
			    ddi_get_name(rdip));
#endif	/* DDI_MAP_DEBUG */
			return (DDI_ME_RNUMBER_RANGE);
		}

		/*
		 * Convert the given ddi_map_req_t from rnumber to regspec...
		 */

		mp->map_type = DDI_MT_REGSPEC;
		mp->map_obj.rp = rp;
	}

	/*
	 * Adjust offset and length corresponding to called values...
	 * XXX: A non-zero length means override the one in the regspec
	 * XXX: (regardless of what's in the parent's range?)
	 */

	tmp_reg = *(mp->map_obj.rp);		/* Preserve underlying data */
	rp = mp->map_obj.rp = &tmp_reg;		/* Use tmp_reg in request */

#ifdef	DDI_MAP_DEBUG
	cmn_err(CE_CONT, "rootnex: <%s,%s> <0x%x, 0x%x, 0x%d> offset %d len %d "
	    "handle 0x%x\n", ddi_get_name(dip), ddi_get_name(rdip),
	    rp->regspec_bustype, rp->regspec_addr, rp->regspec_size, offset,
	    len, mp->map_handlep);
#endif	/* DDI_MAP_DEBUG */

	/*
	 * I/O or memory mapping:
	 *
	 *	<bustype=0, addr=x, len=x>: memory
	 *	<bustype=1, addr=x, len=x>: i/o
	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
	 */

	if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) {
		cmn_err(CE_WARN, "<%s,%s> invalid register spec"
		    " <0x%x, 0x%x, 0x%x>", ddi_get_name(dip),
		    ddi_get_name(rdip), rp->regspec_bustype,
		    rp->regspec_addr, rp->regspec_size);
		return (DDI_ME_INVAL);
	}

	if (rp->regspec_bustype > 1 && rp->regspec_addr == 0) {
		/*
		 * compatibility i/o mapping
		 */
		rp->regspec_bustype += (uint_t)offset;
	} else {
		/*
		 * Normal memory or i/o mapping
		 */
		rp->regspec_addr += (uint_t)offset;
	}

	if (len != 0)
		rp->regspec_size = (uint_t)len;

#ifdef	DDI_MAP_DEBUG
	cmn_err(CE_CONT, "             <%s,%s> <0x%x, 0x%x, 0x%d> offset %d "
	    "len %d handle 0x%x\n", ddi_get_name(dip), ddi_get_name(rdip),
	    rp->regspec_bustype, rp->regspec_addr, rp->regspec_size,
	    offset, len, mp->map_handlep);
#endif	/* DDI_MAP_DEBUG */

	/*
	 * Apply any parent ranges at this level, if applicable.
	 * (This is where nexus specific regspec translation takes place.
	 * Use of this function is implicit agreement that translation is
	 * provided via ddi_apply_range.)
	 */

#ifdef	DDI_MAP_DEBUG
	ddi_map_debug("applying range of parent <%s> to child <%s>...\n",
	    ddi_get_name(dip), ddi_get_name(rdip));
#endif	/* DDI_MAP_DEBUG */

	if ((error = i_ddi_apply_range(dip, rdip, mp->map_obj.rp)) != 0)
		return (error);

	switch (mp->map_op)  {
	case DDI_MO_MAP_LOCKED:

		/*
		 * Set up the locked down kernel mapping to the regspec...
		 */

		return (rootnex_map_regspec(mp, vaddrp));

	case DDI_MO_UNMAP:

		/*
		 * Release mapping...
		 */

		return (rootnex_unmap_regspec(mp, vaddrp));

	case DDI_MO_MAP_HANDLE:

		return (rootnex_map_handle(mp));

	default:
		return (DDI_ME_UNIMPLEMENTED);
	}
}
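
/*
 * A usage sketch (typical leaf driver code, not code from this file):
 * register mappings normally reach rootnex_map() via ddi_regs_map_setup(),
 * which builds the DDI_MT_RNUMBER/DDI_MO_MAP_LOCKED request handled above.
 *
 *	ddi_device_acc_attr_t attr;
 *	ddi_acc_handle_t regs_handle;
 *	caddr_t regs;
 *
 *	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
 *	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
 *	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
 *	if (ddi_regs_map_setup(dip, 1, &regs, 0, 0, &attr,
 *	    &regs_handle) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *
 * The rnumber (1) and the zero offset/length are illustrative; a zero
 * length means "use the size from the regspec", per the XXX note above.
 */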


/*
 * rootnex_map_fault()
 *
 *	fault in mappings for requestors
 */
/*ARGSUSED*/
static int
rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip, struct hat *hat,
    struct seg *seg, caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot,
    uint_t lock)
{

#ifdef	DDI_MAP_DEBUG
	ddi_map_debug("rootnex_map_fault: address <%x> pfn <%x>", addr, pfn);
	ddi_map_debug(" Seg <%s>\n",
	    seg->s_ops == &segdev_ops ? "segdev" :
	    seg == &kvseg ? "segkmem" : "NONE!");
#endif	/* DDI_MAP_DEBUG */

	/*
	 * This is all terribly broken, but it is a start
	 *
	 * XXX	Note that this test means that segdev_ops
	 *	must be exported from seg_dev.c.
	 * XXX	What about devices with their own segment drivers?
	 */
	if (seg->s_ops == &segdev_ops) {
		struct segdev_data *sdp = (struct segdev_data *)seg->s_data;

		if (hat == NULL) {
			/*
			 * This is one plausible interpretation of
			 * a null hat, i.e. use the first hat on the
			 * address space hat list, which by convention is
			 * the hat of the system MMU.  An alternative
			 * would be to panic, which might well be better.
			 */
			ASSERT(AS_READ_HELD(seg->s_as, &seg->s_as->a_lock));
			hat = seg->s_as->a_hat;
			cmn_err(CE_NOTE, "rootnex_map_fault: nil hat");
		}
		hat_devload(hat, addr, MMU_PAGESIZE, pfn, prot | sdp->hat_attr,
		    (lock ? HAT_LOAD_LOCK : HAT_LOAD));
	} else if (seg == &kvseg && dp == NULL) {
		hat_devload(kas.a_hat, addr, MMU_PAGESIZE, pfn, prot,
		    HAT_LOAD_LOCK);
	} else
		return (DDI_FAILURE);
	return (DDI_SUCCESS);
}


/*
 * rootnex_map_regspec()
 *     we don't support mapping of I/O cards above 4Gb
 */
static int
rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp)
{
	rootnex_addr_t rbase;
	void *cvaddr;
	uint_t npages, pgoffset;
	struct regspec *rp;
	ddi_acc_hdl_t *hp;
	ddi_acc_impl_t *ap;
	uint_t	hat_acc_flags;
	paddr_t pbase;

	rp = mp->map_obj.rp;
	hp = mp->map_handlep;

#ifdef	DDI_MAP_DEBUG
	ddi_map_debug(
	    "rootnex_map_regspec: <0x%x 0x%x 0x%x> handle 0x%x\n",
	    rp->regspec_bustype, rp->regspec_addr,
	    rp->regspec_size, mp->map_handlep);
#endif	/* DDI_MAP_DEBUG */

	/*
	 * I/O or memory mapping
	 *
	 *	<bustype=0, addr=x, len=x>: memory
	 *	<bustype=1, addr=x, len=x>: i/o
	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
	 */

	if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) {
		cmn_err(CE_WARN, "rootnex: invalid register spec"
		    " <0x%x, 0x%x, 0x%x>", rp->regspec_bustype,
		    rp->regspec_addr, rp->regspec_size);
		return (DDI_FAILURE);
	}

	if (rp->regspec_bustype != 0) {
		/*
		 * I/O space - needs a handle.
		 */
		if (hp == NULL) {
			return (DDI_FAILURE);
		}
		ap = (ddi_acc_impl_t *)hp->ah_platform_private;
		ap->ahi_acc_attr |= DDI_ACCATTR_IO_SPACE;
		impl_acc_hdl_init(hp);

		if (mp->map_flags & DDI_MF_DEVICE_MAPPING) {
#ifdef  DDI_MAP_DEBUG
			ddi_map_debug("rootnex_map_regspec: mmap() "
			    "to I/O space is not supported.\n");
#endif  /* DDI_MAP_DEBUG */
			return (DDI_ME_INVAL);
		} else {
			/*
			 * 1275-compliant vs. compatibility i/o mapping
			 */
			*vaddrp =
			    (rp->regspec_bustype > 1 && rp->regspec_addr == 0) ?
			    ((caddr_t)(uintptr_t)rp->regspec_bustype) :
			    ((caddr_t)(uintptr_t)rp->regspec_addr);
#ifdef __xpv
			if (DOMAIN_IS_INITDOMAIN(xen_info)) {
				hp->ah_pfn = xen_assign_pfn(
				    mmu_btop((ulong_t)rp->regspec_addr &
				    MMU_PAGEMASK));
			} else {
				hp->ah_pfn = mmu_btop(
				    (ulong_t)rp->regspec_addr & MMU_PAGEMASK);
			}
#else
			hp->ah_pfn = mmu_btop((ulong_t)rp->regspec_addr &
			    MMU_PAGEMASK);
#endif
			hp->ah_pnum = mmu_btopr(rp->regspec_size +
			    (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET);
		}

#ifdef	DDI_MAP_DEBUG
		ddi_map_debug(
	    "rootnex_map_regspec: \"Mapping\" %d bytes I/O space at 0x%x\n",
		    rp->regspec_size, *vaddrp);
#endif	/* DDI_MAP_DEBUG */
		return (DDI_SUCCESS);
	}

	/*
	 * Memory space
	 */

	if (hp != NULL) {
		/*
		 * hat layer ignores
		 * hp->ah_acc.devacc_attr_endian_flags.
		 */
		switch (hp->ah_acc.devacc_attr_dataorder) {
		case DDI_STRICTORDER_ACC:
			hat_acc_flags = HAT_STRICTORDER;
			break;
		case DDI_UNORDERED_OK_ACC:
			hat_acc_flags = HAT_UNORDERED_OK;
			break;
		case DDI_MERGING_OK_ACC:
			hat_acc_flags = HAT_MERGING_OK;
			break;
		case DDI_LOADCACHING_OK_ACC:
			hat_acc_flags = HAT_LOADCACHING_OK;
			break;
		case DDI_STORECACHING_OK_ACC:
			hat_acc_flags = HAT_STORECACHING_OK;
			break;
		}
		ap = (ddi_acc_impl_t *)hp->ah_platform_private;
		ap->ahi_acc_attr |= DDI_ACCATTR_CPU_VADDR;
		impl_acc_hdl_init(hp);
		hp->ah_hat_flags = hat_acc_flags;
	} else {
		hat_acc_flags = HAT_STRICTORDER;
	}

	rbase = (rootnex_addr_t)(rp->regspec_addr & MMU_PAGEMASK);
#ifdef __xpv
	/*
	 * If we're dom0, we're using a real device so we need to translate
	 * the MA to a PA.
	 */
	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
		pbase = pfn_to_pa(xen_assign_pfn(mmu_btop(rbase)));
	} else {
		pbase = rbase;
	}
#else
	pbase = rbase;
#endif
	pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET;

	if (rp->regspec_size == 0) {
#ifdef  DDI_MAP_DEBUG
		ddi_map_debug("rootnex_map_regspec: zero regspec_size\n");
#endif  /* DDI_MAP_DEBUG */
		return (DDI_ME_INVAL);
	}

	if (mp->map_flags & DDI_MF_DEVICE_MAPPING) {
		/* extra cast to make gcc happy */
		*vaddrp = (caddr_t)((uintptr_t)mmu_btop(pbase));
	} else {
		npages = mmu_btopr(rp->regspec_size + pgoffset);

#ifdef	DDI_MAP_DEBUG
		ddi_map_debug("rootnex_map_regspec: Mapping %d pages "
		    "physical %llx", npages, pbase);
#endif	/* DDI_MAP_DEBUG */

		cvaddr = device_arena_alloc(ptob(npages), VM_NOSLEEP);
		if (cvaddr == NULL)
			return (DDI_ME_NORESOURCES);

		/*
		 * Now map in the pages we've allocated...
		 */
		hat_devload(kas.a_hat, cvaddr, mmu_ptob(npages),
		    mmu_btop(pbase), mp->map_prot | hat_acc_flags,
		    HAT_LOAD_LOCK);
		*vaddrp = (caddr_t)cvaddr + pgoffset;

		/* save away pfn and npages for FMA */
		hp = mp->map_handlep;
		if (hp) {
			hp->ah_pfn = mmu_btop(pbase);
			hp->ah_pnum = npages;
		}
	}

#ifdef	DDI_MAP_DEBUG
	ddi_map_debug("at virtual 0x%x\n", *vaddrp);
#endif	/* DDI_MAP_DEBUG */
	return (DDI_SUCCESS);
}


/*
 * rootnex_unmap_regspec()
 *
 */
static int
rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp)
{
	caddr_t addr = (caddr_t)*vaddrp;
	uint_t npages, pgoffset;
	struct regspec *rp;

	if (mp->map_flags & DDI_MF_DEVICE_MAPPING)
		return (0);

	rp = mp->map_obj.rp;

	if (rp->regspec_size == 0) {
#ifdef  DDI_MAP_DEBUG
		ddi_map_debug("rootnex_unmap_regspec: zero regspec_size\n");
#endif  /* DDI_MAP_DEBUG */
		return (DDI_ME_INVAL);
	}

	/*
	 * I/O or memory mapping:
	 *
	 *	<bustype=0, addr=x, len=x>: memory
	 *	<bustype=1, addr=x, len=x>: i/o
	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
	 */
	if (rp->regspec_bustype != 0) {
		/*
		 * This is I/O space, which requires no particular
		 * processing on unmap since it isn't mapped in the
		 * first place.
		 */
		return (DDI_SUCCESS);
	}

	/*
	 * Memory space
	 */
	pgoffset = (uintptr_t)addr & MMU_PAGEOFFSET;
	npages = mmu_btopr(rp->regspec_size + pgoffset);
	hat_unload(kas.a_hat, addr - pgoffset, ptob(npages), HAT_UNLOAD_UNLOCK);
	device_arena_free(addr - pgoffset, ptob(npages));

	/*
	 * Destroy the pointer - the mapping has logically gone
	 */
	*vaddrp = NULL;

	return (DDI_SUCCESS);
}
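
/*
 * The matching teardown for the setup sketch after rootnex_map(): a leaf
 * driver releases its register mapping with
 *
 *	ddi_regs_map_free(&regs_handle);
 *
 * which sends a DDI_MO_UNMAP request up the tree; for memory-space regspecs
 * that ends up in rootnex_unmap_regspec() above, undoing the hat load and
 * returning the device arena space that rootnex_map_regspec() allocated.
 */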
11697c478bd9Sstevel@tonic-gate 
117012f080e7Smrj 
117112f080e7Smrj /*
117212f080e7Smrj  * rootnex_map_handle()
117312f080e7Smrj  *
117412f080e7Smrj  */
11757c478bd9Sstevel@tonic-gate static int
117612f080e7Smrj rootnex_map_handle(ddi_map_req_t *mp)
11777c478bd9Sstevel@tonic-gate {
1178843e1988Sjohnlev 	rootnex_addr_t rbase;
117912f080e7Smrj 	ddi_acc_hdl_t *hp;
118012f080e7Smrj 	uint_t pgoffset;
118112f080e7Smrj 	struct regspec *rp;
1182843e1988Sjohnlev 	paddr_t pbase;
11837c478bd9Sstevel@tonic-gate 
118412f080e7Smrj 	rp = mp->map_obj.rp;
11857c478bd9Sstevel@tonic-gate 
118612f080e7Smrj #ifdef	DDI_MAP_DEBUG
118712f080e7Smrj 	ddi_map_debug(
118812f080e7Smrj 	    "rootnex_map_handle: <0x%x 0x%x 0x%x> handle 0x%x\n",
118912f080e7Smrj 	    rp->regspec_bustype, rp->regspec_addr,
119012f080e7Smrj 	    rp->regspec_size, mp->map_handlep);
119112f080e7Smrj #endif	/* DDI_MAP_DEBUG */
11927c478bd9Sstevel@tonic-gate 
11937c478bd9Sstevel@tonic-gate 	/*
119412f080e7Smrj 	 * I/O or memory mapping:
119512f080e7Smrj 	 *
119612f080e7Smrj 	 *	<bustype=0, addr=x, len=x>: memory
119712f080e7Smrj 	 *	<bustype=1, addr=x, len=x>: i/o
119812f080e7Smrj 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
11997c478bd9Sstevel@tonic-gate 	 */
120012f080e7Smrj 	if (rp->regspec_bustype != 0) {
120112f080e7Smrj 		/*
120212f080e7Smrj 		 * This refers to I/O space, and we don't support "mapping"
120312f080e7Smrj 		 * I/O space to a user.
120412f080e7Smrj 		 */
12057c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
12067c478bd9Sstevel@tonic-gate 	}
12077c478bd9Sstevel@tonic-gate 
12087c478bd9Sstevel@tonic-gate 	/*
120912f080e7Smrj 	 * Set up the hat_flags for the mapping.
12107c478bd9Sstevel@tonic-gate 	 */
121112f080e7Smrj 	hp = mp->map_handlep;
12127c478bd9Sstevel@tonic-gate 
121312f080e7Smrj 	switch (hp->ah_acc.devacc_attr_endian_flags) {
121412f080e7Smrj 	case DDI_NEVERSWAP_ACC:
121512f080e7Smrj 		hp->ah_hat_flags = HAT_NEVERSWAP | HAT_STRICTORDER;
12167c478bd9Sstevel@tonic-gate 		break;
121712f080e7Smrj 	case DDI_STRUCTURE_LE_ACC:
121812f080e7Smrj 		hp->ah_hat_flags = HAT_STRUCTURE_LE;
12197c478bd9Sstevel@tonic-gate 		break;
122012f080e7Smrj 	case DDI_STRUCTURE_BE_ACC:
12217c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
12227c478bd9Sstevel@tonic-gate 	default:
122312f080e7Smrj 		return (DDI_REGS_ACC_CONFLICT);
12247c478bd9Sstevel@tonic-gate 	}
12257c478bd9Sstevel@tonic-gate 
122612f080e7Smrj 	switch (hp->ah_acc.devacc_attr_dataorder) {
122712f080e7Smrj 	case DDI_STRICTORDER_ACC:
12287c478bd9Sstevel@tonic-gate 		break;
122912f080e7Smrj 	case DDI_UNORDERED_OK_ACC:
123012f080e7Smrj 		hp->ah_hat_flags |= HAT_UNORDERED_OK;
12317c478bd9Sstevel@tonic-gate 		break;
123212f080e7Smrj 	case DDI_MERGING_OK_ACC:
123312f080e7Smrj 		hp->ah_hat_flags |= HAT_MERGING_OK;
12347c478bd9Sstevel@tonic-gate 		break;
123512f080e7Smrj 	case DDI_LOADCACHING_OK_ACC:
123612f080e7Smrj 		hp->ah_hat_flags |= HAT_LOADCACHING_OK;
123712f080e7Smrj 		break;
123812f080e7Smrj 	case DDI_STORECACHING_OK_ACC:
123912f080e7Smrj 		hp->ah_hat_flags |= HAT_STORECACHING_OK;
124012f080e7Smrj 		break;
12417c478bd9Sstevel@tonic-gate 	default:
12427c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
12437c478bd9Sstevel@tonic-gate 	}
12447c478bd9Sstevel@tonic-gate 
1245843e1988Sjohnlev 	rbase = (rootnex_addr_t)rp->regspec_addr &
1246843e1988Sjohnlev 	    (~(rootnex_addr_t)MMU_PAGEOFFSET);
1247843e1988Sjohnlev 	pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET;
12487c478bd9Sstevel@tonic-gate 
124912f080e7Smrj 	if (rp->regspec_size == 0)
125012f080e7Smrj 		return (DDI_ME_INVAL);
12517c478bd9Sstevel@tonic-gate 
1252843e1988Sjohnlev #ifdef __xpv
1253843e1988Sjohnlev 	/*
1254843e1988Sjohnlev 	 * If we're dom0, we're using a real device so we need to translate
1255843e1988Sjohnlev 	 * the MA to a PA.
1256843e1988Sjohnlev 	 */
1257843e1988Sjohnlev 	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
1258843e1988Sjohnlev 		pbase = pfn_to_pa(xen_assign_pfn(mmu_btop(rbase))) |
1259843e1988Sjohnlev 		    (rbase & MMU_PAGEOFFSET);
1260843e1988Sjohnlev 	} else {
1261843e1988Sjohnlev 		pbase = rbase;
1262843e1988Sjohnlev 	}
1263843e1988Sjohnlev #else
1264843e1988Sjohnlev 	pbase = rbase;
1265843e1988Sjohnlev #endif
1266843e1988Sjohnlev 
1267843e1988Sjohnlev 	hp->ah_pfn = mmu_btop(pbase);
126812f080e7Smrj 	hp->ah_pnum = mmu_btopr(rp->regspec_size + pgoffset);
12697c478bd9Sstevel@tonic-gate 
12707c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
12717c478bd9Sstevel@tonic-gate }
12727c478bd9Sstevel@tonic-gate 
127312f080e7Smrj 
127412f080e7Smrj 
12757c478bd9Sstevel@tonic-gate /*
127612f080e7Smrj  * ************************
127712f080e7Smrj  *  interrupt related code
127812f080e7Smrj  * ************************
12797c478bd9Sstevel@tonic-gate  */
12807c478bd9Sstevel@tonic-gate 
12817c478bd9Sstevel@tonic-gate /*
128212f080e7Smrj  * rootnex_intr_ops()
12837c478bd9Sstevel@tonic-gate  *	bus_intr_op() function for interrupt support
12847c478bd9Sstevel@tonic-gate  */
12857c478bd9Sstevel@tonic-gate /* ARGSUSED */
12867c478bd9Sstevel@tonic-gate static int
12877c478bd9Sstevel@tonic-gate rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op,
12887c478bd9Sstevel@tonic-gate     ddi_intr_handle_impl_t *hdlp, void *result)
12897c478bd9Sstevel@tonic-gate {
12907c478bd9Sstevel@tonic-gate 	struct intrspec			*ispec;
12917c478bd9Sstevel@tonic-gate 	struct ddi_parent_private_data	*pdp;
12927c478bd9Sstevel@tonic-gate 
12937c478bd9Sstevel@tonic-gate 	DDI_INTR_NEXDBG((CE_CONT,
12947c478bd9Sstevel@tonic-gate 	    "rootnex_intr_ops: pdip = %p, rdip = %p, intr_op = %x, hdlp = %p\n",
12957c478bd9Sstevel@tonic-gate 	    (void *)pdip, (void *)rdip, intr_op, (void *)hdlp));
12967c478bd9Sstevel@tonic-gate 
12977c478bd9Sstevel@tonic-gate 	/* Process the interrupt operation */
12987c478bd9Sstevel@tonic-gate 	switch (intr_op) {
12997c478bd9Sstevel@tonic-gate 	case DDI_INTROP_GETCAP:
13007c478bd9Sstevel@tonic-gate 		/* First check with pcplusmp */
13017c478bd9Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
13027c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13037c478bd9Sstevel@tonic-gate 
13047c478bd9Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_CAP, result)) {
13057c478bd9Sstevel@tonic-gate 			*(int *)result = 0;
13067c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13077c478bd9Sstevel@tonic-gate 		}
13087c478bd9Sstevel@tonic-gate 		break;
13097c478bd9Sstevel@tonic-gate 	case DDI_INTROP_SETCAP:
13107c478bd9Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
13117c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13127c478bd9Sstevel@tonic-gate 
13137c478bd9Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_CAP, result))
13147c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13157c478bd9Sstevel@tonic-gate 		break;
13167c478bd9Sstevel@tonic-gate 	case DDI_INTROP_ALLOC:
13177c478bd9Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
13187c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13197c478bd9Sstevel@tonic-gate 		hdlp->ih_pri = ispec->intrspec_pri;
13207c478bd9Sstevel@tonic-gate 		*(int *)result = hdlp->ih_scratch1;
13217c478bd9Sstevel@tonic-gate 		break;
13227c478bd9Sstevel@tonic-gate 	case DDI_INTROP_FREE:
13237c478bd9Sstevel@tonic-gate 		pdp = ddi_get_parent_data(rdip);
13247c478bd9Sstevel@tonic-gate 		/*
13257c478bd9Sstevel@tonic-gate 		 * Special case for the 'pcic' driver only.
13267c478bd9Sstevel@tonic-gate 		 * If an intrspec was created for it, clean it up here
13277c478bd9Sstevel@tonic-gate 		 * See detailed comments on this in the function
13287c478bd9Sstevel@tonic-gate 		 * rootnex_get_ispec().
13297c478bd9Sstevel@tonic-gate 		 */
13307c478bd9Sstevel@tonic-gate 		if (pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) {
13317c478bd9Sstevel@tonic-gate 			kmem_free(pdp->par_intr, sizeof (struct intrspec) *
13327c478bd9Sstevel@tonic-gate 			    pdp->par_nintr);
13337c478bd9Sstevel@tonic-gate 			/*
13347c478bd9Sstevel@tonic-gate 			 * Set it to zero so that the
13357c478bd9Sstevel@tonic-gate 			 * DDI framework doesn't free it again.
13367c478bd9Sstevel@tonic-gate 			 */
13377c478bd9Sstevel@tonic-gate 			pdp->par_intr = NULL;
13387c478bd9Sstevel@tonic-gate 			pdp->par_nintr = 0;
13397c478bd9Sstevel@tonic-gate 		}
13407c478bd9Sstevel@tonic-gate 		break;
13417c478bd9Sstevel@tonic-gate 	case DDI_INTROP_GETPRI:
13427c478bd9Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
13437c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13447c478bd9Sstevel@tonic-gate 		*(int *)result = ispec->intrspec_pri;
13457c478bd9Sstevel@tonic-gate 		break;
13467c478bd9Sstevel@tonic-gate 	case DDI_INTROP_SETPRI:
13477c478bd9Sstevel@tonic-gate 		/* Validate the interrupt priority passed to us */
13487c478bd9Sstevel@tonic-gate 		if (*(int *)result > LOCK_LEVEL)
13497c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13507c478bd9Sstevel@tonic-gate 
13517c478bd9Sstevel@tonic-gate 		/* Ensure that PSM is all initialized and ispec is ok */
13527c478bd9Sstevel@tonic-gate 		if ((psm_intr_ops == NULL) ||
13537c478bd9Sstevel@tonic-gate 		    ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL))
13547c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13557c478bd9Sstevel@tonic-gate 
13567c478bd9Sstevel@tonic-gate 		/* Change the priority */
13577c478bd9Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_PRI, result) ==
13587c478bd9Sstevel@tonic-gate 		    PSM_FAILURE)
13597c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13607c478bd9Sstevel@tonic-gate 
13617c478bd9Sstevel@tonic-gate 		/* update the ispec with the new priority */
13627c478bd9Sstevel@tonic-gate 		ispec->intrspec_pri =  *(int *)result;
13637c478bd9Sstevel@tonic-gate 		break;
13647c478bd9Sstevel@tonic-gate 	case DDI_INTROP_ADDISR:
13657c478bd9Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
13667c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13677c478bd9Sstevel@tonic-gate 		ispec->intrspec_func = hdlp->ih_cb_func;
13687c478bd9Sstevel@tonic-gate 		break;
13697c478bd9Sstevel@tonic-gate 	case DDI_INTROP_REMISR:
13707c478bd9Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
13717c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13727c478bd9Sstevel@tonic-gate 		ispec->intrspec_func = (uint_t (*)()) 0;
13737c478bd9Sstevel@tonic-gate 		break;
13747c478bd9Sstevel@tonic-gate 	case DDI_INTROP_ENABLE:
13757c478bd9Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
13767c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13777c478bd9Sstevel@tonic-gate 
13787c478bd9Sstevel@tonic-gate 		/* Call psmi to translate irq with the dip */
13797c478bd9Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
13807c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13817c478bd9Sstevel@tonic-gate 
13827a364d25Sschwartz 		((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
13837c478bd9Sstevel@tonic-gate 		(void) (*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_XLATE_VECTOR,
13847c478bd9Sstevel@tonic-gate 		    (int *)&hdlp->ih_vector);
13857c478bd9Sstevel@tonic-gate 
13867c478bd9Sstevel@tonic-gate 		/* Add the interrupt handler */
13877c478bd9Sstevel@tonic-gate 		if (!add_avintr((void *)hdlp, ispec->intrspec_pri,
13887c478bd9Sstevel@tonic-gate 		    hdlp->ih_cb_func, DEVI(rdip)->devi_name, hdlp->ih_vector,
13897a364d25Sschwartz 		    hdlp->ih_cb_arg1, hdlp->ih_cb_arg2, NULL, rdip))
13907c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13917c478bd9Sstevel@tonic-gate 		break;
13927c478bd9Sstevel@tonic-gate 	case DDI_INTROP_DISABLE:
13937c478bd9Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
13947c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13957c478bd9Sstevel@tonic-gate 
13967c478bd9Sstevel@tonic-gate 		/* Call psm_ops() to translate irq with the dip */
13977c478bd9Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
13987c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13997c478bd9Sstevel@tonic-gate 
14007a364d25Sschwartz 		((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
14017c478bd9Sstevel@tonic-gate 		(void) (*psm_intr_ops)(rdip, hdlp,
14027c478bd9Sstevel@tonic-gate 		    PSM_INTR_OP_XLATE_VECTOR, (int *)&hdlp->ih_vector);
14037c478bd9Sstevel@tonic-gate 
14047c478bd9Sstevel@tonic-gate 		/* Remove the interrupt handler */
14057c478bd9Sstevel@tonic-gate 		rem_avintr((void *)hdlp, ispec->intrspec_pri,
14067c478bd9Sstevel@tonic-gate 		    hdlp->ih_cb_func, hdlp->ih_vector);
14077c478bd9Sstevel@tonic-gate 		break;
14087c478bd9Sstevel@tonic-gate 	case DDI_INTROP_SETMASK:
14097c478bd9Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14107c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14117c478bd9Sstevel@tonic-gate 
14127c478bd9Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_MASK, NULL))
14137c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14147c478bd9Sstevel@tonic-gate 		break;
14157c478bd9Sstevel@tonic-gate 	case DDI_INTROP_CLRMASK:
14167c478bd9Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14177c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14187c478bd9Sstevel@tonic-gate 
14197c478bd9Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_CLEAR_MASK, NULL))
14207c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14217c478bd9Sstevel@tonic-gate 		break;
14227c478bd9Sstevel@tonic-gate 	case DDI_INTROP_GETPENDING:
14237c478bd9Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14247c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14257c478bd9Sstevel@tonic-gate 
14267c478bd9Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_PENDING,
14277c478bd9Sstevel@tonic-gate 		    result)) {
14287c478bd9Sstevel@tonic-gate 			*(int *)result = 0;
14297c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14307c478bd9Sstevel@tonic-gate 		}
14317c478bd9Sstevel@tonic-gate 		break;
1432a54f81fbSanish 	case DDI_INTROP_NAVAIL:
14337c478bd9Sstevel@tonic-gate 	case DDI_INTROP_NINTRS:
1434a54f81fbSanish 		*(int *)result = i_ddi_get_intx_nintrs(rdip);
1435a54f81fbSanish 		if (*(int *)result == 0) {
14367c478bd9Sstevel@tonic-gate 			/*
14377c478bd9Sstevel@tonic-gate 			 * Special case for the 'pcic' driver only. This
14387c478bd9Sstevel@tonic-gate 			 * driver is a child of the 'isa' and 'rootnex' drivers.
14397c478bd9Sstevel@tonic-gate 			 *
14407c478bd9Sstevel@tonic-gate 			 * See detailed comments on this in the function
14417c478bd9Sstevel@tonic-gate 			 * rootnex_get_ispec().
14427c478bd9Sstevel@tonic-gate 			 *
14437c478bd9Sstevel@tonic-gate 			 * Children of 'pcic' send the 'NINTRS' request all the
14447c478bd9Sstevel@tonic-gate 			 * way to the rootnex driver, but the 'pdp->par_nintr'
14457c478bd9Sstevel@tonic-gate 			 * field may not be initialized. So, we fake it here
14467c478bd9Sstevel@tonic-gate 			 * to return 1 (a la what the PCMCIA nexus does).
14477c478bd9Sstevel@tonic-gate 			 */
14487c478bd9Sstevel@tonic-gate 			if (strcmp(ddi_get_name(rdip), "pcic") == 0)
14497c478bd9Sstevel@tonic-gate 				*(int *)result = 1;
1450a54f81fbSanish 			else
1451a54f81fbSanish 				return (DDI_FAILURE);
14527c478bd9Sstevel@tonic-gate 		}
14537c478bd9Sstevel@tonic-gate 		break;
14547c478bd9Sstevel@tonic-gate 	case DDI_INTROP_SUPPORTED_TYPES:
1455a54f81fbSanish 		*(int *)result = DDI_INTR_TYPE_FIXED;	/* Always ... */
14567c478bd9Sstevel@tonic-gate 		break;
14577c478bd9Sstevel@tonic-gate 	default:
14587c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
14597c478bd9Sstevel@tonic-gate 	}
14607c478bd9Sstevel@tonic-gate 
14617c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
14627c478bd9Sstevel@tonic-gate }
14637c478bd9Sstevel@tonic-gate 
14647c478bd9Sstevel@tonic-gate 
14657c478bd9Sstevel@tonic-gate /*
146612f080e7Smrj  * rootnex_get_ispec()
146712f080e7Smrj  *	convert an interrupt number to an interrupt specification.
146812f080e7Smrj  *	The interrupt number determines which interrupt spec will be
146912f080e7Smrj  *	returned if more than one exists.
147012f080e7Smrj  *
147112f080e7Smrj  *	Look into the parent private data area of the 'rdip' to find out
147212f080e7Smrj  *	the interrupt specification.  First check to make sure there is
147312f080e7Smrj  *	one that matches "inumber" and then return a pointer to it.
147412f080e7Smrj  *
147512f080e7Smrj  *	Return NULL if one could not be found.
147612f080e7Smrj  *
147712f080e7Smrj  *	NOTE: This is needed for rootnex_intr_ops()
14787c478bd9Sstevel@tonic-gate  */
147912f080e7Smrj static struct intrspec *
148012f080e7Smrj rootnex_get_ispec(dev_info_t *rdip, int inum)
14817c478bd9Sstevel@tonic-gate {
148212f080e7Smrj 	struct ddi_parent_private_data *pdp = ddi_get_parent_data(rdip);
14837c478bd9Sstevel@tonic-gate 
14847c478bd9Sstevel@tonic-gate 	/*
148512f080e7Smrj 	 * Special case handling for drivers that provide their own
148612f080e7Smrj 	 * intrspec structures instead of relying on the DDI framework.
148712f080e7Smrj 	 *
148812f080e7Smrj 	 * A broken hardware driver in ON could potentially provide its
148912f080e7Smrj 	 * own intrspec structure, instead of relying on the hardware.
149012f080e7Smrj 	 * If these drivers are children of 'rootnex' then we need to
149112f080e7Smrj 	 * continue to provide backward compatibility to them here.
149212f080e7Smrj 	 *
149312f080e7Smrj 	 * The following check is a special case for the 'pcic' driver, which
149412f080e7Smrj 	 * was found to have broken hardware and thereby provides its own intrspec.
149512f080e7Smrj 	 *
149612f080e7Smrj 	 * Verbatim comments from this driver are shown here:
149712f080e7Smrj 	 * "Don't use the ddi_add_intr since we don't have a
149812f080e7Smrj 	 * default intrspec in all cases."
149912f080e7Smrj 	 *
150012f080e7Smrj 	 * Since an 'ispec' may not always be created for it,
150112f080e7Smrj 	 * check for that and create one here if needed.
150212f080e7Smrj 	 *
150312f080e7Smrj 	 * NOTE: Currently 'pcic' is the only driver found to do this.
15047c478bd9Sstevel@tonic-gate 	 */
150512f080e7Smrj 	if (!pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) {
150612f080e7Smrj 		pdp->par_nintr = 1;
150712f080e7Smrj 		pdp->par_intr = kmem_zalloc(sizeof (struct intrspec) *
150812f080e7Smrj 		    pdp->par_nintr, KM_SLEEP);
150912f080e7Smrj 	}
151012f080e7Smrj 
151112f080e7Smrj 	/* Validate the interrupt number */
151212f080e7Smrj 	if (inum >= pdp->par_nintr)
151312f080e7Smrj 		return (NULL);
151412f080e7Smrj 
151512f080e7Smrj 	/* Get the interrupt structure pointer and return that */
151612f080e7Smrj 	return ((struct intrspec *)&pdp->par_intr[inum]);
151712f080e7Smrj }
151812f080e7Smrj 
151912f080e7Smrj 
152012f080e7Smrj /*
152112f080e7Smrj  * ******************
152212f080e7Smrj  *  dma related code
152312f080e7Smrj  * ******************
152412f080e7Smrj  */
152512f080e7Smrj 
152612f080e7Smrj /*
152712f080e7Smrj  * rootnex_dma_allochdl()
152812f080e7Smrj  *    called from ddi_dma_alloc_handle().
152912f080e7Smrj  */
153012f080e7Smrj /*ARGSUSED*/
153112f080e7Smrj static int
153212f080e7Smrj rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
153312f080e7Smrj     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
153412f080e7Smrj {
153512f080e7Smrj 	uint64_t maxsegmentsize_ll;
153612f080e7Smrj 	uint_t maxsegmentsize;
153712f080e7Smrj 	ddi_dma_impl_t *hp;
153812f080e7Smrj 	rootnex_dma_t *dma;
153912f080e7Smrj 	uint64_t count_max;
154012f080e7Smrj 	uint64_t seg;
154112f080e7Smrj 	int kmflag;
154212f080e7Smrj 	int e;
154312f080e7Smrj 
154412f080e7Smrj 
154512f080e7Smrj 	/* convert our sleep flags */
154612f080e7Smrj 	if (waitfp == DDI_DMA_SLEEP) {
154712f080e7Smrj 		kmflag = KM_SLEEP;
154812f080e7Smrj 	} else {
154912f080e7Smrj 		kmflag = KM_NOSLEEP;
155012f080e7Smrj 	}
155112f080e7Smrj 
155212f080e7Smrj 	/*
155312f080e7Smrj 	 * We try to do only one memory allocation here. We'll do a little
155412f080e7Smrj 	 * pointer manipulation later. If the bind ends up taking more than
155512f080e7Smrj 	 * our prealloc's space, we'll have to allocate more memory in the
155612f080e7Smrj 	 * bind operation. Not great, but much better than before and the
155712f080e7Smrj 	 * best we can do with the current bind interfaces.
155812f080e7Smrj 	 */
155912f080e7Smrj 	hp = kmem_cache_alloc(rootnex_state->r_dmahdl_cache, kmflag);
156012f080e7Smrj 	if (hp == NULL) {
156112f080e7Smrj 		if (waitfp != DDI_DMA_DONTWAIT) {
156212f080e7Smrj 			ddi_set_callback(waitfp, arg,
156312f080e7Smrj 			    &rootnex_state->r_dvma_call_list_id);
156412f080e7Smrj 		}
156512f080e7Smrj 		return (DDI_DMA_NORESOURCES);
156612f080e7Smrj 	}
156712f080e7Smrj 
156812f080e7Smrj 	/* Do our pointer manipulation now, align the structures */
156912f080e7Smrj 	hp->dmai_private = (void *)(((uintptr_t)hp +
157012f080e7Smrj 	    (uintptr_t)sizeof (ddi_dma_impl_t) + 0x7) & ~0x7);
157112f080e7Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
157212f080e7Smrj 	dma->dp_prealloc_buffer = (uchar_t *)(((uintptr_t)dma +
157312f080e7Smrj 	    sizeof (rootnex_dma_t) + 0x7) & ~0x7);
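	/*
	 * A minimal sketch of the alignment trick above, using hypothetical
	 * values: rounding up to an 8-byte boundary with ((x + 0x7) & ~0x7)
	 * works because adding 7 carries any non-zero low three bits over
	 * the boundary and the mask then clears them, e.g.
	 *
	 *	(0x1003 + 0x7) & ~0x7	yields 0x1008
	 *	(0x1008 + 0x7) & ~0x7	stays  0x1008
	 */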
157412f080e7Smrj 
157512f080e7Smrj 	/* setup the handle */
157612f080e7Smrj 	rootnex_clean_dmahdl(hp);
157712f080e7Smrj 	dma->dp_dip = rdip;
157812f080e7Smrj 	dma->dp_sglinfo.si_min_addr = attr->dma_attr_addr_lo;
157912f080e7Smrj 	dma->dp_sglinfo.si_max_addr = attr->dma_attr_addr_hi;
158012f080e7Smrj 	hp->dmai_minxfer = attr->dma_attr_minxfer;
158112f080e7Smrj 	hp->dmai_burstsizes = attr->dma_attr_burstsizes;
158212f080e7Smrj 	hp->dmai_rdip = rdip;
158312f080e7Smrj 	hp->dmai_attr = *attr;
158412f080e7Smrj 
158512f080e7Smrj 	/* we don't need to worry about the SPL since we do a tryenter */
158612f080e7Smrj 	mutex_init(&dma->dp_mutex, NULL, MUTEX_DRIVER, NULL);
158712f080e7Smrj 
158812f080e7Smrj 	/*
158912f080e7Smrj 	 * Figure out our maximum segment size. If the segment size is greater
159012f080e7Smrj 	 * than 4G, we will limit it to (4G - 1) since the max size of a dma
159112f080e7Smrj 	 * object (ddi_dma_obj_t.dmao_size) is 32 bits. dma_attr_seg and
159212f080e7Smrj 	 * dma_attr_count_max are size-1 type values.
159312f080e7Smrj 	 *
159412f080e7Smrj 	 * Maximum segment size is the largest physically contiguous chunk of
159512f080e7Smrj 	 * memory that we can return from a bind (i.e. the maximum size of a
159612f080e7Smrj 	 * single cookie).
159712f080e7Smrj 	 */
159812f080e7Smrj 
159912f080e7Smrj 	/* handle the rollover cases */
160012f080e7Smrj 	seg = attr->dma_attr_seg + 1;
160112f080e7Smrj 	if (seg < attr->dma_attr_seg) {
160212f080e7Smrj 		seg = attr->dma_attr_seg;
160312f080e7Smrj 	}
160412f080e7Smrj 	count_max = attr->dma_attr_count_max + 1;
160512f080e7Smrj 	if (count_max < attr->dma_attr_count_max) {
160612f080e7Smrj 		count_max = attr->dma_attr_count_max;
160712f080e7Smrj 	}
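	/*
	 * A minimal sketch of why the rollover guard above is needed:
	 * dma_attr_seg and dma_attr_count_max are "size - 1" style values,
	 * so converting them to sizes means adding 1. If an attribute is
	 * already UINT64_MAX, the 64-bit addition wraps to 0 and the
	 * (result < original) comparison catches it, e.g.
	 *
	 *	uint64_t a = 0xFFFFFFFFFFFFFFFFULL;
	 *	uint64_t s = a + 1;	wraps to 0
	 *	if (s < a)
	 *		s = a;		clamp back to the original value
	 */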
160812f080e7Smrj 
160912f080e7Smrj 	/*
161012f080e7Smrj 	 * granularity may or may not be a power of two. If it isn't, we can't
161112f080e7Smrj 	 * use a simple mask.
161212f080e7Smrj 	 */
161312f080e7Smrj 	if (attr->dma_attr_granular & (attr->dma_attr_granular - 1)) {
161412f080e7Smrj 		dma->dp_granularity_power_2 = B_FALSE;
161512f080e7Smrj 	} else {
161612f080e7Smrj 		dma->dp_granularity_power_2 = B_TRUE;
161712f080e7Smrj 	}
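	/*
	 * The (x & (x - 1)) test above relies on a power of two having
	 * exactly one bit set: subtracting 1 clears that bit and sets every
	 * lower bit, so the AND is zero only for powers of two (hypothetical
	 * granularity values shown):
	 *
	 *	0x1000 & 0x0FFF == 0	power of two, a simple mask works
	 *	0x0600 & 0x05FF != 0	not a power of two, must use modulo
	 */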
161812f080e7Smrj 
161912f080e7Smrj 	/*
162012f080e7Smrj 	 * maxxfer should be a whole multiple of granularity. If we're going to
162112f080e7Smrj 	 * break up a window because we're greater than maxxfer, we might as
162212f080e7Smrj 	 * well make sure maxxfer is a whole multiple so we don't have to
162312f080e7Smrj 	 * worry about trimming the window later on for this case.
162412f080e7Smrj 	 */
162512f080e7Smrj 	if (attr->dma_attr_granular > 1) {
162612f080e7Smrj 		if (dma->dp_granularity_power_2) {
162712f080e7Smrj 			dma->dp_maxxfer = attr->dma_attr_maxxfer -
162812f080e7Smrj 			    (attr->dma_attr_maxxfer &
162912f080e7Smrj 			    (attr->dma_attr_granular - 1));
163012f080e7Smrj 		} else {
163112f080e7Smrj 			dma->dp_maxxfer = attr->dma_attr_maxxfer -
163212f080e7Smrj 			    (attr->dma_attr_maxxfer % attr->dma_attr_granular);
163312f080e7Smrj 		}
163412f080e7Smrj 	} else {
163512f080e7Smrj 		dma->dp_maxxfer = attr->dma_attr_maxxfer;
163612f080e7Smrj 	}
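	/*
	 * Worked example of the rounding above, with hypothetical attribute
	 * values: dma_attr_maxxfer = 0x12345 and dma_attr_granular = 0x200
	 * (a power of two) give dp_maxxfer = 0x12345 - (0x12345 & 0x1FF) =
	 * 0x12200, i.e. maxxfer rounded down to a whole multiple of the
	 * granularity. The modulo form used for a non-power-of-two
	 * granularity computes the same thing:
	 * 0x12345 - (0x12345 % 0x200) = 0x12200.
	 */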
163712f080e7Smrj 
163812f080e7Smrj 	maxsegmentsize_ll = MIN(seg, dma->dp_maxxfer);
163912f080e7Smrj 	maxsegmentsize_ll = MIN(maxsegmentsize_ll, count_max);
164012f080e7Smrj 	if (maxsegmentsize_ll == 0 || (maxsegmentsize_ll > 0xFFFFFFFF)) {
164112f080e7Smrj 		maxsegmentsize = 0xFFFFFFFF;
164212f080e7Smrj 	} else {
164312f080e7Smrj 		maxsegmentsize = maxsegmentsize_ll;
164412f080e7Smrj 	}
164512f080e7Smrj 	dma->dp_sglinfo.si_max_cookie_size = maxsegmentsize;
164612f080e7Smrj 	dma->dp_sglinfo.si_segmask = attr->dma_attr_seg;
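	/*
	 * Hypothetical example of the clamp above: if seg, count_max and
	 * dp_maxxfer are all 0x1FFFFFFFF, the 64-bit minimum still exceeds
	 * 0xFFFFFFFF, so maxsegmentsize is clamped to 0xFFFFFFFF (4G - 1) to
	 * honor the 32-bit dmao_size limit described in the comment above.
	 */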
164712f080e7Smrj 
164812f080e7Smrj 	/* check the ddi_dma_attr arg to make sure it makes a little sense */
164912f080e7Smrj 	if (rootnex_alloc_check_parms) {
165012f080e7Smrj 		e = rootnex_valid_alloc_parms(attr, maxsegmentsize);
165112f080e7Smrj 		if (e != DDI_SUCCESS) {
165212f080e7Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ALLOC_FAIL]);
165312f080e7Smrj 			(void) rootnex_dma_freehdl(dip, rdip,
165412f080e7Smrj 			    (ddi_dma_handle_t)hp);
165512f080e7Smrj 			return (e);
165612f080e7Smrj 		}
165712f080e7Smrj 	}
165812f080e7Smrj 
165912f080e7Smrj 	*handlep = (ddi_dma_handle_t)hp;
166012f080e7Smrj 
166112f080e7Smrj 	ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
166212f080e7Smrj 	DTRACE_PROBE1(rootnex__alloc__handle, uint64_t,
166312f080e7Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
166412f080e7Smrj 
166512f080e7Smrj 	return (DDI_SUCCESS);
166612f080e7Smrj }
166712f080e7Smrj 
166812f080e7Smrj 
166912f080e7Smrj /*
167012f080e7Smrj  * rootnex_dma_freehdl()
167112f080e7Smrj  *    called from ddi_dma_free_handle().
167212f080e7Smrj  */
167312f080e7Smrj /*ARGSUSED*/
167412f080e7Smrj static int
167512f080e7Smrj rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
167612f080e7Smrj {
167712f080e7Smrj 	ddi_dma_impl_t *hp;
167812f080e7Smrj 	rootnex_dma_t *dma;
167912f080e7Smrj 
168012f080e7Smrj 
168112f080e7Smrj 	hp = (ddi_dma_impl_t *)handle;
168212f080e7Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
168312f080e7Smrj 
168412f080e7Smrj 	/* unbind should have been called first */
168512f080e7Smrj 	ASSERT(!dma->dp_inuse);
168612f080e7Smrj 
168712f080e7Smrj 	mutex_destroy(&dma->dp_mutex);
168812f080e7Smrj 	kmem_cache_free(rootnex_state->r_dmahdl_cache, hp);
168912f080e7Smrj 
169012f080e7Smrj 	ROOTNEX_PROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
169112f080e7Smrj 	DTRACE_PROBE1(rootnex__free__handle, uint64_t,
169212f080e7Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
169312f080e7Smrj 
169412f080e7Smrj 	if (rootnex_state->r_dvma_call_list_id)
169512f080e7Smrj 		ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
169612f080e7Smrj 
169712f080e7Smrj 	return (DDI_SUCCESS);
169812f080e7Smrj }
169912f080e7Smrj 
170012f080e7Smrj 
170112f080e7Smrj /*
170212f080e7Smrj  * rootnex_dma_bindhdl()
170312f080e7Smrj  *    called from ddi_dma_addr_bind_handle() and ddi_dma_buf_bind_handle().
170412f080e7Smrj  */
170512f080e7Smrj /*ARGSUSED*/
170612f080e7Smrj static int
170712f080e7Smrj rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
170812f080e7Smrj     struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
170912f080e7Smrj {
171012f080e7Smrj 	rootnex_sglinfo_t *sinfo;
171112f080e7Smrj 	ddi_dma_attr_t *attr;
171212f080e7Smrj 	ddi_dma_impl_t *hp;
171312f080e7Smrj 	rootnex_dma_t *dma;
171412f080e7Smrj 	int kmflag;
171512f080e7Smrj 	int e;
171612f080e7Smrj 
171712f080e7Smrj 
171812f080e7Smrj 	hp = (ddi_dma_impl_t *)handle;
171912f080e7Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
172012f080e7Smrj 	sinfo = &dma->dp_sglinfo;
172112f080e7Smrj 	attr = &hp->dmai_attr;
172212f080e7Smrj 
172312f080e7Smrj 	hp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS;
172412f080e7Smrj 
172512f080e7Smrj 	/*
172612f080e7Smrj 	 * This is useful for debugging a driver. Not as useful in a production
172712f080e7Smrj 	 * system. The only time this will fail is if you have a driver bug.
172812f080e7Smrj 	 */
172912f080e7Smrj 	if (rootnex_bind_check_inuse) {
173012f080e7Smrj 		/*
173112f080e7Smrj 		 * No one else should ever have this lock unless someone else
173212f080e7Smrj 		 * is trying to use this handle. So contention on the lock
173312f080e7Smrj 		 * is the same as inuse being set.
173412f080e7Smrj 		 */
173512f080e7Smrj 		e = mutex_tryenter(&dma->dp_mutex);
173612f080e7Smrj 		if (e == 0) {
173712f080e7Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
173812f080e7Smrj 			return (DDI_DMA_INUSE);
173912f080e7Smrj 		}
174012f080e7Smrj 		if (dma->dp_inuse) {
174112f080e7Smrj 			mutex_exit(&dma->dp_mutex);
174212f080e7Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
174312f080e7Smrj 			return (DDI_DMA_INUSE);
174412f080e7Smrj 		}
174512f080e7Smrj 		dma->dp_inuse = B_TRUE;
174612f080e7Smrj 		mutex_exit(&dma->dp_mutex);
174712f080e7Smrj 	}
174812f080e7Smrj 
174912f080e7Smrj 	/* check the ddi_dma_attr arg to make sure it makes a little sense */
175012f080e7Smrj 	if (rootnex_bind_check_parms) {
175112f080e7Smrj 		e = rootnex_valid_bind_parms(dmareq, attr);
175212f080e7Smrj 		if (e != DDI_SUCCESS) {
175312f080e7Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
175412f080e7Smrj 			rootnex_clean_dmahdl(hp);
175512f080e7Smrj 			return (e);
175612f080e7Smrj 		}
175712f080e7Smrj 	}
175812f080e7Smrj 
175912f080e7Smrj 	/* save away the original bind info */
176012f080e7Smrj 	dma->dp_dma = dmareq->dmar_object;
176112f080e7Smrj 
176212f080e7Smrj 	/*
176312f080e7Smrj 	 * Figure out a rough estimate of what maximum number of pages this
176412f080e7Smrj 	 * buffer could use (a high estimate of course).
176512f080e7Smrj 	 */
176612f080e7Smrj 	sinfo->si_max_pages = mmu_btopr(dma->dp_dma.dmao_size) + 1;
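	/*
	 * For illustration, assuming 4K MMU pages: a 9000 byte buffer gives
	 * mmu_btopr(9000) = 3, plus 1 for the case where the buffer starts
	 * at a non-zero page offset and spills onto a fourth page, so
	 * si_max_pages would be 4.
	 */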
176712f080e7Smrj 
176812f080e7Smrj 	/*
176912f080e7Smrj 	 * We'll use the pre-allocated cookies for any bind that will *always*
177012f080e7Smrj 	 * fit (more important to be consistent, we don't want to create
177112f080e7Smrj 	 * additional degenerate cases).
177212f080e7Smrj 	 */
177312f080e7Smrj 	if (sinfo->si_max_pages <= rootnex_state->r_prealloc_cookies) {
177412f080e7Smrj 		dma->dp_cookies = (ddi_dma_cookie_t *)dma->dp_prealloc_buffer;
177512f080e7Smrj 		dma->dp_need_to_free_cookie = B_FALSE;
177612f080e7Smrj 		DTRACE_PROBE2(rootnex__bind__prealloc, dev_info_t *, rdip,
177712f080e7Smrj 		    uint_t, sinfo->si_max_pages);
177812f080e7Smrj 
177912f080e7Smrj 	/*
178012f080e7Smrj 	 * For anything larger than that, we'll go ahead and allocate the
178112f080e7Smrj 	 * maximum number of pages we expect to see. Hopefully, we won't be
178212f080e7Smrj 	 * seeing this path in the fast path for high performance devices very
178312f080e7Smrj 	 * frequently.
178412f080e7Smrj 	 *
178512f080e7Smrj 	 * A ddi bind interface that allowed the driver to provide storage to
178612f080e7Smrj 	 * the bind interface would speed this case up.
178712f080e7Smrj 	 */
178812f080e7Smrj 	} else {
178912f080e7Smrj 		/* convert the sleep flags */
179012f080e7Smrj 		if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
179112f080e7Smrj 			kmflag =  KM_SLEEP;
179212f080e7Smrj 		} else {
179312f080e7Smrj 			kmflag =  KM_NOSLEEP;
179412f080e7Smrj 		}
179512f080e7Smrj 
179612f080e7Smrj 		/*
179712f080e7Smrj 		 * Save away how much memory we allocated. If we're doing a
179812f080e7Smrj 		 * nosleep, the alloc could fail...
179912f080e7Smrj 		 */
180012f080e7Smrj 		dma->dp_cookie_size = sinfo->si_max_pages *
180112f080e7Smrj 		    sizeof (ddi_dma_cookie_t);
180212f080e7Smrj 		dma->dp_cookies = kmem_alloc(dma->dp_cookie_size, kmflag);
180312f080e7Smrj 		if (dma->dp_cookies == NULL) {
180412f080e7Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
180512f080e7Smrj 			rootnex_clean_dmahdl(hp);
180612f080e7Smrj 			return (DDI_DMA_NORESOURCES);
180712f080e7Smrj 		}
180812f080e7Smrj 		dma->dp_need_to_free_cookie = B_TRUE;
180912f080e7Smrj 		DTRACE_PROBE2(rootnex__bind__alloc, dev_info_t *, rdip, uint_t,
181012f080e7Smrj 		    sinfo->si_max_pages);
181112f080e7Smrj 	}
181212f080e7Smrj 	hp->dmai_cookie = dma->dp_cookies;
181312f080e7Smrj 
181412f080e7Smrj 	/*
181512f080e7Smrj 	 * Get the real sgl. rootnex_get_sgl will fill in the cookie array while
181612f080e7Smrj 	 * looking at the constraints in the dma structure. It will then put some
181712f080e7Smrj 	 * additional state about the sgl in the dma struct (i.e. is the sgl
181812f080e7Smrj 	 * clean, or do we need to do some munging; how many pages need to be
181912f080e7Smrj 	 * copied, etc.)
182012f080e7Smrj 	 */
182112f080e7Smrj 	rootnex_get_sgl(&dmareq->dmar_object, dma->dp_cookies,
182212f080e7Smrj 	    &dma->dp_sglinfo);
182312f080e7Smrj 	ASSERT(sinfo->si_sgl_size <= sinfo->si_max_pages);
182412f080e7Smrj 
182512f080e7Smrj 	/* if we don't need a copy buffer, we don't need to sync */
182612f080e7Smrj 	if (sinfo->si_copybuf_req == 0) {
182712f080e7Smrj 		hp->dmai_rflags |= DMP_NOSYNC;
182812f080e7Smrj 	}
182912f080e7Smrj 
183012f080e7Smrj 	/*
183112f080e7Smrj 	 * if we don't need the copybuf and we don't need to do a partial,  we
183212f080e7Smrj 	 * hit the fast path. All the high performance devices should be trying
183312f080e7Smrj 	 * to hit this path. To hit this path, a device should be able to reach
183412f080e7Smrj 	 * all of memory, shouldn't try to bind more than it can transfer, and
183512f080e7Smrj 	 * the buffer shouldn't require more cookies than the driver/device can
183612f080e7Smrj 	 * handle [sgllen].
183712f080e7Smrj 	 */
183812f080e7Smrj 	if ((sinfo->si_copybuf_req == 0) &&
183912f080e7Smrj 	    (sinfo->si_sgl_size <= attr->dma_attr_sgllen) &&
184012f080e7Smrj 	    (dma->dp_dma.dmao_size < dma->dp_maxxfer)) {
184112f080e7Smrj 		/*
184285c8e0e8Sstephh 		 * If the driver supports FMA, insert the handle in the FMA DMA
184385c8e0e8Sstephh 		 * handle cache.
184485c8e0e8Sstephh 		 */
184585c8e0e8Sstephh 		if (attr->dma_attr_flags & DDI_DMA_FLAGERR) {
184685c8e0e8Sstephh 			hp->dmai_error.err_cf = rootnex_dma_check;
184785c8e0e8Sstephh 			(void) ndi_fmc_insert(rdip, DMA_HANDLE, hp, NULL);
184885c8e0e8Sstephh 		}
184985c8e0e8Sstephh 
185085c8e0e8Sstephh 		/*
185112f080e7Smrj 		 * copy out the first cookie and ccountp, set the cookie
185212f080e7Smrj 		 * pointer to the second cookie. The first cookie is passed
185312f080e7Smrj 		 * back on the stack. Additional cookies are accessed via
185412f080e7Smrj 		 * ddi_dma_nextcookie()
185512f080e7Smrj 		 */
185612f080e7Smrj 		*cookiep = dma->dp_cookies[0];
185712f080e7Smrj 		*ccountp = sinfo->si_sgl_size;
185812f080e7Smrj 		hp->dmai_cookie++;
185912f080e7Smrj 		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
186012f080e7Smrj 		hp->dmai_nwin = 1;
186112f080e7Smrj 		ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
186212f080e7Smrj 		DTRACE_PROBE3(rootnex__bind__fast, dev_info_t *, rdip, uint64_t,
186312f080e7Smrj 		    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t,
186412f080e7Smrj 		    dma->dp_dma.dmao_size);
186512f080e7Smrj 		return (DDI_DMA_MAPPED);
186612f080e7Smrj 	}
186712f080e7Smrj 
186812f080e7Smrj 	/*
186912f080e7Smrj 	 * go to the slow path, we may need to alloc more memory, create
187012f080e7Smrj 	 * multiple windows, and munge up a sgl to make the device happy.
187112f080e7Smrj 	 */
187212f080e7Smrj 	e = rootnex_bind_slowpath(hp, dmareq, dma, attr, kmflag);
187312f080e7Smrj 	if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) {
187412f080e7Smrj 		if (dma->dp_need_to_free_cookie) {
187512f080e7Smrj 			kmem_free(dma->dp_cookies, dma->dp_cookie_size);
187612f080e7Smrj 		}
187712f080e7Smrj 		ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
187812f080e7Smrj 		rootnex_clean_dmahdl(hp); /* must be after free cookie */
187912f080e7Smrj 		return (e);
188012f080e7Smrj 	}
188112f080e7Smrj 
188285c8e0e8Sstephh 	/*
188385c8e0e8Sstephh 	 * If the driver supports FMA, insert the handle in the FMA DMA handle
188485c8e0e8Sstephh 	 * cache.
188585c8e0e8Sstephh 	 */
188685c8e0e8Sstephh 	if (attr->dma_attr_flags & DDI_DMA_FLAGERR) {
188785c8e0e8Sstephh 		hp->dmai_error.err_cf = rootnex_dma_check;
188885c8e0e8Sstephh 		(void) ndi_fmc_insert(rdip, DMA_HANDLE, hp, NULL);
188985c8e0e8Sstephh 	}
189085c8e0e8Sstephh 
189112f080e7Smrj 	/* if the first window uses the copy buffer, sync it for the device */
189212f080e7Smrj 	if ((dma->dp_window[dma->dp_current_win].wd_dosync) &&
189312f080e7Smrj 	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
189412f080e7Smrj 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
189512f080e7Smrj 		    DDI_DMA_SYNC_FORDEV);
189612f080e7Smrj 	}
189712f080e7Smrj 
189812f080e7Smrj 	/*
189912f080e7Smrj 	 * copy out the first cookie and ccountp, set the cookie pointer to the
190012f080e7Smrj 	 * second cookie. Make sure the partial flag is set/cleared correctly.
190112f080e7Smrj 	 * If we have a partial map (i.e. multiple windows), the number of
190212f080e7Smrj 	 * cookies we return is the number of cookies in the first window.
190312f080e7Smrj 	 */
190412f080e7Smrj 	if (e == DDI_DMA_MAPPED) {
190512f080e7Smrj 		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
190612f080e7Smrj 		*ccountp = sinfo->si_sgl_size;
190712f080e7Smrj 	} else {
190812f080e7Smrj 		hp->dmai_rflags |= DDI_DMA_PARTIAL;
190912f080e7Smrj 		*ccountp = dma->dp_window[dma->dp_current_win].wd_cookie_cnt;
191012f080e7Smrj 		ASSERT(hp->dmai_nwin <= dma->dp_max_win);
191112f080e7Smrj 	}
191212f080e7Smrj 	*cookiep = dma->dp_cookies[0];
191312f080e7Smrj 	hp->dmai_cookie++;
191412f080e7Smrj 
191512f080e7Smrj 	ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
191612f080e7Smrj 	DTRACE_PROBE3(rootnex__bind__slow, dev_info_t *, rdip, uint64_t,
191712f080e7Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t,
191812f080e7Smrj 	    dma->dp_dma.dmao_size);
191912f080e7Smrj 	return (e);
192012f080e7Smrj }
192112f080e7Smrj 
192212f080e7Smrj 
192312f080e7Smrj /*
192412f080e7Smrj  * rootnex_dma_unbindhdl()
192512f080e7Smrj  *    called from ddi_dma_unbind_handle()
192612f080e7Smrj  */
192712f080e7Smrj /*ARGSUSED*/
192812f080e7Smrj static int
192912f080e7Smrj rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
193012f080e7Smrj     ddi_dma_handle_t handle)
193112f080e7Smrj {
193212f080e7Smrj 	ddi_dma_impl_t *hp;
193312f080e7Smrj 	rootnex_dma_t *dma;
193412f080e7Smrj 	int e;
193512f080e7Smrj 
193612f080e7Smrj 
193712f080e7Smrj 	hp = (ddi_dma_impl_t *)handle;
193812f080e7Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
193912f080e7Smrj 
194012f080e7Smrj 	/* make sure the buffer wasn't free'd before calling unbind */
194112f080e7Smrj 	if (rootnex_unbind_verify_buffer) {
194212f080e7Smrj 		e = rootnex_verify_buffer(dma);
194312f080e7Smrj 		if (e != DDI_SUCCESS) {
194412f080e7Smrj 			ASSERT(0);
194512f080e7Smrj 			return (DDI_FAILURE);
194612f080e7Smrj 		}
194712f080e7Smrj 	}
194812f080e7Smrj 
194912f080e7Smrj 	/* sync the current window before unbinding the buffer */
195012f080e7Smrj 	if (dma->dp_window && dma->dp_window[dma->dp_current_win].wd_dosync &&
195112f080e7Smrj 	    (hp->dmai_rflags & DDI_DMA_READ)) {
195212f080e7Smrj 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
195312f080e7Smrj 		    DDI_DMA_SYNC_FORCPU);
195412f080e7Smrj 	}
195512f080e7Smrj 
195612f080e7Smrj 	/*
195700d0963fSdilpreet 	 * If the driver supports FMA, remove the handle in the FMA DMA handle
195800d0963fSdilpreet 	 * cache.
195900d0963fSdilpreet 	 */
196000d0963fSdilpreet 	if (hp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) {
196100d0963fSdilpreet 		if ((DEVI(rdip)->devi_fmhdl != NULL) &&
196200d0963fSdilpreet 		    (DDI_FM_DMA_ERR_CAP(DEVI(rdip)->devi_fmhdl->fh_cap))) {
196300d0963fSdilpreet 			(void) ndi_fmc_remove(rdip, DMA_HANDLE, hp);
196400d0963fSdilpreet 		}
196500d0963fSdilpreet 	}
196600d0963fSdilpreet 
196700d0963fSdilpreet 	/*
196812f080e7Smrj 	 * Clean up any copy buffer or window state. If we didn't use the copy
196912f080e7Smrj 	 * buffer or windows, there won't be much to do :-)
197012f080e7Smrj 	 */
197112f080e7Smrj 	rootnex_teardown_copybuf(dma);
197212f080e7Smrj 	rootnex_teardown_windows(dma);
197312f080e7Smrj 
197412f080e7Smrj 	/*
197512f080e7Smrj 	 * If we had to allocate space for the worst-case sgl (it didn't
197612f080e7Smrj 	 * fit into our pre-allocated buffer), free that up now.
197712f080e7Smrj 	 */
197812f080e7Smrj 	if (dma->dp_need_to_free_cookie) {
197912f080e7Smrj 		kmem_free(dma->dp_cookies, dma->dp_cookie_size);
198012f080e7Smrj 	}
198112f080e7Smrj 
198212f080e7Smrj 	/*
198312f080e7Smrj 	 * clean up the handle so it's ready for the next bind (i.e. if the
198412f080e7Smrj 	 * handle is reused).
198512f080e7Smrj 	 */
198612f080e7Smrj 	rootnex_clean_dmahdl(hp);
198712f080e7Smrj 
198812f080e7Smrj 	if (rootnex_state->r_dvma_call_list_id)
198912f080e7Smrj 		ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
199012f080e7Smrj 
199112f080e7Smrj 	ROOTNEX_PROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
199212f080e7Smrj 	DTRACE_PROBE1(rootnex__unbind, uint64_t,
199312f080e7Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
199412f080e7Smrj 
199512f080e7Smrj 	return (DDI_SUCCESS);
199612f080e7Smrj }
199712f080e7Smrj 
199812f080e7Smrj 
199912f080e7Smrj /*
200012f080e7Smrj  * rootnex_verify_buffer()
200112f080e7Smrj  *   verify buffer wasn't free'd
200212f080e7Smrj  */
200312f080e7Smrj static int
200412f080e7Smrj rootnex_verify_buffer(rootnex_dma_t *dma)
200512f080e7Smrj {
200612f080e7Smrj 	page_t **pplist;
200712f080e7Smrj 	caddr_t vaddr;
200812f080e7Smrj 	uint_t pcnt;
200912f080e7Smrj 	uint_t poff;
201012f080e7Smrj 	page_t *pp;
201100d0963fSdilpreet 	char b;
201212f080e7Smrj 	int i;
201312f080e7Smrj 
201412f080e7Smrj 	/* Figure out how many pages this buffer occupies */
201512f080e7Smrj 	if (dma->dp_dma.dmao_type == DMA_OTYP_PAGES) {
201612f080e7Smrj 		poff = dma->dp_dma.dmao_obj.pp_obj.pp_offset & MMU_PAGEOFFSET;
201712f080e7Smrj 	} else {
201812f080e7Smrj 		vaddr = dma->dp_dma.dmao_obj.virt_obj.v_addr;
201912f080e7Smrj 		poff = (uintptr_t)vaddr & MMU_PAGEOFFSET;
202012f080e7Smrj 	}
202112f080e7Smrj 	pcnt = mmu_btopr(dma->dp_dma.dmao_size + poff);
202212f080e7Smrj 
202312f080e7Smrj 	switch (dma->dp_dma.dmao_type) {
202412f080e7Smrj 	case DMA_OTYP_PAGES:
202512f080e7Smrj 		/*
202612f080e7Smrj 		 * for a linked list of pp's walk through them to make sure
202712f080e7Smrj 		 * they're locked and not free.
202812f080e7Smrj 		 */
202912f080e7Smrj 		pp = dma->dp_dma.dmao_obj.pp_obj.pp_pp;
203012f080e7Smrj 		for (i = 0; i < pcnt; i++) {
203112f080e7Smrj 			if (PP_ISFREE(pp) || !PAGE_LOCKED(pp)) {
203212f080e7Smrj 				return (DDI_FAILURE);
203312f080e7Smrj 			}
20347c478bd9Sstevel@tonic-gate 			pp = pp->p_next;
20357c478bd9Sstevel@tonic-gate 		}
20367c478bd9Sstevel@tonic-gate 		break;
203712f080e7Smrj 
20387c478bd9Sstevel@tonic-gate 	case DMA_OTYP_VADDR:
20397c478bd9Sstevel@tonic-gate 	case DMA_OTYP_BUFVADDR:
204012f080e7Smrj 		pplist = dma->dp_dma.dmao_obj.virt_obj.v_priv;
204112f080e7Smrj 		/*
204212f080e7Smrj 		 * for an array of pp's walk through them to make sure they're
204312f080e7Smrj 		 * not free. It's possible that they may not be locked.
204412f080e7Smrj 		 */
204512f080e7Smrj 		if (pplist) {
204612f080e7Smrj 			for (i = 0; i < pcnt; i++) {
204712f080e7Smrj 				if (PP_ISFREE(pplist[i])) {
204812f080e7Smrj 					return (DDI_FAILURE);
204912f080e7Smrj 				}
205012f080e7Smrj 			}
205112f080e7Smrj 
205212f080e7Smrj 		/* For a virtual address, try to peek at each page */
205312f080e7Smrj 		} else {
205412f080e7Smrj 			if (dma->dp_sglinfo.si_asp == &kas) {
205512f080e7Smrj 				for (i = 0; i < pcnt; i++) {
205600d0963fSdilpreet 					if (ddi_peek8(NULL, vaddr, &b) ==
205700d0963fSdilpreet 					    DDI_FAILURE)
205812f080e7Smrj 						return (DDI_FAILURE);
205900d0963fSdilpreet 					vaddr += MMU_PAGESIZE;
206012f080e7Smrj 				}
206112f080e7Smrj 			}
206212f080e7Smrj 		}
206312f080e7Smrj 		break;
206412f080e7Smrj 
206512f080e7Smrj 	default:
206612f080e7Smrj 		ASSERT(0);
206712f080e7Smrj 		break;
206812f080e7Smrj 	}
206912f080e7Smrj 
207012f080e7Smrj 	return (DDI_SUCCESS);
207112f080e7Smrj }
207212f080e7Smrj 
207312f080e7Smrj 
207412f080e7Smrj /*
207512f080e7Smrj  * rootnex_clean_dmahdl()
207612f080e7Smrj  *    Clean the dma handle. This should be called on a handle alloc and on
207712f080e7Smrj  *    a handle unbind. Set the handle state to the default settings.
207812f080e7Smrj  */
207912f080e7Smrj static void
208012f080e7Smrj rootnex_clean_dmahdl(ddi_dma_impl_t *hp)
208112f080e7Smrj {
208212f080e7Smrj 	rootnex_dma_t *dma;
208312f080e7Smrj 
208412f080e7Smrj 
208512f080e7Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
208612f080e7Smrj 
208712f080e7Smrj 	hp->dmai_nwin = 0;
208812f080e7Smrj 	dma->dp_current_cookie = 0;
208912f080e7Smrj 	dma->dp_copybuf_size = 0;
209012f080e7Smrj 	dma->dp_window = NULL;
209112f080e7Smrj 	dma->dp_cbaddr = NULL;
209212f080e7Smrj 	dma->dp_inuse = B_FALSE;
209312f080e7Smrj 	dma->dp_need_to_free_cookie = B_FALSE;
209412f080e7Smrj 	dma->dp_need_to_free_window = B_FALSE;
209512f080e7Smrj 	dma->dp_partial_required = B_FALSE;
209612f080e7Smrj 	dma->dp_trim_required = B_FALSE;
209712f080e7Smrj 	dma->dp_sglinfo.si_copybuf_req = 0;
209812f080e7Smrj #if !defined(__amd64)
209912f080e7Smrj 	dma->dp_cb_remaping = B_FALSE;
210012f080e7Smrj 	dma->dp_kva = NULL;
210112f080e7Smrj #endif
210212f080e7Smrj 
210312f080e7Smrj 	/* FMA related initialization */
210412f080e7Smrj 	hp->dmai_fault = 0;
210512f080e7Smrj 	hp->dmai_fault_check = NULL;
210612f080e7Smrj 	hp->dmai_fault_notify = NULL;
210712f080e7Smrj 	hp->dmai_error.err_ena = 0;
210812f080e7Smrj 	hp->dmai_error.err_status = DDI_FM_OK;
210912f080e7Smrj 	hp->dmai_error.err_expected = DDI_FM_ERR_UNEXPECTED;
211012f080e7Smrj 	hp->dmai_error.err_ontrap = NULL;
211112f080e7Smrj 	hp->dmai_error.err_fep = NULL;
211200d0963fSdilpreet 	hp->dmai_error.err_cf = NULL;
211312f080e7Smrj }
211412f080e7Smrj 
211512f080e7Smrj 
211612f080e7Smrj /*
211712f080e7Smrj  * rootnex_valid_alloc_parms()
211812f080e7Smrj  *    Called in ddi_dma_alloc_handle path to validate its parameters.
211912f080e7Smrj  */
212012f080e7Smrj static int
212112f080e7Smrj rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegmentsize)
212212f080e7Smrj {
212312f080e7Smrj 	if ((attr->dma_attr_seg < MMU_PAGEOFFSET) ||
212412f080e7Smrj 	    (attr->dma_attr_count_max < MMU_PAGEOFFSET) ||
212512f080e7Smrj 	    (attr->dma_attr_granular > MMU_PAGESIZE) ||
212612f080e7Smrj 	    (attr->dma_attr_maxxfer < MMU_PAGESIZE)) {
212712f080e7Smrj 		return (DDI_DMA_BADATTR);
212812f080e7Smrj 	}
212912f080e7Smrj 
213012f080e7Smrj 	if (attr->dma_attr_addr_hi <= attr->dma_attr_addr_lo) {
213112f080e7Smrj 		return (DDI_DMA_BADATTR);
213212f080e7Smrj 	}
213312f080e7Smrj 
213412f080e7Smrj 	if ((attr->dma_attr_seg & MMU_PAGEOFFSET) != MMU_PAGEOFFSET ||
213512f080e7Smrj 	    MMU_PAGESIZE & (attr->dma_attr_granular - 1) ||
213612f080e7Smrj 	    attr->dma_attr_sgllen <= 0) {
213712f080e7Smrj 		return (DDI_DMA_BADATTR);
213812f080e7Smrj 	}
213912f080e7Smrj 
214012f080e7Smrj 	/* We should be able to DMA into every byte offset in a page */
214112f080e7Smrj 	if (maxsegmentsize < MMU_PAGESIZE) {
214212f080e7Smrj 		return (DDI_DMA_BADATTR);
214312f080e7Smrj 	}
214412f080e7Smrj 
214512f080e7Smrj 	return (DDI_SUCCESS);
214612f080e7Smrj }
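/*
 * For illustration, a hypothetical attribute set that passes the checks above
 * on a system with 4K pages: dma_attr_seg = 0xFFFFFFFF, dma_attr_count_max =
 * 0xFFFFFFFF, dma_attr_granular = 1, dma_attr_maxxfer = 0xFFFFFFFF,
 * dma_attr_addr_lo = 0, dma_attr_addr_hi = 0xFFFFFFFF and dma_attr_sgllen > 0.
 * A dma_attr_seg of 0x7FF, on the other hand, fails the first check, since a
 * segment boundary smaller than a page means the device can't DMA into every
 * byte offset within a page.
 */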
214712f080e7Smrj 
214812f080e7Smrj 
214912f080e7Smrj /*
215012f080e7Smrj  * rootnex_valid_bind_parms()
215112f080e7Smrj  *    Called in ddi_dma_*_bind_handle path to validate its parameters.
215212f080e7Smrj  */
215312f080e7Smrj /* ARGSUSED */
215412f080e7Smrj static int
215512f080e7Smrj rootnex_valid_bind_parms(ddi_dma_req_t *dmareq, ddi_dma_attr_t *attr)
215612f080e7Smrj {
215712f080e7Smrj #if !defined(__amd64)
215812f080e7Smrj 	/*
215912f080e7Smrj 	 * we only support up to a 2G-1 transfer size on 32-bit kernels so
216012f080e7Smrj 	 * we can track the offset for the obsoleted interfaces.
216112f080e7Smrj 	 */
216212f080e7Smrj 	if (dmareq->dmar_object.dmao_size > 0x7FFFFFFF) {
216312f080e7Smrj 		return (DDI_DMA_TOOBIG);
216412f080e7Smrj 	}
216512f080e7Smrj #endif
216612f080e7Smrj 
216712f080e7Smrj 	return (DDI_SUCCESS);
216812f080e7Smrj }
216912f080e7Smrj 
217012f080e7Smrj 
217112f080e7Smrj /*
217212f080e7Smrj  * rootnex_get_sgl()
217312f080e7Smrj  *    Called in bind fastpath to get the sgl. Most of this will be replaced
217412f080e7Smrj  *    with a call to the vm layer when vm2.0 comes around...
217512f080e7Smrj  */
217612f080e7Smrj static void
217712f080e7Smrj rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
217812f080e7Smrj     rootnex_sglinfo_t *sglinfo)
217912f080e7Smrj {
218012f080e7Smrj 	ddi_dma_atyp_t buftype;
2181843e1988Sjohnlev 	rootnex_addr_t raddr;
218212f080e7Smrj 	uint64_t last_page;
218312f080e7Smrj 	uint64_t offset;
218412f080e7Smrj 	uint64_t addrhi;
218512f080e7Smrj 	uint64_t addrlo;
218612f080e7Smrj 	uint64_t maxseg;
218712f080e7Smrj 	page_t **pplist;
218812f080e7Smrj 	uint64_t paddr;
218912f080e7Smrj 	uint32_t psize;
219012f080e7Smrj 	uint32_t size;
219112f080e7Smrj 	caddr_t vaddr;
219212f080e7Smrj 	uint_t pcnt;
219312f080e7Smrj 	page_t *pp;
219412f080e7Smrj 	uint_t cnt;
219512f080e7Smrj 
219612f080e7Smrj 
219712f080e7Smrj 	/* shortcuts */
219812f080e7Smrj 	pplist = dmar_object->dmao_obj.virt_obj.v_priv;
219912f080e7Smrj 	vaddr = dmar_object->dmao_obj.virt_obj.v_addr;
220012f080e7Smrj 	maxseg = sglinfo->si_max_cookie_size;
220112f080e7Smrj 	buftype = dmar_object->dmao_type;
220212f080e7Smrj 	addrhi = sglinfo->si_max_addr;
220312f080e7Smrj 	addrlo = sglinfo->si_min_addr;
220412f080e7Smrj 	size = dmar_object->dmao_size;
220512f080e7Smrj 
220612f080e7Smrj 	pcnt = 0;
220712f080e7Smrj 	cnt = 0;
220812f080e7Smrj 
220912f080e7Smrj 	/*
221012f080e7Smrj 	 * if we were passed down a linked list of pages, i.e. pointer to
221112f080e7Smrj 	 * page_t, use this to get our physical address and buf offset.
221212f080e7Smrj 	 */
221312f080e7Smrj 	if (buftype == DMA_OTYP_PAGES) {
221412f080e7Smrj 		pp = dmar_object->dmao_obj.pp_obj.pp_pp;
221512f080e7Smrj 		ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
221612f080e7Smrj 		offset =  dmar_object->dmao_obj.pp_obj.pp_offset &
221712f080e7Smrj 		    MMU_PAGEOFFSET;
2218843e1988Sjohnlev 		paddr = pfn_to_pa(pp->p_pagenum) + offset;
221912f080e7Smrj 		psize = MIN(size, (MMU_PAGESIZE - offset));
222012f080e7Smrj 		pp = pp->p_next;
222112f080e7Smrj 		sglinfo->si_asp = NULL;
222212f080e7Smrj 
222312f080e7Smrj 	/*
222412f080e7Smrj 	 * We weren't passed down a linked list of pages, but if we were passed
222512f080e7Smrj 	 * down an array of pages, use this to get our physical address and buf
222612f080e7Smrj 	 * offset.
222712f080e7Smrj 	 */
222812f080e7Smrj 	} else if (pplist != NULL) {
222912f080e7Smrj 		ASSERT((buftype == DMA_OTYP_VADDR) ||
223012f080e7Smrj 		    (buftype == DMA_OTYP_BUFVADDR));
223112f080e7Smrj 
223212f080e7Smrj 		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
223312f080e7Smrj 		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
223412f080e7Smrj 		if (sglinfo->si_asp == NULL) {
223512f080e7Smrj 			sglinfo->si_asp = &kas;
223612f080e7Smrj 		}
223712f080e7Smrj 
223812f080e7Smrj 		ASSERT(!PP_ISFREE(pplist[pcnt]));
2239843e1988Sjohnlev 		paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
224012f080e7Smrj 		paddr += offset;
224112f080e7Smrj 		psize = MIN(size, (MMU_PAGESIZE - offset));
224212f080e7Smrj 		pcnt++;
224312f080e7Smrj 
224412f080e7Smrj 	/*
224512f080e7Smrj 	 * All we have is a virtual address; we'll need to call into the VM
224612f080e7Smrj 	 * to get the physical address.
224712f080e7Smrj 	 */
224812f080e7Smrj 	} else {
224912f080e7Smrj 		ASSERT((buftype == DMA_OTYP_VADDR) ||
225012f080e7Smrj 		    (buftype == DMA_OTYP_BUFVADDR));
225112f080e7Smrj 
225212f080e7Smrj 		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
225312f080e7Smrj 		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
225412f080e7Smrj 		if (sglinfo->si_asp == NULL) {
225512f080e7Smrj 			sglinfo->si_asp = &kas;
225612f080e7Smrj 		}
225712f080e7Smrj 
2258843e1988Sjohnlev 		paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, vaddr));
225912f080e7Smrj 		paddr += offset;
226012f080e7Smrj 		psize = MIN(size, (MMU_PAGESIZE - offset));
226112f080e7Smrj 		vaddr += psize;
226212f080e7Smrj 	}
226312f080e7Smrj 
2264843e1988Sjohnlev #ifdef __xpv
2265843e1988Sjohnlev 	/*
2266843e1988Sjohnlev 	 * If we're dom0, we're using a real device so we need to load
2267843e1988Sjohnlev 	 * the cookies with MFNs instead of PFNs.
2268843e1988Sjohnlev 	 */
2269843e1988Sjohnlev 	raddr = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
2270843e1988Sjohnlev #else
2271843e1988Sjohnlev 	raddr = paddr;
2272843e1988Sjohnlev #endif
2273843e1988Sjohnlev 
227412f080e7Smrj 	/*
227512f080e7Smrj 	 * Setup the first cookie with the physical address of the page and the
227612f080e7Smrj 	 * size of the page (which takes into account the initial offset into
227712f080e7Smrj 	 * the page.
227812f080e7Smrj 	 * the page).
2279843e1988Sjohnlev 	sgl[cnt].dmac_laddress = raddr;
228012f080e7Smrj 	sgl[cnt].dmac_size = psize;
228112f080e7Smrj 	sgl[cnt].dmac_type = 0;
228212f080e7Smrj 
228312f080e7Smrj 	/*
228412f080e7Smrj 	 * Save away the buffer offset into the page. We'll need this later in
228512f080e7Smrj 	 * the copy buffer code to help figure out the page index within the
228612f080e7Smrj 	 * buffer and the offset into the current page.
228712f080e7Smrj 	 */
228812f080e7Smrj 	sglinfo->si_buf_offset = offset;
228912f080e7Smrj 
229012f080e7Smrj 	/*
229112f080e7Smrj 	 * If the DMA engine can't reach the physical address, increase how
229212f080e7Smrj 	 * much copy buffer we need. We always increase by pagesize so we don't
229312f080e7Smrj 	 * have to worry about converting offsets. Set a flag in the cookie's
229412f080e7Smrj 	 * dmac_type to indicate that it uses the copy buffer. If this isn't the
229512f080e7Smrj 	 * last cookie, go to the next cookie (since we separate each page which
229612f080e7Smrj 	 * uses the copy buffer, in case the copy buffer is not physically
229712f080e7Smrj 	 * contiguous).
229812f080e7Smrj 	 */
2299843e1988Sjohnlev 	if ((raddr < addrlo) || ((raddr + psize) > addrhi)) {
230012f080e7Smrj 		sglinfo->si_copybuf_req += MMU_PAGESIZE;
230112f080e7Smrj 		sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
230212f080e7Smrj 		if ((cnt + 1) < sglinfo->si_max_pages) {
230312f080e7Smrj 			cnt++;
230412f080e7Smrj 			sgl[cnt].dmac_laddress = 0;
230512f080e7Smrj 			sgl[cnt].dmac_size = 0;
230612f080e7Smrj 			sgl[cnt].dmac_type = 0;
230712f080e7Smrj 		}
230812f080e7Smrj 	}
230912f080e7Smrj 
231012f080e7Smrj 	/*
231112f080e7Smrj 	 * save this page's physical address so we can figure out if the next
231212f080e7Smrj 	 * page is physically contiguous. Keep decrementing size until we are
231312f080e7Smrj 	 * done with the buffer.
231412f080e7Smrj 	 */
2315843e1988Sjohnlev 	last_page = raddr & MMU_PAGEMASK;
231612f080e7Smrj 	size -= psize;
231712f080e7Smrj 
231812f080e7Smrj 	while (size > 0) {
231912f080e7Smrj 		/* Get the size for this page (i.e. partial or full page) */
232012f080e7Smrj 		psize = MIN(size, MMU_PAGESIZE);
232112f080e7Smrj 
232212f080e7Smrj 		if (buftype == DMA_OTYP_PAGES) {
232312f080e7Smrj 			/* get the paddr from the page_t */
232412f080e7Smrj 			ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
2325843e1988Sjohnlev 			paddr = pfn_to_pa(pp->p_pagenum);
232612f080e7Smrj 			pp = pp->p_next;
232712f080e7Smrj 		} else if (pplist != NULL) {
232812f080e7Smrj 			/* index into the array of page_t's to get the paddr */
232912f080e7Smrj 			ASSERT(!PP_ISFREE(pplist[pcnt]));
2330843e1988Sjohnlev 			paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
233112f080e7Smrj 			pcnt++;
233212f080e7Smrj 		} else {
233312f080e7Smrj 			/* call into the VM to get the paddr */
2334843e1988Sjohnlev 			paddr =  pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat,
233512f080e7Smrj 			    vaddr));
233612f080e7Smrj 			vaddr += psize;
233712f080e7Smrj 		}
233812f080e7Smrj 
2339843e1988Sjohnlev #ifdef __xpv
2340843e1988Sjohnlev 		/*
2341843e1988Sjohnlev 		 * If we're dom0, we're using a real device so we need to load
2342843e1988Sjohnlev 		 * the cookies with MFNs instead of PFNs.
2343843e1988Sjohnlev 		 */
2344843e1988Sjohnlev 		raddr = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
2345843e1988Sjohnlev #else
2346843e1988Sjohnlev 		raddr = paddr;
2347843e1988Sjohnlev #endif
2348843e1988Sjohnlev 
234912f080e7Smrj 		/* check to see if this page needs the copy buffer */
2350843e1988Sjohnlev 		if ((raddr < addrlo) || ((raddr + psize) > addrhi)) {
235112f080e7Smrj 			sglinfo->si_copybuf_req += MMU_PAGESIZE;
235212f080e7Smrj 
235312f080e7Smrj 			/*
235412f080e7Smrj 			 * if there is something in the current cookie, go to
235512f080e7Smrj 			 * the next one. We only want one page in a cookie which
235612f080e7Smrj 			 * uses the copybuf since the copybuf doesn't have to
235712f080e7Smrj 			 * be physically contiguous.
235812f080e7Smrj 			 */
235912f080e7Smrj 			if (sgl[cnt].dmac_size != 0) {
236012f080e7Smrj 				cnt++;
236112f080e7Smrj 			}
2362843e1988Sjohnlev 			sgl[cnt].dmac_laddress = raddr;
236312f080e7Smrj 			sgl[cnt].dmac_size = psize;
236412f080e7Smrj #if defined(__amd64)
236512f080e7Smrj 			sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
236612f080e7Smrj #else
236712f080e7Smrj 			/*
236812f080e7Smrj 			 * save the buf offset for 32-bit kernel. used in the
236912f080e7Smrj 			 * obsoleted interfaces.
237012f080e7Smrj 			 */
237112f080e7Smrj 			sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF |
237212f080e7Smrj 			    (dmar_object->dmao_size - size);
237312f080e7Smrj #endif
237412f080e7Smrj 			/* if this isn't the last cookie, go to the next one */
237512f080e7Smrj 			if ((cnt + 1) < sglinfo->si_max_pages) {
237612f080e7Smrj 				cnt++;
237712f080e7Smrj 				sgl[cnt].dmac_laddress = 0;
237812f080e7Smrj 				sgl[cnt].dmac_size = 0;
237912f080e7Smrj 				sgl[cnt].dmac_type = 0;
238012f080e7Smrj 			}
238112f080e7Smrj 
238212f080e7Smrj 		/*
238312f080e7Smrj 		 * this page didn't need the copy buffer. Start a new cookie if
238412f080e7Smrj 		 * it's not physically contiguous with the last page, it would
238512f080e7Smrj 		 * put us over a segment boundary, it puts us over the max
238612f080e7Smrj 		 * cookie size, or the current sgl doesn't have anything in it.
238712f080e7Smrj 		 */
2388843e1988Sjohnlev 		} else if (((last_page + MMU_PAGESIZE) != raddr) ||
2389843e1988Sjohnlev 		    !(raddr & sglinfo->si_segmask) ||
239012f080e7Smrj 		    ((sgl[cnt].dmac_size + psize) > maxseg) ||
239112f080e7Smrj 		    (sgl[cnt].dmac_size == 0)) {
239212f080e7Smrj 			/*
239312f080e7Smrj 			 * if we're not already in a new cookie, go to the next
239412f080e7Smrj 			 * cookie.
239512f080e7Smrj 			 */
239612f080e7Smrj 			if (sgl[cnt].dmac_size != 0) {
239712f080e7Smrj 				cnt++;
239812f080e7Smrj 			}
239912f080e7Smrj 
240012f080e7Smrj 			/* save the cookie information */
2401843e1988Sjohnlev 			sgl[cnt].dmac_laddress = raddr;
240212f080e7Smrj 			sgl[cnt].dmac_size = psize;
240312f080e7Smrj #if defined(__amd64)
240412f080e7Smrj 			sgl[cnt].dmac_type = 0;
240512f080e7Smrj #else
240612f080e7Smrj 			/*
240712f080e7Smrj 			 * save the buf offset for 32-bit kernel. used in the
240812f080e7Smrj 			 * obsoleted interfaces.
240912f080e7Smrj 			 */
241012f080e7Smrj 			sgl[cnt].dmac_type = dmar_object->dmao_size - size;
241112f080e7Smrj #endif
241212f080e7Smrj 
241312f080e7Smrj 		/*
241412f080e7Smrj 		 * this page didn't need the copy buffer, it is physically
241512f080e7Smrj 		 * contiguous with the last page, and it's <= the max cookie
241612f080e7Smrj 		 * size.
241712f080e7Smrj 		 */
241812f080e7Smrj 		} else {
241912f080e7Smrj 			sgl[cnt].dmac_size += psize;
242012f080e7Smrj 
242112f080e7Smrj 			/*
242212f080e7Smrj 			 * if this exactly ==  the maximum cookie size, and
242312f080e7Smrj 			 * it isn't the last cookie, go to the next cookie.
242412f080e7Smrj 			 */
242512f080e7Smrj 			if (((sgl[cnt].dmac_size + psize) == maxseg) &&
242612f080e7Smrj 			    ((cnt + 1) < sglinfo->si_max_pages)) {
242712f080e7Smrj 				cnt++;
242812f080e7Smrj 				sgl[cnt].dmac_laddress = 0;
242912f080e7Smrj 				sgl[cnt].dmac_size = 0;
243012f080e7Smrj 				sgl[cnt].dmac_type = 0;
243112f080e7Smrj 			}
243212f080e7Smrj 		}
243312f080e7Smrj 
243412f080e7Smrj 		/*
243512f080e7Smrj 		 * save this page's physical address so we can figure out if the
243612f080e7Smrj 		 * next page is physically contiguous. Keep decrementing size
243712f080e7Smrj 		 * until we are done with the buffer.
243812f080e7Smrj 		 */
2439843e1988Sjohnlev 		last_page = raddr;
244012f080e7Smrj 		size -= psize;
244112f080e7Smrj 	}
244212f080e7Smrj 
244312f080e7Smrj 	/* we're done, save away how many cookies the sgl has */
244412f080e7Smrj 	if (sgl[cnt].dmac_size == 0) {
244512f080e7Smrj 		ASSERT(cnt < sglinfo->si_max_pages);
244612f080e7Smrj 		sglinfo->si_sgl_size = cnt;
244712f080e7Smrj 	} else {
244812f080e7Smrj 		sglinfo->si_sgl_size = cnt + 1;
244912f080e7Smrj 	}
245012f080e7Smrj }
245112f080e7Smrj 
245212f080e7Smrj 
245312f080e7Smrj /*
245412f080e7Smrj  * rootnex_bind_slowpath()
245512f080e7Smrj  *    Called in the bind path if the calling driver can't use the sgl without
245612f080e7Smrj  *    modifying it. We either need to use the copy buffer and/or we will end up
245712f080e7Smrj  *    with a partial bind.
245812f080e7Smrj  */
245912f080e7Smrj static int
246012f080e7Smrj rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
246112f080e7Smrj     rootnex_dma_t *dma, ddi_dma_attr_t *attr, int kmflag)
246212f080e7Smrj {
246312f080e7Smrj 	rootnex_sglinfo_t *sinfo;
246412f080e7Smrj 	rootnex_window_t *window;
246512f080e7Smrj 	ddi_dma_cookie_t *cookie;
246612f080e7Smrj 	size_t copybuf_used;
246712f080e7Smrj 	size_t dmac_size;
246812f080e7Smrj 	boolean_t partial;
246912f080e7Smrj 	off_t cur_offset;
247012f080e7Smrj 	page_t *cur_pp;
247112f080e7Smrj 	major_t mnum;
247212f080e7Smrj 	int e;
247312f080e7Smrj 	int i;
247412f080e7Smrj 
247512f080e7Smrj 
247612f080e7Smrj 	sinfo = &dma->dp_sglinfo;
247712f080e7Smrj 	copybuf_used = 0;
247812f080e7Smrj 	partial = B_FALSE;
247912f080e7Smrj 
248012f080e7Smrj 	/*
248112f080e7Smrj 	 * If we're using the copybuf, set the copybuf state in dma struct.
248212f080e7Smrj 	 * Needs to be first since it sets the copy buffer size.
248312f080e7Smrj 	 */
248412f080e7Smrj 	if (sinfo->si_copybuf_req != 0) {
248512f080e7Smrj 		e = rootnex_setup_copybuf(hp, dmareq, dma, attr);
248612f080e7Smrj 		if (e != DDI_SUCCESS) {
248712f080e7Smrj 			return (e);
248812f080e7Smrj 		}
248912f080e7Smrj 	} else {
249012f080e7Smrj 		dma->dp_copybuf_size = 0;
249112f080e7Smrj 	}
249212f080e7Smrj 
249312f080e7Smrj 	/*
249412f080e7Smrj 	 * Figure out if we need to do a partial mapping. If so, figure out
249512f080e7Smrj 	 * if we need to trim the buffers when we munge the sgl.
249612f080e7Smrj 	 */
249712f080e7Smrj 	if ((dma->dp_copybuf_size < sinfo->si_copybuf_req) ||
249812f080e7Smrj 	    (dma->dp_dma.dmao_size > dma->dp_maxxfer) ||
249912f080e7Smrj 	    (attr->dma_attr_sgllen < sinfo->si_sgl_size)) {
250012f080e7Smrj 		dma->dp_partial_required = B_TRUE;
250112f080e7Smrj 		if (attr->dma_attr_granular != 1) {
250212f080e7Smrj 			dma->dp_trim_required = B_TRUE;
250312f080e7Smrj 		}
250412f080e7Smrj 	} else {
250512f080e7Smrj 		dma->dp_partial_required = B_FALSE;
250612f080e7Smrj 		dma->dp_trim_required = B_FALSE;
250712f080e7Smrj 	}
250812f080e7Smrj 
250912f080e7Smrj 	/* If we need to do a partial bind, make sure the driver supports it */
251012f080e7Smrj 	if (dma->dp_partial_required &&
251112f080e7Smrj 	    !(dmareq->dmar_flags & DDI_DMA_PARTIAL)) {
251212f080e7Smrj 
251312f080e7Smrj 		mnum = ddi_driver_major(dma->dp_dip);
251412f080e7Smrj 		/*
251512f080e7Smrj 		 * rootnex_bind_warn is patchable; it allows us to print one
251612f080e7Smrj 		 * warning per major number.
251712f080e7Smrj 		 */
251812f080e7Smrj 		if ((rootnex_bind_warn) &&
251912f080e7Smrj 		    ((rootnex_warn_list[mnum] & ROOTNEX_BIND_WARNING) == 0)) {
252012f080e7Smrj 			rootnex_warn_list[mnum] |= ROOTNEX_BIND_WARNING;
252112f080e7Smrj 			cmn_err(CE_WARN, "!%s: coding error detected, the "
252212f080e7Smrj 			    "driver is using ddi_dma_attr(9S) incorrectly. "
252312f080e7Smrj 			    "There is a small risk of data corruption in "
252412f080e7Smrj 			    "particular with large I/Os. The driver should be "
252512f080e7Smrj 			    "replaced with a corrected version for proper "
252612f080e7Smrj 			    "system operation. To disable this warning, add "
252712f080e7Smrj 			    "'set rootnex:rootnex_bind_warn=0' to "
252812f080e7Smrj 			    "/etc/system(4).", ddi_driver_name(dma->dp_dip));
252912f080e7Smrj 		}
253012f080e7Smrj 		return (DDI_DMA_TOOBIG);
253112f080e7Smrj 	}
253212f080e7Smrj 
253312f080e7Smrj 	/*
253412f080e7Smrj 	 * we might need multiple windows, setup state to handle them. In this
253512f080e7Smrj 	 * code path, we will have at least one window.
253612f080e7Smrj 	 */
253712f080e7Smrj 	e = rootnex_setup_windows(hp, dma, attr, kmflag);
253812f080e7Smrj 	if (e != DDI_SUCCESS) {
253912f080e7Smrj 		rootnex_teardown_copybuf(dma);
254012f080e7Smrj 		return (e);
254112f080e7Smrj 	}
254212f080e7Smrj 
254312f080e7Smrj 	window = &dma->dp_window[0];
254412f080e7Smrj 	cookie = &dma->dp_cookies[0];
254512f080e7Smrj 	cur_offset = 0;
254612f080e7Smrj 	rootnex_init_win(hp, dma, window, cookie, cur_offset);
254712f080e7Smrj 	if (dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) {
254812f080e7Smrj 		cur_pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp;
254912f080e7Smrj 	}
255012f080e7Smrj 
255112f080e7Smrj 	/* loop through all the cookies we got back from get_sgl() */
255212f080e7Smrj 	for (i = 0; i < sinfo->si_sgl_size; i++) {
255312f080e7Smrj 		/*
255412f080e7Smrj 		 * If we're using the copy buffer, check this cookie and setup
255512f080e7Smrj 		 * its associated copy buffer state. If this cookie uses the
255612f080e7Smrj 		 * copy buffer, make sure we sync this window during dma_sync.
255712f080e7Smrj 		 */
255812f080e7Smrj 		if (dma->dp_copybuf_size > 0) {
255912f080e7Smrj 			rootnex_setup_cookie(&dmareq->dmar_object, dma, cookie,
256012f080e7Smrj 			    cur_offset, &copybuf_used, &cur_pp);
256112f080e7Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
256212f080e7Smrj 				window->wd_dosync = B_TRUE;
256312f080e7Smrj 			}
256412f080e7Smrj 		}
256512f080e7Smrj 
256612f080e7Smrj 		/*
256712f080e7Smrj 		 * save away the cookie size, since it could be modified in
256812f080e7Smrj 		 * the windowing code.
256912f080e7Smrj 		 */
257012f080e7Smrj 		dmac_size = cookie->dmac_size;
257112f080e7Smrj 
257212f080e7Smrj 		/* if we went over max copybuf size */
257312f080e7Smrj 		if (dma->dp_copybuf_size &&
257412f080e7Smrj 		    (copybuf_used > dma->dp_copybuf_size)) {
257512f080e7Smrj 			partial = B_TRUE;
257612f080e7Smrj 			e = rootnex_copybuf_window_boundary(hp, dma, &window,
257712f080e7Smrj 			    cookie, cur_offset, &copybuf_used);
257812f080e7Smrj 			if (e != DDI_SUCCESS) {
257912f080e7Smrj 				rootnex_teardown_copybuf(dma);
258012f080e7Smrj 				rootnex_teardown_windows(dma);
258112f080e7Smrj 				return (e);
258212f080e7Smrj 			}
258312f080e7Smrj 
258412f080e7Smrj 			/*
258512f080e7Smrj 			 * if the cookie uses the copy buffer, make sure the
258612f080e7Smrj 			 * new window we just moved to is set to sync.
258712f080e7Smrj 			 */
258812f080e7Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
258912f080e7Smrj 				window->wd_dosync = B_TRUE;
259012f080e7Smrj 			}
259112f080e7Smrj 			DTRACE_PROBE1(rootnex__copybuf__window, dev_info_t *,
259212f080e7Smrj 			    dma->dp_dip);
259312f080e7Smrj 
259412f080e7Smrj 		/* if the cookie cnt == max sgllen, move to the next window */
259512f080e7Smrj 		} else if (window->wd_cookie_cnt >= attr->dma_attr_sgllen) {
259612f080e7Smrj 			partial = B_TRUE;
259712f080e7Smrj 			ASSERT(window->wd_cookie_cnt == attr->dma_attr_sgllen);
259812f080e7Smrj 			e = rootnex_sgllen_window_boundary(hp, dma, &window,
259912f080e7Smrj 			    cookie, attr, cur_offset);
260012f080e7Smrj 			if (e != DDI_SUCCESS) {
260112f080e7Smrj 				rootnex_teardown_copybuf(dma);
260212f080e7Smrj 				rootnex_teardown_windows(dma);
260312f080e7Smrj 				return (e);
260412f080e7Smrj 			}
260512f080e7Smrj 
260612f080e7Smrj 			/*
260712f080e7Smrj 			 * if the cookie uses the copy buffer, make sure the
260812f080e7Smrj 			 * new window we just moved to is set to sync.
260912f080e7Smrj 			 */
261012f080e7Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
261112f080e7Smrj 				window->wd_dosync = B_TRUE;
261212f080e7Smrj 			}
261312f080e7Smrj 			DTRACE_PROBE1(rootnex__sgllen__window, dev_info_t *,
261412f080e7Smrj 			    dma->dp_dip);
261512f080e7Smrj 
261612f080e7Smrj 		/* else if we will be over maxxfer */
261712f080e7Smrj 		} else if ((window->wd_size + dmac_size) >
261812f080e7Smrj 		    dma->dp_maxxfer) {
261912f080e7Smrj 			partial = B_TRUE;
262012f080e7Smrj 			e = rootnex_maxxfer_window_boundary(hp, dma, &window,
262112f080e7Smrj 			    cookie);
262212f080e7Smrj 			if (e != DDI_SUCCESS) {
262312f080e7Smrj 				rootnex_teardown_copybuf(dma);
262412f080e7Smrj 				rootnex_teardown_windows(dma);
262512f080e7Smrj 				return (e);
262612f080e7Smrj 			}
262712f080e7Smrj 
262812f080e7Smrj 			/*
262912f080e7Smrj 			 * if the cookie uses the copy buffer, make sure the
263012f080e7Smrj 			 * new window we just moved to is set to sync.
263112f080e7Smrj 			 */
263212f080e7Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
263312f080e7Smrj 				window->wd_dosync = B_TRUE;
263412f080e7Smrj 			}
263512f080e7Smrj 			DTRACE_PROBE1(rootnex__maxxfer__window, dev_info_t *,
263612f080e7Smrj 			    dma->dp_dip);
263712f080e7Smrj 
263812f080e7Smrj 		/* else this cookie fits in the current window */
263912f080e7Smrj 		} else {
264012f080e7Smrj 			window->wd_cookie_cnt++;
264112f080e7Smrj 			window->wd_size += dmac_size;
264212f080e7Smrj 		}
264312f080e7Smrj 
264412f080e7Smrj 		/* track our offset into the buffer, go to the next cookie */
264512f080e7Smrj 		ASSERT(dmac_size <= dma->dp_dma.dmao_size);
264612f080e7Smrj 		ASSERT(cookie->dmac_size <= dmac_size);
264712f080e7Smrj 		cur_offset += dmac_size;
264812f080e7Smrj 		cookie++;
264912f080e7Smrj 	}
265012f080e7Smrj 
265112f080e7Smrj 	/* if we ended up with a zero sized window in the end, clean it up */
265212f080e7Smrj 	if (window->wd_size == 0) {
265312f080e7Smrj 		hp->dmai_nwin--;
265412f080e7Smrj 		window--;
265512f080e7Smrj 	}
265612f080e7Smrj 
265712f080e7Smrj 	ASSERT(window->wd_trim.tr_trim_last == B_FALSE);
265812f080e7Smrj 
265912f080e7Smrj 	if (!partial) {
266012f080e7Smrj 		return (DDI_DMA_MAPPED);
266112f080e7Smrj 	}
266212f080e7Smrj 
266312f080e7Smrj 	ASSERT(dma->dp_partial_required);
266412f080e7Smrj 	return (DDI_DMA_PARTIAL_MAP);
266512f080e7Smrj }
266612f080e7Smrj 
266712f080e7Smrj 
266812f080e7Smrj /*
266912f080e7Smrj  * rootnex_setup_copybuf()
267012f080e7Smrj  *    Called in bind slowpath. Figures out if we're going to use the copy
267112f080e7Smrj  *    buffer, and if we do, sets up the basic state to handle it.
267212f080e7Smrj  */
267312f080e7Smrj static int
267412f080e7Smrj rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
267512f080e7Smrj     rootnex_dma_t *dma, ddi_dma_attr_t *attr)
267612f080e7Smrj {
267712f080e7Smrj 	rootnex_sglinfo_t *sinfo;
267812f080e7Smrj 	ddi_dma_attr_t lattr;
267912f080e7Smrj 	size_t max_copybuf;
268012f080e7Smrj 	int cansleep;
268112f080e7Smrj 	int e;
268212f080e7Smrj #if !defined(__amd64)
268312f080e7Smrj 	int vmflag;
268412f080e7Smrj #endif
268512f080e7Smrj 
268612f080e7Smrj 
268712f080e7Smrj 	sinfo = &dma->dp_sglinfo;
268812f080e7Smrj 
268936945f79Smrj 	/* read this first so it's consistent through the routine  */
269036945f79Smrj 	max_copybuf = i_ddi_copybuf_size() & MMU_PAGEMASK;
269112f080e7Smrj 
269212f080e7Smrj 	/* We need to call into the rootnex on ddi_dma_sync() */
269312f080e7Smrj 	hp->dmai_rflags &= ~DMP_NOSYNC;
269412f080e7Smrj 
269512f080e7Smrj 	/* make sure the copybuf size <= the max size */
269612f080e7Smrj 	dma->dp_copybuf_size = MIN(sinfo->si_copybuf_req, max_copybuf);
269712f080e7Smrj 	ASSERT((dma->dp_copybuf_size & MMU_PAGEOFFSET) == 0);
269812f080e7Smrj 
269912f080e7Smrj #if !defined(__amd64)
270012f080e7Smrj 	/*
270112f080e7Smrj 	 * if we don't have kva space to copy to/from, allocate the KVA space
270212f080e7Smrj 	 * now. We only do this for the 32-bit kernel. We use seg kpm space for
270312f080e7Smrj 	 * the 64-bit kernel.
270412f080e7Smrj 	 */
270512f080e7Smrj 	if ((dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) ||
270612f080e7Smrj 	    (dmareq->dmar_object.dmao_obj.virt_obj.v_as != NULL)) {
270712f080e7Smrj 
270812f080e7Smrj 		/* convert the sleep flags */
270912f080e7Smrj 		if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
271012f080e7Smrj 			vmflag = VM_SLEEP;
271112f080e7Smrj 		} else {
271212f080e7Smrj 			vmflag = VM_NOSLEEP;
271312f080e7Smrj 		}
271412f080e7Smrj 
271512f080e7Smrj 		/* allocate Kernel VA space that we can bcopy to/from */
271612f080e7Smrj 		dma->dp_kva = vmem_alloc(heap_arena, dma->dp_copybuf_size,
271712f080e7Smrj 		    vmflag);
271812f080e7Smrj 		if (dma->dp_kva == NULL) {
271912f080e7Smrj 			return (DDI_DMA_NORESOURCES);
272012f080e7Smrj 		}
272112f080e7Smrj 	}
272212f080e7Smrj #endif
272312f080e7Smrj 
272412f080e7Smrj 	/* convert the sleep flags */
272512f080e7Smrj 	if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
272612f080e7Smrj 		cansleep = 1;
272712f080e7Smrj 	} else {
272812f080e7Smrj 		cansleep = 0;
272912f080e7Smrj 	}
273012f080e7Smrj 
273112f080e7Smrj 	/*
2732*d21b39ddSmrj 	 * Allocate the actual copy buffer. This needs to fit within the DMA
2733*d21b39ddSmrj 	 * engine limits, so we can't use kmem_alloc... We don't need
2734*d21b39ddSmrj 	 * contiguous memory (sgllen) since we will be forcing windows on
2735*d21b39ddSmrj 	 * sgllen anyway.
273612f080e7Smrj 	 */
273712f080e7Smrj 	lattr = *attr;
273812f080e7Smrj 	lattr.dma_attr_align = MMU_PAGESIZE;
2739*d21b39ddSmrj 	/*
2740*d21b39ddSmrj 	 * this should be < 0 to indicate no limit, but due to a bug in
2741*d21b39ddSmrj 	 * the rootnex, we'll set it to the maximum positive int.
2742*d21b39ddSmrj 	 */
2743*d21b39ddSmrj 	lattr.dma_attr_sgllen = 0x7fffffff;
274412f080e7Smrj 	e = i_ddi_mem_alloc(dma->dp_dip, &lattr, dma->dp_copybuf_size, cansleep,
274512f080e7Smrj 	    0, NULL, &dma->dp_cbaddr, &dma->dp_cbsize, NULL);
274612f080e7Smrj 	if (e != DDI_SUCCESS) {
274712f080e7Smrj #if !defined(__amd64)
274812f080e7Smrj 		if (dma->dp_kva != NULL) {
274912f080e7Smrj 			vmem_free(heap_arena, dma->dp_kva,
275012f080e7Smrj 			    dma->dp_copybuf_size);
275112f080e7Smrj 		}
275212f080e7Smrj #endif
275312f080e7Smrj 		return (DDI_DMA_NORESOURCES);
275412f080e7Smrj 	}
275512f080e7Smrj 
275612f080e7Smrj 	DTRACE_PROBE2(rootnex__alloc__copybuf, dev_info_t *, dma->dp_dip,
275712f080e7Smrj 	    size_t, dma->dp_copybuf_size);
275812f080e7Smrj 
275912f080e7Smrj 	return (DDI_SUCCESS);
276012f080e7Smrj }
276112f080e7Smrj 
276212f080e7Smrj 
276312f080e7Smrj /*
276412f080e7Smrj  * rootnex_setup_windows()
276512f080e7Smrj  *    Called in bind slowpath to set up the window state. We always have
276612f080e7Smrj  *    windows in the slowpath, even if the window count = 1.
276712f080e7Smrj  */
276812f080e7Smrj static int
276912f080e7Smrj rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
277012f080e7Smrj     ddi_dma_attr_t *attr, int kmflag)
277112f080e7Smrj {
277212f080e7Smrj 	rootnex_window_t *windowp;
277312f080e7Smrj 	rootnex_sglinfo_t *sinfo;
277412f080e7Smrj 	size_t copy_state_size;
277512f080e7Smrj 	size_t win_state_size;
277612f080e7Smrj 	size_t state_available;
277712f080e7Smrj 	size_t space_needed;
277812f080e7Smrj 	uint_t copybuf_win;
277912f080e7Smrj 	uint_t maxxfer_win;
278012f080e7Smrj 	size_t space_used;
278112f080e7Smrj 	uint_t sglwin;
278212f080e7Smrj 
278312f080e7Smrj 
278412f080e7Smrj 	sinfo = &dma->dp_sglinfo;
278512f080e7Smrj 
278612f080e7Smrj 	dma->dp_current_win = 0;
278712f080e7Smrj 	hp->dmai_nwin = 0;
278812f080e7Smrj 
278912f080e7Smrj 	/* If we don't need to do a partial, we only have one window */
279012f080e7Smrj 	if (!dma->dp_partial_required) {
279112f080e7Smrj 		dma->dp_max_win = 1;
279212f080e7Smrj 
279312f080e7Smrj 	/*
279412f080e7Smrj 	 * we need multiple windows, need to figure out the worst-case number
279512f080e7Smrj 	 * of windows.
279612f080e7Smrj 	 */
27977c478bd9Sstevel@tonic-gate 	} else {
27987c478bd9Sstevel@tonic-gate 		/*
279912f080e7Smrj 		 * if we need windows because we need more copy buffer than
280012f080e7Smrj 		 * we allow, the worst-case number of windows we could need
280112f080e7Smrj 		 * here would be (copybuf space required / copybuf space that
280212f080e7Smrj 		 * we have) plus one for remainder, and plus 2 to handle the
280312f080e7Smrj 		 * extra pages on the trim for the first and last pages of the
280412f080e7Smrj 		 * buffer (a page is the minimum window size so under the right
280512f080e7Smrj 		 * attr settings, you could have a window for each page).
280612f080e7Smrj 		 * The last page will only be hit here if the size is not a
280712f080e7Smrj 		 * multiple of the granularity (which theoretically shouldn't
280812f080e7Smrj 		 * be the case but never has been enforced, so we could have
280912f080e7Smrj 		 * broken things without it).
28107c478bd9Sstevel@tonic-gate 		 */
281112f080e7Smrj 		if (sinfo->si_copybuf_req > dma->dp_copybuf_size) {
281212f080e7Smrj 			ASSERT(dma->dp_copybuf_size > 0);
281312f080e7Smrj 			copybuf_win = (sinfo->si_copybuf_req /
281412f080e7Smrj 			    dma->dp_copybuf_size) + 1 + 2;
28157c478bd9Sstevel@tonic-gate 		} else {
281612f080e7Smrj 			copybuf_win = 0;
28177c478bd9Sstevel@tonic-gate 		}
281812f080e7Smrj 
281912f080e7Smrj 		/*
282012f080e7Smrj 		 * if we need windows because we have more cookies than the H/W
282112f080e7Smrj 		 * can handle, the number of windows we would need here would
282212f080e7Smrj 		 * be (cookie count / cookie count H/W supports) plus one for
282312f080e7Smrj 		 * remainder, and plus 2 to handle the extra pages on the trim
282412f080e7Smrj 		 * (see above comment about trim)
282512f080e7Smrj 		 */
282612f080e7Smrj 		if (attr->dma_attr_sgllen < sinfo->si_sgl_size) {
282712f080e7Smrj 			sglwin = ((sinfo->si_sgl_size / attr->dma_attr_sgllen)
282812f080e7Smrj 			    + 1) + 2;
28297c478bd9Sstevel@tonic-gate 		} else {
283012f080e7Smrj 			sglwin = 0;
28317c478bd9Sstevel@tonic-gate 		}
283212f080e7Smrj 
283312f080e7Smrj 		/*
283412f080e7Smrj 		 * if we need windows because we're binding more memory than the
283512f080e7Smrj 		 * H/W can transfer at once, the number of windows we would need
283612f080e7Smrj 		 * here would be (xfer count / max xfer H/W supports) plus one
283712f080e7Smrj 		 * for remainder, and plus 2 to handle the extra pages on the
283812f080e7Smrj 		 * trim (see above comment about trim)
283912f080e7Smrj 		 */
284012f080e7Smrj 		if (dma->dp_dma.dmao_size > dma->dp_maxxfer) {
284112f080e7Smrj 			maxxfer_win = (dma->dp_dma.dmao_size /
284212f080e7Smrj 			    dma->dp_maxxfer) + 1 + 2;
284312f080e7Smrj 		} else {
284412f080e7Smrj 			maxxfer_win = 0;
28457c478bd9Sstevel@tonic-gate 		}
284612f080e7Smrj 		dma->dp_max_win =  copybuf_win + sglwin + maxxfer_win;
284712f080e7Smrj 		ASSERT(dma->dp_max_win > 0);
284812f080e7Smrj 	}
284912f080e7Smrj 	win_state_size = dma->dp_max_win * sizeof (rootnex_window_t);
285012f080e7Smrj 
285112f080e7Smrj 	/*
285212f080e7Smrj 	 * Get space for window and potential copy buffer state. Before we
285312f080e7Smrj 	 * go and allocate memory, see if we can get away with using what's
285412f080e7Smrj 	 * left in the pre-allocated state or the dynamically allocated sgl.
285512f080e7Smrj 	 */
285612f080e7Smrj 	space_used = (uintptr_t)(sinfo->si_sgl_size *
285712f080e7Smrj 	    sizeof (ddi_dma_cookie_t));
285812f080e7Smrj 
285912f080e7Smrj 	/* if we dynamically allocated space for the cookies */
286012f080e7Smrj 	if (dma->dp_need_to_free_cookie) {
286112f080e7Smrj 		/* if we have more space in the pre-allocated buffer, use it */
286212f080e7Smrj 		ASSERT(space_used <= dma->dp_cookie_size);
286312f080e7Smrj 		if ((dma->dp_cookie_size - space_used) <=
286412f080e7Smrj 		    rootnex_state->r_prealloc_size) {
286512f080e7Smrj 			state_available = rootnex_state->r_prealloc_size;
286612f080e7Smrj 			windowp = (rootnex_window_t *)dma->dp_prealloc_buffer;
286712f080e7Smrj 
286812f080e7Smrj 		/*
286912f080e7Smrj 		 * else, we have more free space in the dynamically allocated
287012f080e7Smrj 		 * buffer, i.e. the buffer wasn't worst-case fragmented so we
287112f080e7Smrj 		 * didn't need a lot of cookies.
287212f080e7Smrj 		 */
287312f080e7Smrj 		} else {
287412f080e7Smrj 			state_available = dma->dp_cookie_size - space_used;
287512f080e7Smrj 			windowp = (rootnex_window_t *)
287612f080e7Smrj 			    &dma->dp_cookies[sinfo->si_sgl_size];
287712f080e7Smrj 		}
287812f080e7Smrj 
287912f080e7Smrj 	/* we used the pre-allocated buffer */
288012f080e7Smrj 	} else {
288112f080e7Smrj 		ASSERT(space_used <= rootnex_state->r_prealloc_size);
288212f080e7Smrj 		state_available = rootnex_state->r_prealloc_size - space_used;
288312f080e7Smrj 		windowp = (rootnex_window_t *)
288412f080e7Smrj 		    &dma->dp_cookies[sinfo->si_sgl_size];
288512f080e7Smrj 	}
288612f080e7Smrj 
288712f080e7Smrj 	/*
288812f080e7Smrj 	 * figure out how much state we need to track the copy buffer. Add an
288912f080e7Smrj 	 * additional 8 bytes for pointer alignment later.
289012f080e7Smrj 	 */
289112f080e7Smrj 	if (dma->dp_copybuf_size > 0) {
289212f080e7Smrj 		copy_state_size = sinfo->si_max_pages *
289312f080e7Smrj 		    sizeof (rootnex_pgmap_t);
289412f080e7Smrj 	} else {
289512f080e7Smrj 		copy_state_size = 0;
289612f080e7Smrj 	}
289712f080e7Smrj 	/* add an additional 8 bytes for pointer alignment */
289812f080e7Smrj 	space_needed = win_state_size + copy_state_size + 0x8;
289912f080e7Smrj 
290012f080e7Smrj 	/* if we have enough space already, use it */
290112f080e7Smrj 	if (state_available >= space_needed) {
290212f080e7Smrj 		dma->dp_window = windowp;
290312f080e7Smrj 		dma->dp_need_to_free_window = B_FALSE;
290412f080e7Smrj 
290512f080e7Smrj 	/* not enough space, need to allocate more. */
290612f080e7Smrj 	} else {
290712f080e7Smrj 		dma->dp_window = kmem_alloc(space_needed, kmflag);
290812f080e7Smrj 		if (dma->dp_window == NULL) {
290912f080e7Smrj 			return (DDI_DMA_NORESOURCES);
291012f080e7Smrj 		}
291112f080e7Smrj 		dma->dp_need_to_free_window = B_TRUE;
291212f080e7Smrj 		dma->dp_window_size = space_needed;
291312f080e7Smrj 		DTRACE_PROBE2(rootnex__bind__sp__alloc, dev_info_t *,
291412f080e7Smrj 		    dma->dp_dip, size_t, space_needed);
291512f080e7Smrj 	}
291612f080e7Smrj 
291712f080e7Smrj 	/*
291812f080e7Smrj 	 * we allocate copy buffer state and window state at the same time.
291912f080e7Smrj 	 * setup our copy buffer state pointers. Make sure it's aligned.
292012f080e7Smrj 	 */
292112f080e7Smrj 	if (dma->dp_copybuf_size > 0) {
292212f080e7Smrj 		dma->dp_pgmap = (rootnex_pgmap_t *)(((uintptr_t)
292312f080e7Smrj 		    &dma->dp_window[dma->dp_max_win] + 0x7) & ~0x7);
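		/* (x + 0x7) & ~0x7 rounds x up to the next 8-byte boundary */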
292412f080e7Smrj 
292512f080e7Smrj #if !defined(__amd64)
292612f080e7Smrj 		/*
292712f080e7Smrj 		 * make sure all pm_mapped, pm_vaddr, and pm_pp are set to
292812f080e7Smrj 		 * false/NULL. Should be quicker to bzero vs loop and set.
292912f080e7Smrj 		 */
293012f080e7Smrj 		bzero(dma->dp_pgmap, copy_state_size);
293112f080e7Smrj #endif
293212f080e7Smrj 	} else {
293312f080e7Smrj 		dma->dp_pgmap = NULL;
293412f080e7Smrj 	}
293512f080e7Smrj 
293612f080e7Smrj 	return (DDI_SUCCESS);
293712f080e7Smrj }
293812f080e7Smrj 
293912f080e7Smrj 
294012f080e7Smrj /*
294112f080e7Smrj  * rootnex_teardown_copybuf()
294212f080e7Smrj  *    cleans up after rootnex_setup_copybuf()
294312f080e7Smrj  */
294412f080e7Smrj static void
294512f080e7Smrj rootnex_teardown_copybuf(rootnex_dma_t *dma)
294612f080e7Smrj {
294712f080e7Smrj #if !defined(__amd64)
294812f080e7Smrj 	int i;
294912f080e7Smrj 
295012f080e7Smrj 	/*
295112f080e7Smrj 	 * if we allocated kernel heap VMEM space, go through all the pages and
295212f080e7Smrj 	 * unmap any of the ones that were mapped into the kernel heap VMEM
295312f080e7Smrj 	 * arena. Then free the VMEM space.
295412f080e7Smrj 	 */
295512f080e7Smrj 	if (dma->dp_kva != NULL) {
295612f080e7Smrj 		for (i = 0; i < dma->dp_sglinfo.si_max_pages; i++) {
295712f080e7Smrj 			if (dma->dp_pgmap[i].pm_mapped) {
295812f080e7Smrj 				hat_unload(kas.a_hat, dma->dp_pgmap[i].pm_kaddr,
295912f080e7Smrj 				    MMU_PAGESIZE, HAT_UNLOAD);
296012f080e7Smrj 				dma->dp_pgmap[i].pm_mapped = B_FALSE;
296112f080e7Smrj 			}
296212f080e7Smrj 		}
296312f080e7Smrj 
296412f080e7Smrj 		vmem_free(heap_arena, dma->dp_kva, dma->dp_copybuf_size);
296512f080e7Smrj 	}
296612f080e7Smrj 
296712f080e7Smrj #endif
296812f080e7Smrj 
296912f080e7Smrj 	/* if we allocated a copy buffer, free it */
297012f080e7Smrj 	if (dma->dp_cbaddr != NULL) {
29717b93957cSeota 		i_ddi_mem_free(dma->dp_cbaddr, NULL);
297212f080e7Smrj 	}
297312f080e7Smrj }
297412f080e7Smrj 
297512f080e7Smrj 
297612f080e7Smrj /*
297712f080e7Smrj  * rootnex_teardown_windows()
297812f080e7Smrj  *    cleans up after rootnex_setup_windows()
297912f080e7Smrj  */
298012f080e7Smrj static void
298112f080e7Smrj rootnex_teardown_windows(rootnex_dma_t *dma)
298212f080e7Smrj {
298312f080e7Smrj 	/*
298412f080e7Smrj 	 * if we had to allocate window state on the last bind (because we
298512f080e7Smrj 	 * didn't have enough pre-allocated space in the handle), free it.
298612f080e7Smrj 	 */
298712f080e7Smrj 	if (dma->dp_need_to_free_window) {
298812f080e7Smrj 		kmem_free(dma->dp_window, dma->dp_window_size);
298912f080e7Smrj 	}
299012f080e7Smrj }
299112f080e7Smrj 
299212f080e7Smrj 
299312f080e7Smrj /*
299412f080e7Smrj  * rootnex_init_win()
299512f080e7Smrj  *    Called in bind slow path during creation of a new window. Initializes
299612f080e7Smrj  *    window state to default values.
299712f080e7Smrj  */
299812f080e7Smrj /*ARGSUSED*/
299912f080e7Smrj static void
300012f080e7Smrj rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
300112f080e7Smrj     rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset)
300212f080e7Smrj {
300312f080e7Smrj 	hp->dmai_nwin++;
300412f080e7Smrj 	window->wd_dosync = B_FALSE;
300512f080e7Smrj 	window->wd_offset = cur_offset;
300612f080e7Smrj 	window->wd_size = 0;
300712f080e7Smrj 	window->wd_first_cookie = cookie;
300812f080e7Smrj 	window->wd_cookie_cnt = 0;
300912f080e7Smrj 	window->wd_trim.tr_trim_first = B_FALSE;
301012f080e7Smrj 	window->wd_trim.tr_trim_last = B_FALSE;
301112f080e7Smrj 	window->wd_trim.tr_first_copybuf_win = B_FALSE;
301212f080e7Smrj 	window->wd_trim.tr_last_copybuf_win = B_FALSE;
301312f080e7Smrj #if !defined(__amd64)
301412f080e7Smrj 	window->wd_remap_copybuf = dma->dp_cb_remaping;
301512f080e7Smrj #endif
301612f080e7Smrj }
301712f080e7Smrj 
301812f080e7Smrj 
301912f080e7Smrj /*
302012f080e7Smrj  * rootnex_setup_cookie()
302112f080e7Smrj  *    Called in the bind slow path when the sgl uses the copy buffer. If any of
302212f080e7Smrj  *    the sgl uses the copy buffer, we need to go through each cookie, figure
302312f080e7Smrj  *    out if it uses the copy buffer, and if it does, save away everything we'll
302412f080e7Smrj  *    need during sync.
302512f080e7Smrj  */
302612f080e7Smrj static void
302712f080e7Smrj rootnex_setup_cookie(ddi_dma_obj_t *dmar_object, rootnex_dma_t *dma,
302812f080e7Smrj     ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used,
302912f080e7Smrj     page_t **cur_pp)
303012f080e7Smrj {
303112f080e7Smrj 	boolean_t copybuf_sz_power_2;
303212f080e7Smrj 	rootnex_sglinfo_t *sinfo;
3033843e1988Sjohnlev 	paddr_t paddr;
303412f080e7Smrj 	uint_t pidx;
303512f080e7Smrj 	uint_t pcnt;
303612f080e7Smrj 	off_t poff;
303712f080e7Smrj #if defined(__amd64)
303812f080e7Smrj 	pfn_t pfn;
303912f080e7Smrj #else
304012f080e7Smrj 	page_t **pplist;
304112f080e7Smrj #endif
304212f080e7Smrj 
304312f080e7Smrj 	sinfo = &dma->dp_sglinfo;
304412f080e7Smrj 
304512f080e7Smrj 	/*
304612f080e7Smrj 	 * Calculate the page index relative to the start of the buffer. The
304712f080e7Smrj 	 * index to the current page for our buffer is the offset into the
304812f080e7Smrj 	 * first page of the buffer plus our current offset into the buffer
304912f080e7Smrj 	 * itself, shifted of course...
305012f080e7Smrj 	 */
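	/*
	 * e.g. (hypothetical numbers): si_buf_offset 0x200 and cur_offset
	 * 0x1e00 give pidx = 0x2000 >> MMU_PAGESHIFT = 2 with 4K pages.
	 */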
305112f080e7Smrj 	pidx = (sinfo->si_buf_offset + cur_offset) >> MMU_PAGESHIFT;
305212f080e7Smrj 	ASSERT(pidx < sinfo->si_max_pages);
305312f080e7Smrj 
305412f080e7Smrj 	/* if this cookie uses the copy buffer */
305512f080e7Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
305612f080e7Smrj 		/*
305712f080e7Smrj 		 * NOTE: we know that since this cookie uses the copy buffer, it
305812f080e7Smrj 		 * is <= MMU_PAGESIZE.
305912f080e7Smrj 		 */
306012f080e7Smrj 
306112f080e7Smrj 		/*
306212f080e7Smrj 		 * get the offset into the page. For the 64-bit kernel, get the
306312f080e7Smrj 		 * pfn which we'll use with seg kpm.
306412f080e7Smrj 		 */
3065843e1988Sjohnlev 		poff = cookie->dmac_laddress & MMU_PAGEOFFSET;
306612f080e7Smrj #if defined(__amd64)
3067843e1988Sjohnlev 		/* mfn_to_pfn() is a NOP on i86pc */
3068843e1988Sjohnlev 		pfn = mfn_to_pfn(cookie->dmac_laddress >> MMU_PAGESHIFT);
3069843e1988Sjohnlev #endif /* __amd64 */
307012f080e7Smrj 
307112f080e7Smrj 		/* figure out if the copybuf size is a power of 2 */
307212f080e7Smrj 		if (dma->dp_copybuf_size & (dma->dp_copybuf_size - 1)) {
307312f080e7Smrj 			copybuf_sz_power_2 = B_FALSE;
307412f080e7Smrj 		} else {
307512f080e7Smrj 			copybuf_sz_power_2 = B_TRUE;
307612f080e7Smrj 		}
307712f080e7Smrj 
307812f080e7Smrj 		/* This page uses the copy buffer */
307912f080e7Smrj 		dma->dp_pgmap[pidx].pm_uses_copybuf = B_TRUE;
308012f080e7Smrj 
308112f080e7Smrj 		/*
308212f080e7Smrj 		 * save the copy buffer KVA that we'll use with this page.
308312f080e7Smrj 		 * if we still fit within the copybuf, it's a simple add.
308412f080e7Smrj 		 * otherwise, we need to wrap over using & or % accordingly.
308512f080e7Smrj 		 */
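		/*
		 * e.g. (hypothetical numbers): with an 8K copybuf and
		 * *copybuf_used == 12K, the power-of-2 path wraps to offset
		 * 12K & (8K - 1) = 4K into the copybuf.
		 */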
308612f080e7Smrj 		if ((*copybuf_used + MMU_PAGESIZE) <= dma->dp_copybuf_size) {
308712f080e7Smrj 			dma->dp_pgmap[pidx].pm_cbaddr = dma->dp_cbaddr +
308812f080e7Smrj 			    *copybuf_used;
308912f080e7Smrj 		} else {
309012f080e7Smrj 			if (copybuf_sz_power_2) {
309112f080e7Smrj 				dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
309212f080e7Smrj 				    (uintptr_t)dma->dp_cbaddr +
309312f080e7Smrj 				    (*copybuf_used &
309412f080e7Smrj 				    (dma->dp_copybuf_size - 1)));
309512f080e7Smrj 			} else {
309612f080e7Smrj 				dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
309712f080e7Smrj 				    (uintptr_t)dma->dp_cbaddr +
309812f080e7Smrj 				    (*copybuf_used % dma->dp_copybuf_size));
309912f080e7Smrj 			}
310012f080e7Smrj 		}
310112f080e7Smrj 
310212f080e7Smrj 		/*
310312f080e7Smrj 		 * overwrite the cookie physical address with the physical
310412f080e7Smrj 		 * address of the copy buffer page that we will
310512f080e7Smrj 		 * use.
310612f080e7Smrj 		 */
3107843e1988Sjohnlev 		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat,
310812f080e7Smrj 		    dma->dp_pgmap[pidx].pm_cbaddr)) + poff;
310912f080e7Smrj 
3110843e1988Sjohnlev #ifdef __xpv
3111843e1988Sjohnlev 		/*
3112843e1988Sjohnlev 		 * If we're dom0, we're using a real device so we need to load
3113843e1988Sjohnlev 		 * the cookies with MAs instead of PAs.
3114843e1988Sjohnlev 		 */
3115843e1988Sjohnlev 		cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
3116843e1988Sjohnlev #else
3117843e1988Sjohnlev 		cookie->dmac_laddress = paddr;
3118843e1988Sjohnlev #endif
3119843e1988Sjohnlev 
312012f080e7Smrj 		/* if we have a kernel VA, it's easy, just save that address */
312112f080e7Smrj 		if ((dmar_object->dmao_type != DMA_OTYP_PAGES) &&
312212f080e7Smrj 		    (sinfo->si_asp == &kas)) {
312312f080e7Smrj 			/*
312412f080e7Smrj 			 * save away the page aligned virtual address of the
312512f080e7Smrj 			 * driver buffer. Offsets are handled in the sync code.
312612f080e7Smrj 			 */
312712f080e7Smrj 			dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)(((uintptr_t)
312812f080e7Smrj 			    dmar_object->dmao_obj.virt_obj.v_addr + cur_offset)
312912f080e7Smrj 			    & MMU_PAGEMASK);
313012f080e7Smrj #if !defined(__amd64)
313112f080e7Smrj 			/*
313212f080e7Smrj 			 * we didn't need to, and will never need to map this
313312f080e7Smrj 			 * page.
313412f080e7Smrj 			 */
313512f080e7Smrj 			dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
313612f080e7Smrj #endif
313712f080e7Smrj 
313812f080e7Smrj 		/* we don't have a kernel VA. We need one for the bcopy. */
313912f080e7Smrj 		} else {
314012f080e7Smrj #if defined(__amd64)
314112f080e7Smrj 			/*
314212f080e7Smrj 			 * for the 64-bit kernel, it's easy. We use seg kpm to
314312f080e7Smrj 			 * get a Kernel VA for the corresponding pfn.
314412f080e7Smrj 			 */
314512f080e7Smrj 			dma->dp_pgmap[pidx].pm_kaddr = hat_kpm_pfn2va(pfn);
314612f080e7Smrj #else
314712f080e7Smrj 			/*
314812f080e7Smrj 			 * for the 32-bit kernel, this is a pain. First we'll
314912f080e7Smrj 			 * save away the page_t or user VA for this page. This
315012f080e7Smrj 			 * is needed in rootnex_dma_win() when we switch to a
315112f080e7Smrj 			 * new window which requires us to re-map the copy
315212f080e7Smrj 			 * buffer.
315312f080e7Smrj 			 */
315412f080e7Smrj 			pplist = dmar_object->dmao_obj.virt_obj.v_priv;
315512f080e7Smrj 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
315612f080e7Smrj 				dma->dp_pgmap[pidx].pm_pp = *cur_pp;
315712f080e7Smrj 				dma->dp_pgmap[pidx].pm_vaddr = NULL;
315812f080e7Smrj 			} else if (pplist != NULL) {
315912f080e7Smrj 				dma->dp_pgmap[pidx].pm_pp = pplist[pidx];
316012f080e7Smrj 				dma->dp_pgmap[pidx].pm_vaddr = NULL;
316112f080e7Smrj 			} else {
316212f080e7Smrj 				dma->dp_pgmap[pidx].pm_pp = NULL;
316312f080e7Smrj 				dma->dp_pgmap[pidx].pm_vaddr = (caddr_t)
316412f080e7Smrj 				    (((uintptr_t)
316512f080e7Smrj 				    dmar_object->dmao_obj.virt_obj.v_addr +
316612f080e7Smrj 				    cur_offset) & MMU_PAGEMASK);
316712f080e7Smrj 			}
316812f080e7Smrj 
316912f080e7Smrj 			/*
317012f080e7Smrj 			 * save away the page aligned virtual address which was
317112f080e7Smrj 			 * allocated from the kernel heap arena (taking into
317212f080e7Smrj 			 * account if we need more copy buffer than we allocated
317312f080e7Smrj 			 * and use multiple windows to handle this, i.e. &,%).
317412f080e7Smrj 			 * NOTE: there isn't any physical memory backing up this
317512f080e7Smrj 			 * virtual address space currently.
317612f080e7Smrj 			 */
317712f080e7Smrj 			if ((*copybuf_used + MMU_PAGESIZE) <=
317812f080e7Smrj 			    dma->dp_copybuf_size) {
317912f080e7Smrj 				dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
318012f080e7Smrj 				    (((uintptr_t)dma->dp_kva + *copybuf_used) &
318112f080e7Smrj 				    MMU_PAGEMASK);
318212f080e7Smrj 			} else {
318312f080e7Smrj 				if (copybuf_sz_power_2) {
318412f080e7Smrj 					dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
318512f080e7Smrj 					    (((uintptr_t)dma->dp_kva +
318612f080e7Smrj 					    (*copybuf_used &
318712f080e7Smrj 					    (dma->dp_copybuf_size - 1))) &
318812f080e7Smrj 					    MMU_PAGEMASK);
318912f080e7Smrj 				} else {
319012f080e7Smrj 					dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
319112f080e7Smrj 					    (((uintptr_t)dma->dp_kva +
319212f080e7Smrj 					    (*copybuf_used %
319312f080e7Smrj 					    dma->dp_copybuf_size)) &
319412f080e7Smrj 					    MMU_PAGEMASK);
319512f080e7Smrj 				}
319612f080e7Smrj 			}
319712f080e7Smrj 
319812f080e7Smrj 			/*
319912f080e7Smrj 			 * if we haven't used up the available copy buffer yet,
320012f080e7Smrj 			 * map the kva to the physical page.
320112f080e7Smrj 			 */
320212f080e7Smrj 			if (!dma->dp_cb_remaping && ((*copybuf_used +
320312f080e7Smrj 			    MMU_PAGESIZE) <= dma->dp_copybuf_size)) {
320412f080e7Smrj 				dma->dp_pgmap[pidx].pm_mapped = B_TRUE;
320512f080e7Smrj 				if (dma->dp_pgmap[pidx].pm_pp != NULL) {
320612f080e7Smrj 					i86_pp_map(dma->dp_pgmap[pidx].pm_pp,
320712f080e7Smrj 					    dma->dp_pgmap[pidx].pm_kaddr);
320812f080e7Smrj 				} else {
320912f080e7Smrj 					i86_va_map(dma->dp_pgmap[pidx].pm_vaddr,
321012f080e7Smrj 					    sinfo->si_asp,
321112f080e7Smrj 					    dma->dp_pgmap[pidx].pm_kaddr);
321212f080e7Smrj 				}
321312f080e7Smrj 
321412f080e7Smrj 			/*
321512f080e7Smrj 			 * we've used up the available copy buffer, this page
321612f080e7Smrj 			 * will have to be mapped during rootnex_dma_win() when
321712f080e7Smrj 			 * we switch to a new window which requires a re-map of
321812f080e7Smrj 			 * the copy buffer. (32-bit kernel only)
321912f080e7Smrj 			 */
322012f080e7Smrj 			} else {
322112f080e7Smrj 				dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
322212f080e7Smrj 			}
322312f080e7Smrj #endif
322412f080e7Smrj 			/* go to the next page_t */
322512f080e7Smrj 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
322612f080e7Smrj 				*cur_pp = (*cur_pp)->p_next;
322712f080e7Smrj 			}
322812f080e7Smrj 		}
322912f080e7Smrj 
323012f080e7Smrj 		/* add to the copy buffer count */
323112f080e7Smrj 		*copybuf_used += MMU_PAGESIZE;
323212f080e7Smrj 
323312f080e7Smrj 	/*
323412f080e7Smrj 	 * This cookie doesn't use the copy buffer. Walk through the pages this
323512f080e7Smrj 	 * cookie occupies to reflect this.
323612f080e7Smrj 	 */
323712f080e7Smrj 	} else {
323812f080e7Smrj 		/*
323912f080e7Smrj 		 * figure out how many pages the cookie occupies. We need to
324012f080e7Smrj 		 * use the original page offset of the buffer and the cookie's
324112f080e7Smrj 		 * offset in the buffer to do this.
324212f080e7Smrj 		 */
324312f080e7Smrj 		poff = (sinfo->si_buf_offset + cur_offset) & MMU_PAGEOFFSET;
324412f080e7Smrj 		pcnt = mmu_btopr(cookie->dmac_size + poff);
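		/*
		 * e.g. (hypothetical numbers): a 0x1800 byte cookie starting
		 * at page offset 0xc00 spans mmu_btopr(0x1800 + 0xc00) = 3
		 * pages with 4K pages.
		 */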
324512f080e7Smrj 
324612f080e7Smrj 		while (pcnt > 0) {
324712f080e7Smrj #if !defined(__amd64)
324812f080e7Smrj 			/*
324912f080e7Smrj 			 * the 32-bit kernel doesn't have seg kpm, so we need
325012f080e7Smrj 			 * to map in the driver buffer (if it didn't come down
325112f080e7Smrj 			 * with a kernel VA) on the fly. Since this page doesn't
325212f080e7Smrj 			 * use the copy buffer, it does not, nor will it ever, have
325312f080e7Smrj 			 * to be mapped in.
325412f080e7Smrj 			 */
325512f080e7Smrj 			dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
325612f080e7Smrj #endif
325712f080e7Smrj 			dma->dp_pgmap[pidx].pm_uses_copybuf = B_FALSE;
325812f080e7Smrj 
325912f080e7Smrj 			/*
326012f080e7Smrj 			 * we need to update pidx and cur_pp or we'll lose
326112f080e7Smrj 			 * track of where we are.
326212f080e7Smrj 			 */
326312f080e7Smrj 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
326412f080e7Smrj 				*cur_pp = (*cur_pp)->p_next;
326512f080e7Smrj 			}
326612f080e7Smrj 			pidx++;
326712f080e7Smrj 			pcnt--;
326812f080e7Smrj 		}
326912f080e7Smrj 	}
327012f080e7Smrj }
327112f080e7Smrj 
327212f080e7Smrj 
327312f080e7Smrj /*
327412f080e7Smrj  * rootnex_sgllen_window_boundary()
327512f080e7Smrj  *    Called in the bind slow path when the next cookie causes us to exceed (in
327612f080e7Smrj  *    this case == since we start at 0 and sgllen starts at 1) the maximum sgl
327712f080e7Smrj  *    length supported by the DMA H/W.
327812f080e7Smrj  */
327912f080e7Smrj static int
328012f080e7Smrj rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
328112f080e7Smrj     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, ddi_dma_attr_t *attr,
328212f080e7Smrj     off_t cur_offset)
328312f080e7Smrj {
328412f080e7Smrj 	off_t new_offset;
328512f080e7Smrj 	size_t trim_sz;
328612f080e7Smrj 	off_t coffset;
328712f080e7Smrj 
328812f080e7Smrj 
328912f080e7Smrj 	/*
329012f080e7Smrj 	 * if we know we'll never have to trim, it's pretty easy. Just move to
329112f080e7Smrj 	 * the next window and init it. We're done.
329212f080e7Smrj 	 */
329312f080e7Smrj 	if (!dma->dp_trim_required) {
329412f080e7Smrj 		(*windowp)++;
329512f080e7Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
329612f080e7Smrj 		(*windowp)->wd_cookie_cnt++;
329712f080e7Smrj 		(*windowp)->wd_size = cookie->dmac_size;
329812f080e7Smrj 		return (DDI_SUCCESS);
329912f080e7Smrj 	}
330012f080e7Smrj 
330112f080e7Smrj 	/* figure out how much we need to trim from the window */
330212f080e7Smrj 	ASSERT(attr->dma_attr_granular != 0);
330312f080e7Smrj 	if (dma->dp_granularity_power_2) {
330412f080e7Smrj 		trim_sz = (*windowp)->wd_size & (attr->dma_attr_granular - 1);
330512f080e7Smrj 	} else {
330612f080e7Smrj 		trim_sz = (*windowp)->wd_size % attr->dma_attr_granular;
330712f080e7Smrj 	}
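	/*
	 * e.g. (hypothetical numbers): a 0x2600 byte window with granularity
	 * 0x600 needs 0x2600 % 0x600 = 0x200 bytes trimmed so the window
	 * ends on a granularity boundary.
	 */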
330812f080e7Smrj 
330912f080e7Smrj 	/* The window's a whole multiple of granularity. We're done */
331012f080e7Smrj 	if (trim_sz == 0) {
331112f080e7Smrj 		(*windowp)++;
331212f080e7Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
331312f080e7Smrj 		(*windowp)->wd_cookie_cnt++;
331412f080e7Smrj 		(*windowp)->wd_size = cookie->dmac_size;
331512f080e7Smrj 		return (DDI_SUCCESS);
331612f080e7Smrj 	}
331712f080e7Smrj 
331812f080e7Smrj 	/*
331912f080e7Smrj 	 * The window's not a whole multiple of granularity. Since we know this
332012f080e7Smrj 	 * is due to the sgllen, we need to go back to the last cookie and trim
332112f080e7Smrj 	 * that one, add the left over part of the old cookie into the new
332212f080e7Smrj 	 * window, and then add in the new cookie into the new window.
332312f080e7Smrj 	 */
332412f080e7Smrj 
332512f080e7Smrj 	/*
332612f080e7Smrj 	 * make sure the driver isn't making us do something bad... Trimming and
332712f080e7Smrj 	 * sgllen == 1 don't go together.
332812f080e7Smrj 	 */
332912f080e7Smrj 	if (attr->dma_attr_sgllen == 1) {
333012f080e7Smrj 		return (DDI_DMA_NOMAPPING);
333112f080e7Smrj 	}
333212f080e7Smrj 
333312f080e7Smrj 	/*
333412f080e7Smrj 	 * first, setup the current window to account for the trim. Need to go
333512f080e7Smrj 	 * back to the last cookie for this.
333612f080e7Smrj 	 */
333712f080e7Smrj 	cookie--;
333812f080e7Smrj 	(*windowp)->wd_trim.tr_trim_last = B_TRUE;
333912f080e7Smrj 	(*windowp)->wd_trim.tr_last_cookie = cookie;
3340843e1988Sjohnlev 	(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
334112f080e7Smrj 	ASSERT(cookie->dmac_size > trim_sz);
334212f080e7Smrj 	(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
334312f080e7Smrj 	(*windowp)->wd_size -= trim_sz;
334412f080e7Smrj 
334512f080e7Smrj 	/* save the buffer offsets for the next window */
334612f080e7Smrj 	coffset = cookie->dmac_size - trim_sz;
334712f080e7Smrj 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
334812f080e7Smrj 
334912f080e7Smrj 	/*
335012f080e7Smrj 	 * set this now in case this is the first window. all other cases are
335112f080e7Smrj 	 * set in dma_win()
335212f080e7Smrj 	 */
335312f080e7Smrj 	cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
335412f080e7Smrj 
335512f080e7Smrj 	/*
335612f080e7Smrj 	 * initialize the next window using what's left over in the previous
335712f080e7Smrj 	 * cookie.
335812f080e7Smrj 	 */
335912f080e7Smrj 	(*windowp)++;
336012f080e7Smrj 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
336112f080e7Smrj 	(*windowp)->wd_cookie_cnt++;
336212f080e7Smrj 	(*windowp)->wd_trim.tr_trim_first = B_TRUE;
3363843e1988Sjohnlev 	(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset;
336412f080e7Smrj 	(*windowp)->wd_trim.tr_first_size = trim_sz;
336512f080e7Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
336612f080e7Smrj 		(*windowp)->wd_dosync = B_TRUE;
336712f080e7Smrj 	}
336812f080e7Smrj 
336912f080e7Smrj 	/*
337012f080e7Smrj 	 * now go back to the current cookie and add it to the new window. set
337112f080e7Smrj 	 * the new window size to the what was left over from the previous
337212f080e7Smrj 	 * cookie and what's in the current cookie.
337312f080e7Smrj 	 */
337412f080e7Smrj 	cookie++;
337512f080e7Smrj 	(*windowp)->wd_cookie_cnt++;
337612f080e7Smrj 	(*windowp)->wd_size = trim_sz + cookie->dmac_size;
337712f080e7Smrj 
337812f080e7Smrj 	/*
337912f080e7Smrj 	 * trim plus the next cookie could put us over maxxfer (a cookie can be
338012f080e7Smrj 	 * a max size of maxxfer). Handle that case.
338112f080e7Smrj 	 */
338212f080e7Smrj 	if ((*windowp)->wd_size > dma->dp_maxxfer) {
338312f080e7Smrj 		/*
338412f080e7Smrj 		 * maxxfer is already a whole multiple of granularity, and this
338512f080e7Smrj 		 * trim will be <= the previous trim (since a cookie can't be
338612f080e7Smrj 		 * larger than maxxfer). Make things simple here.
338712f080e7Smrj 		 */
338812f080e7Smrj 		trim_sz = (*windowp)->wd_size - dma->dp_maxxfer;
338912f080e7Smrj 		(*windowp)->wd_trim.tr_trim_last = B_TRUE;
339012f080e7Smrj 		(*windowp)->wd_trim.tr_last_cookie = cookie;
3391843e1988Sjohnlev 		(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
339212f080e7Smrj 		(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
339312f080e7Smrj 		(*windowp)->wd_size -= trim_sz;
339412f080e7Smrj 		ASSERT((*windowp)->wd_size == dma->dp_maxxfer);
339512f080e7Smrj 
339612f080e7Smrj 		/* save the buffer offsets for the next window */
339712f080e7Smrj 		coffset = cookie->dmac_size - trim_sz;
339812f080e7Smrj 		new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
339912f080e7Smrj 
340012f080e7Smrj 		/* setup the next window */
340112f080e7Smrj 		(*windowp)++;
340212f080e7Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
340312f080e7Smrj 		(*windowp)->wd_cookie_cnt++;
340412f080e7Smrj 		(*windowp)->wd_trim.tr_trim_first = B_TRUE;
3405843e1988Sjohnlev 		(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress +
340612f080e7Smrj 		    coffset;
340712f080e7Smrj 		(*windowp)->wd_trim.tr_first_size = trim_sz;
340812f080e7Smrj 	}
340912f080e7Smrj 
341012f080e7Smrj 	return (DDI_SUCCESS);
341112f080e7Smrj }
341212f080e7Smrj 
341312f080e7Smrj 
341412f080e7Smrj /*
341512f080e7Smrj  * rootnex_copybuf_window_boundary()
341612f080e7Smrj  *    Called in bind slowpath when we get to a window boundary because we used
341712f080e7Smrj  *    up all the copy buffer that we have.
341812f080e7Smrj  */
341912f080e7Smrj static int
342012f080e7Smrj rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
342112f080e7Smrj     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, off_t cur_offset,
342212f080e7Smrj     size_t *copybuf_used)
342312f080e7Smrj {
342412f080e7Smrj 	rootnex_sglinfo_t *sinfo;
342512f080e7Smrj 	off_t new_offset;
342612f080e7Smrj 	size_t trim_sz;
3427843e1988Sjohnlev 	paddr_t paddr;
342812f080e7Smrj 	off_t coffset;
342912f080e7Smrj 	uint_t pidx;
343012f080e7Smrj 	off_t poff;
343112f080e7Smrj 
343212f080e7Smrj 
343312f080e7Smrj 	sinfo = &dma->dp_sglinfo;
343412f080e7Smrj 
343512f080e7Smrj 	/*
343612f080e7Smrj 	 * the copy buffer should be a whole multiple of page size. We know that
343712f080e7Smrj 	 * this cookie is <= MMU_PAGESIZE.
343812f080e7Smrj 	 */
343912f080e7Smrj 	ASSERT(cookie->dmac_size <= MMU_PAGESIZE);
344012f080e7Smrj 
344112f080e7Smrj 	/*
344212f080e7Smrj 	 * from now on, all new windows in this bind need to be re-mapped during
344312f080e7Smrj 	 * ddi_dma_getwin() (32-bit kernel only). i.e. we ran out of copybuf
344412f080e7Smrj 	 * space...
344512f080e7Smrj 	 */
344612f080e7Smrj #if !defined(__amd64)
344712f080e7Smrj 	dma->dp_cb_remaping = B_TRUE;
344812f080e7Smrj #endif
344912f080e7Smrj 
345012f080e7Smrj 	/* reset copybuf used */
345112f080e7Smrj 	*copybuf_used = 0;
345212f080e7Smrj 
345312f080e7Smrj 	/*
345412f080e7Smrj 	 * if we don't have to trim (since granularity is set to 1), go to the
345512f080e7Smrj 	 * next window and add the current cookie to it. We know the current
345612f080e7Smrj 	 * cookie uses the copy buffer since we're in this code path.
345712f080e7Smrj 	 */
345812f080e7Smrj 	if (!dma->dp_trim_required) {
345912f080e7Smrj 		(*windowp)++;
346012f080e7Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
346112f080e7Smrj 
346212f080e7Smrj 		/* Add this cookie to the new window */
346312f080e7Smrj 		(*windowp)->wd_cookie_cnt++;
346412f080e7Smrj 		(*windowp)->wd_size += cookie->dmac_size;
346512f080e7Smrj 		*copybuf_used += MMU_PAGESIZE;
346612f080e7Smrj 		return (DDI_SUCCESS);
346712f080e7Smrj 	}
346812f080e7Smrj 
346912f080e7Smrj 	/*
347012f080e7Smrj 	 * *** may need to trim, figure it out.
347112f080e7Smrj 	 */
347212f080e7Smrj 
347312f080e7Smrj 	/* figure out how much we need to trim from the window */
347412f080e7Smrj 	if (dma->dp_granularity_power_2) {
347512f080e7Smrj 		trim_sz = (*windowp)->wd_size &
347612f080e7Smrj 		    (hp->dmai_attr.dma_attr_granular - 1);
347712f080e7Smrj 	} else {
347812f080e7Smrj 		trim_sz = (*windowp)->wd_size % hp->dmai_attr.dma_attr_granular;
347912f080e7Smrj 	}
348012f080e7Smrj 
348112f080e7Smrj 	/*
348212f080e7Smrj 	 * if the window's a whole multiple of granularity, go to the next
348312f080e7Smrj 	 * window, init it, then add in the current cookie. We know the current
348412f080e7Smrj 	 * cookie uses the copy buffer since we're in this code path.
348512f080e7Smrj 	 */
348612f080e7Smrj 	if (trim_sz == 0) {
348712f080e7Smrj 		(*windowp)++;
348812f080e7Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
348912f080e7Smrj 
349012f080e7Smrj 		/* Add this cookie to the new window */
349112f080e7Smrj 		(*windowp)->wd_cookie_cnt++;
349212f080e7Smrj 		(*windowp)->wd_size += cookie->dmac_size;
349312f080e7Smrj 		*copybuf_used += MMU_PAGESIZE;
349412f080e7Smrj 		return (DDI_SUCCESS);
349512f080e7Smrj 	}
349612f080e7Smrj 
349712f080e7Smrj 	/*
349812f080e7Smrj 	 * *** We figured it out, we definitely need to trim
349912f080e7Smrj 	 */
350012f080e7Smrj 
350112f080e7Smrj 	/*
350212f080e7Smrj 	 * make sure the driver isn't making us do something bad...
350312f080e7Smrj 	 * Trimming and sgllen == 1 don't go together.
350412f080e7Smrj 	 */
350512f080e7Smrj 	if (hp->dmai_attr.dma_attr_sgllen == 1) {
350612f080e7Smrj 		return (DDI_DMA_NOMAPPING);
350712f080e7Smrj 	}
350812f080e7Smrj 
350912f080e7Smrj 	/*
351012f080e7Smrj 	 * first, setup the current window to account for the trim. Need to go
351112f080e7Smrj 	 * back to the last cookie for this. Some of the last cookie will be in
351212f080e7Smrj 	 * the current window, and some of the last cookie will be in the new
351312f080e7Smrj 	 * window. All of the current cookie will be in the new window.
351412f080e7Smrj 	 */
351512f080e7Smrj 	cookie--;
351612f080e7Smrj 	(*windowp)->wd_trim.tr_trim_last = B_TRUE;
351712f080e7Smrj 	(*windowp)->wd_trim.tr_last_cookie = cookie;
3518843e1988Sjohnlev 	(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
351912f080e7Smrj 	ASSERT(cookie->dmac_size > trim_sz);
352012f080e7Smrj 	(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
352112f080e7Smrj 	(*windowp)->wd_size -= trim_sz;
352212f080e7Smrj 
352312f080e7Smrj 	/*
352412f080e7Smrj 	 * we're trimming the last cookie (not the current cookie). So that
352512f080e7Smrj 	 * last cookie may or may not have been using the copy buffer (
352612f080e7Smrj 	 * we know the cookie passed in uses the copy buffer since we're in
352712f080e7Smrj 	 * this code path).
352812f080e7Smrj 	 *
352912f080e7Smrj 	 * If the last cookie doesn't use the copy buffer, nothing special to
353012f080e7Smrj 	 * do. However, if it does use the copy buffer, it will be both the
353112f080e7Smrj 	 * last page in the current window and the first page in the next
353212f080e7Smrj 	 * window. Since we are reusing the copy buffer (and KVA space on the
353312f080e7Smrj 	 * 32-bit kernel), this page will use the end of the copy buffer in the
353412f080e7Smrj 	 * current window, and the start of the copy buffer in the next window.
353512f080e7Smrj 	 * Track that info... The cookie physical address was already set to
353612f080e7Smrj 	 * the copy buffer physical address in setup_cookie().
353712f080e7Smrj 	 */
353812f080e7Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
353912f080e7Smrj 		pidx = (sinfo->si_buf_offset + (*windowp)->wd_offset +
354012f080e7Smrj 		    (*windowp)->wd_size) >> MMU_PAGESHIFT;
354112f080e7Smrj 		(*windowp)->wd_trim.tr_last_copybuf_win = B_TRUE;
354212f080e7Smrj 		(*windowp)->wd_trim.tr_last_pidx = pidx;
354312f080e7Smrj 		(*windowp)->wd_trim.tr_last_cbaddr =
354412f080e7Smrj 		    dma->dp_pgmap[pidx].pm_cbaddr;
354512f080e7Smrj #if !defined(__amd64)
354612f080e7Smrj 		(*windowp)->wd_trim.tr_last_kaddr =
354712f080e7Smrj 		    dma->dp_pgmap[pidx].pm_kaddr;
354812f080e7Smrj #endif
354912f080e7Smrj 	}
355012f080e7Smrj 
355112f080e7Smrj 	/* save the buffer offsets for the next window */
355212f080e7Smrj 	coffset = cookie->dmac_size - trim_sz;
355312f080e7Smrj 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
355412f080e7Smrj 
355512f080e7Smrj 	/*
355612f080e7Smrj 	 * set this now in case this is the first window. all other cases are
355712f080e7Smrj 	 * set in dma_win()
355812f080e7Smrj 	 */
355912f080e7Smrj 	cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
356012f080e7Smrj 
356112f080e7Smrj 	/*
356212f080e7Smrj 	 * initialize the next window using what's left over in the previous
356312f080e7Smrj 	 * cookie.
356412f080e7Smrj 	 */
356512f080e7Smrj 	(*windowp)++;
356612f080e7Smrj 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
356712f080e7Smrj 	(*windowp)->wd_cookie_cnt++;
356812f080e7Smrj 	(*windowp)->wd_trim.tr_trim_first = B_TRUE;
3569843e1988Sjohnlev 	(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset;
357012f080e7Smrj 	(*windowp)->wd_trim.tr_first_size = trim_sz;
357112f080e7Smrj 
357212f080e7Smrj 	/*
357312f080e7Smrj 	 * again, we're tracking if the last cookie uses the copy buffer.
357412f080e7Smrj 	 * read the comment above for more info on why we need to track
357512f080e7Smrj 	 * additional state.
357612f080e7Smrj 	 *
357712f080e7Smrj 	 * For the first cookie in the new window, we need to reset the physical
357812f080e7Smrj 	 * address to DMA into to the start of the copy buffer plus any
357912f080e7Smrj 	 * initial page offset which may be present.
358012f080e7Smrj 	 */
358112f080e7Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
358212f080e7Smrj 		(*windowp)->wd_dosync = B_TRUE;
358312f080e7Smrj 		(*windowp)->wd_trim.tr_first_copybuf_win = B_TRUE;
358412f080e7Smrj 		(*windowp)->wd_trim.tr_first_pidx = pidx;
358512f080e7Smrj 		(*windowp)->wd_trim.tr_first_cbaddr = dma->dp_cbaddr;
358612f080e7Smrj 		poff = (*windowp)->wd_trim.tr_first_paddr & MMU_PAGEOFFSET;
3587843e1988Sjohnlev 
3588843e1988Sjohnlev 		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat, dma->dp_cbaddr)) +
3589843e1988Sjohnlev 		    poff;
3590843e1988Sjohnlev #ifdef __xpv
3591843e1988Sjohnlev 		/*
3592843e1988Sjohnlev 		 * If we're dom0, we're using a real device so we need to load
3593843e1988Sjohnlev 		 * the cookies with MAs instead of PAs.
3594843e1988Sjohnlev 		 */
3595843e1988Sjohnlev 		(*windowp)->wd_trim.tr_first_paddr =
3596843e1988Sjohnlev 		    ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
3597843e1988Sjohnlev #else
3598843e1988Sjohnlev 		(*windowp)->wd_trim.tr_first_paddr = paddr;
3599843e1988Sjohnlev #endif
3600843e1988Sjohnlev 
360112f080e7Smrj #if !defined(__amd64)
360212f080e7Smrj 		(*windowp)->wd_trim.tr_first_kaddr = dma->dp_kva;
360312f080e7Smrj #endif
360412f080e7Smrj 		/* account for the cookie copybuf usage in the new window */
360512f080e7Smrj 		*copybuf_used += MMU_PAGESIZE;
360612f080e7Smrj 
360712f080e7Smrj 		/*
360812f080e7Smrj 		 * every piece of code has to have a hack, and here is this
360912f080e7Smrj 		 * one :-)
361012f080e7Smrj 		 *
361112f080e7Smrj 		 * There is a complex interaction between setup_cookie and the
361212f080e7Smrj 		 * copybuf window boundary. The complexity had to be in either
361312f080e7Smrj 		 * the maxxfer window, or the copybuf window, and I chose the
361412f080e7Smrj 		 * copybuf code.
361512f080e7Smrj 		 *
361612f080e7Smrj 		 * So in this code path, we have taken the last cookie,
361712f080e7Smrj 		 * virtually broken it in half due to the trim, and it happens
361812f080e7Smrj 		 * to use the copybuf which further complicates life. At the
361912f080e7Smrj 		 * same time, we have already setup the current cookie, which
362012f080e7Smrj 		 * is now wrong. More background info: the current cookie uses
362112f080e7Smrj 		 * the copybuf, so it is only a page long max. So we need to
362212f080e7Smrj 		 * fix the current cookie's copy buffer address, physical
362312f080e7Smrj 		 * address, and kva for the 32-bit kernel. We do this by
362412f080e7Smrj 		 * bumping them up by a page (of course, we can't do this on
362512f080e7Smrj 		 * the physical address since the copy buffer may not be
362612f080e7Smrj 		 * physically contiguous).
362712f080e7Smrj 		 */
362812f080e7Smrj 		cookie++;
362912f080e7Smrj 		dma->dp_pgmap[pidx + 1].pm_cbaddr += MMU_PAGESIZE;
3630843e1988Sjohnlev 		poff = cookie->dmac_laddress & MMU_PAGEOFFSET;
3631843e1988Sjohnlev 
3632843e1988Sjohnlev 		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat,
363312f080e7Smrj 		    dma->dp_pgmap[pidx + 1].pm_cbaddr)) + poff;
3634843e1988Sjohnlev #ifdef __xpv
3635843e1988Sjohnlev 		/*
3636843e1988Sjohnlev 		 * If we're dom0, we're using a real device so we need to load
3637843e1988Sjohnlev 		 * the cookies with MAs instead of PAs.
3638843e1988Sjohnlev 		 */
3639843e1988Sjohnlev 		cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
3640843e1988Sjohnlev #else
3641843e1988Sjohnlev 		cookie->dmac_laddress = paddr;
3642843e1988Sjohnlev #endif
3643843e1988Sjohnlev 
364412f080e7Smrj #if !defined(__amd64)
364512f080e7Smrj 		ASSERT(dma->dp_pgmap[pidx + 1].pm_mapped == B_FALSE);
364612f080e7Smrj 		dma->dp_pgmap[pidx + 1].pm_kaddr += MMU_PAGESIZE;
364712f080e7Smrj #endif
364812f080e7Smrj 	} else {
364912f080e7Smrj 		/* go back to the current cookie */
365012f080e7Smrj 		cookie++;
365112f080e7Smrj 	}
365212f080e7Smrj 
365312f080e7Smrj 	/*
365412f080e7Smrj 	 * add the current cookie to the new window. set the new window size to
365512f080e7Smrj 	 * what was left over from the previous cookie and what's in the
365612f080e7Smrj 	 * current cookie.
365712f080e7Smrj 	 */
365812f080e7Smrj 	(*windowp)->wd_cookie_cnt++;
365912f080e7Smrj 	(*windowp)->wd_size = trim_sz + cookie->dmac_size;
366012f080e7Smrj 	ASSERT((*windowp)->wd_size < dma->dp_maxxfer);
366112f080e7Smrj 
366212f080e7Smrj 	/*
366312f080e7Smrj 	 * we know that the cookie passed in always uses the copy buffer. We
366412f080e7Smrj 	 * wouldn't be here if it didn't.
366512f080e7Smrj 	 */
366612f080e7Smrj 	*copybuf_used += MMU_PAGESIZE;
366712f080e7Smrj 
366812f080e7Smrj 	return (DDI_SUCCESS);
366912f080e7Smrj }
367012f080e7Smrj 
367112f080e7Smrj 
367212f080e7Smrj /*
367312f080e7Smrj  * rootnex_maxxfer_window_boundary()
367412f080e7Smrj  *    Called in bind slowpath when we get to a window boundary because we will
367512f080e7Smrj  *    go over maxxfer.
367612f080e7Smrj  */
367712f080e7Smrj static int
367812f080e7Smrj rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
367912f080e7Smrj     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie)
368012f080e7Smrj {
368112f080e7Smrj 	size_t dmac_size;
368212f080e7Smrj 	off_t new_offset;
368312f080e7Smrj 	size_t trim_sz;
368412f080e7Smrj 	off_t coffset;
368512f080e7Smrj 
368612f080e7Smrj 
368712f080e7Smrj 	/*
368812f080e7Smrj 	 * calculate how much we have to trim off of the current cookie to equal
368912f080e7Smrj 	 * maxxfer. We don't have to account for granularity here since our
369012f080e7Smrj 	 * maxxfer already takes that into account.
369112f080e7Smrj 	 */
369212f080e7Smrj 	trim_sz = ((*windowp)->wd_size + cookie->dmac_size) - dma->dp_maxxfer;
369312f080e7Smrj 	ASSERT(trim_sz <= cookie->dmac_size);
369412f080e7Smrj 	ASSERT(trim_sz <= dma->dp_maxxfer);
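	/*
	 * Rough worked example (illustrative values only): with wd_size =
	 * 0x1f000, dmac_size = 0x3000 and dp_maxxfer = 0x20000, trim_sz =
	 * (0x1f000 + 0x3000) - 0x20000 = 0x2000. The current window keeps
	 * the first 0x1000 of this cookie and ends up exactly maxxfer in
	 * size; the new window starts 0x1000 into the cookie and picks up
	 * the remaining 0x2000.
	 */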
369512f080e7Smrj 
369612f080e7Smrj 	/* save cookie size since we need it later and we might change it */
369712f080e7Smrj 	dmac_size = cookie->dmac_size;
369812f080e7Smrj 
369912f080e7Smrj 	/*
370012f080e7Smrj 	 * if we're not trimming the entire cookie, setup the current window to
370112f080e7Smrj 	 * account for the trim.
370212f080e7Smrj 	 */
370312f080e7Smrj 	if (trim_sz < cookie->dmac_size) {
370412f080e7Smrj 		(*windowp)->wd_cookie_cnt++;
370512f080e7Smrj 		(*windowp)->wd_trim.tr_trim_last = B_TRUE;
370612f080e7Smrj 		(*windowp)->wd_trim.tr_last_cookie = cookie;
3707843e1988Sjohnlev 		(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
370812f080e7Smrj 		(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
370912f080e7Smrj 		(*windowp)->wd_size = dma->dp_maxxfer;
371012f080e7Smrj 
371112f080e7Smrj 		/*
371212f080e7Smrj 		 * set the adjusted cookie size now in case this is the first
371312f080e7Smrj 		 * window. All other windows are taken care of in dma_win()
371412f080e7Smrj 		 */
371512f080e7Smrj 		cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
371612f080e7Smrj 	}
371712f080e7Smrj 
371812f080e7Smrj 	/*
371912f080e7Smrj 	 * coffset is the current offset within the cookie, new_offset is the
372012f080e7Smrj 	 * current offset within the entire buffer.
372112f080e7Smrj 	 */
372212f080e7Smrj 	coffset = dmac_size - trim_sz;
372312f080e7Smrj 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
372412f080e7Smrj 
372512f080e7Smrj 	/* initialize the next window */
372612f080e7Smrj 	(*windowp)++;
372712f080e7Smrj 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
372812f080e7Smrj 	(*windowp)->wd_cookie_cnt++;
372912f080e7Smrj 	(*windowp)->wd_size = trim_sz;
373012f080e7Smrj 	if (trim_sz < dmac_size) {
373112f080e7Smrj 		(*windowp)->wd_trim.tr_trim_first = B_TRUE;
3732843e1988Sjohnlev 		(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress +
373312f080e7Smrj 		    coffset;
373412f080e7Smrj 		(*windowp)->wd_trim.tr_first_size = trim_sz;
373512f080e7Smrj 	}
373612f080e7Smrj 
373712f080e7Smrj 	return (DDI_SUCCESS);
373812f080e7Smrj }
373912f080e7Smrj 
374012f080e7Smrj 
374112f080e7Smrj /*
374212f080e7Smrj  * rootnex_dma_sync()
374312f080e7Smrj  *    called from ddi_dma_sync() if DMP_NOSYNC is not set in hp->dmai_rflags.
374412f080e7Smrj  *    We set DMP_NOSYNC if we're not using the copy buffer. If DMP_NOSYNC
374512f080e7Smrj  *    is set, ddi_dma_sync() returns immediately passing back success.
374612f080e7Smrj  */
374712f080e7Smrj /*ARGSUSED*/
374812f080e7Smrj static int
374912f080e7Smrj rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
375012f080e7Smrj     off_t off, size_t len, uint_t cache_flags)
375112f080e7Smrj {
375212f080e7Smrj 	rootnex_sglinfo_t *sinfo;
375312f080e7Smrj 	rootnex_pgmap_t *cbpage;
375412f080e7Smrj 	rootnex_window_t *win;
375512f080e7Smrj 	ddi_dma_impl_t *hp;
375612f080e7Smrj 	rootnex_dma_t *dma;
375712f080e7Smrj 	caddr_t fromaddr;
375812f080e7Smrj 	caddr_t toaddr;
375912f080e7Smrj 	uint_t psize;
376012f080e7Smrj 	off_t offset;
376112f080e7Smrj 	uint_t pidx;
376212f080e7Smrj 	size_t size;
376312f080e7Smrj 	off_t poff;
376412f080e7Smrj 	int e;
376512f080e7Smrj 
376612f080e7Smrj 
376712f080e7Smrj 	hp = (ddi_dma_impl_t *)handle;
376812f080e7Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
376912f080e7Smrj 	sinfo = &dma->dp_sglinfo;
377012f080e7Smrj 
377112f080e7Smrj 	/*
377212f080e7Smrj 	 * if we don't have any windows, we don't need to sync. A copybuf
377312f080e7Smrj 	 * will cause us to have at least one window.
377412f080e7Smrj 	 */
377512f080e7Smrj 	if (dma->dp_window == NULL) {
377612f080e7Smrj 		return (DDI_SUCCESS);
377712f080e7Smrj 	}
377812f080e7Smrj 
377912f080e7Smrj 	/* This window may not need to be sync'd */
378012f080e7Smrj 	win = &dma->dp_window[dma->dp_current_win];
378112f080e7Smrj 	if (!win->wd_dosync) {
378212f080e7Smrj 		return (DDI_SUCCESS);
378312f080e7Smrj 	}
378412f080e7Smrj 
378512f080e7Smrj 	/* handle off and len special cases */
378612f080e7Smrj 	if ((off == 0) || (rootnex_sync_ignore_params)) {
378712f080e7Smrj 		offset = win->wd_offset;
378812f080e7Smrj 	} else {
378912f080e7Smrj 		offset = off;
379012f080e7Smrj 	}
379112f080e7Smrj 	if ((len == 0) || (rootnex_sync_ignore_params)) {
379212f080e7Smrj 		size = win->wd_size;
379312f080e7Smrj 	} else {
379412f080e7Smrj 		size = len;
379512f080e7Smrj 	}
379612f080e7Smrj 
379712f080e7Smrj 	/* check the sync args to make sure they make a little sense */
379812f080e7Smrj 	if (rootnex_sync_check_parms) {
379912f080e7Smrj 		e = rootnex_valid_sync_parms(hp, win, offset, size,
380012f080e7Smrj 		    cache_flags);
380112f080e7Smrj 		if (e != DDI_SUCCESS) {
380212f080e7Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_SYNC_FAIL]);
380312f080e7Smrj 			return (DDI_FAILURE);
380412f080e7Smrj 		}
380512f080e7Smrj 	}
380612f080e7Smrj 
380712f080e7Smrj 	/*
380812f080e7Smrj 	 * special case the first page to handle the offset into the page. The
380912f080e7Smrj 	 * offset to the current page for our buffer is the offset into the
381012f080e7Smrj 	 * first page of the buffer plus our current offset into the buffer
381112f080e7Smrj 	 * itself, masked of course.
381212f080e7Smrj 	 */
381312f080e7Smrj 	poff = (sinfo->si_buf_offset + offset) & MMU_PAGEOFFSET;
381412f080e7Smrj 	psize = MIN((MMU_PAGESIZE - poff), size);
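	/*
	 * Rough worked example (illustrative values only, 4K pages): with
	 * si_buf_offset = 0x800, offset = 0 and size = 0x1800, poff is 0x800
	 * and the first pass below copies 0x800 bytes out of buffer page 0;
	 * the second pass copies the remaining 0x1000 bytes from buffer page
	 * 1 with poff back at zero.
	 */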
381512f080e7Smrj 
381612f080e7Smrj 	/* go through all the pages that we want to sync */
381712f080e7Smrj 	while (size > 0) {
381812f080e7Smrj 		/*
381912f080e7Smrj 		 * Calculate the page index relative to the start of the buffer.
382012f080e7Smrj 		 * The index to the current page for our buffer is the offset
382112f080e7Smrj 		 * into the first page of the buffer plus our current offset
382212f080e7Smrj 		 * into the buffer itself, shifted of course...
382312f080e7Smrj 		 */
382412f080e7Smrj 		pidx = (sinfo->si_buf_offset + offset) >> MMU_PAGESHIFT;
382512f080e7Smrj 		ASSERT(pidx < sinfo->si_max_pages);
382612f080e7Smrj 
382712f080e7Smrj 		/*
382812f080e7Smrj 		 * if this page uses the copy buffer, we need to sync it,
382912f080e7Smrj 		 * otherwise, go on to the next page.
383012f080e7Smrj 		 */
383112f080e7Smrj 		cbpage = &dma->dp_pgmap[pidx];
383212f080e7Smrj 		ASSERT((cbpage->pm_uses_copybuf == B_TRUE) ||
383312f080e7Smrj 		    (cbpage->pm_uses_copybuf == B_FALSE));
383412f080e7Smrj 		if (cbpage->pm_uses_copybuf) {
383512f080e7Smrj 			/* cbaddr and kaddr should be page aligned */
383612f080e7Smrj 			ASSERT(((uintptr_t)cbpage->pm_cbaddr &
383712f080e7Smrj 			    MMU_PAGEOFFSET) == 0);
383812f080e7Smrj 			ASSERT(((uintptr_t)cbpage->pm_kaddr &
383912f080e7Smrj 			    MMU_PAGEOFFSET) == 0);
384012f080e7Smrj 
384112f080e7Smrj 			/*
384212f080e7Smrj 			 * if we're copying for the device, we are going to
384312f080e7Smrj 			 * copy from the driver's buffer to the rootnex
384412f080e7Smrj 			 * allocated copy buffer.
384512f080e7Smrj 			 */
384612f080e7Smrj 			if (cache_flags == DDI_DMA_SYNC_FORDEV) {
384712f080e7Smrj 				fromaddr = cbpage->pm_kaddr + poff;
384812f080e7Smrj 				toaddr = cbpage->pm_cbaddr + poff;
384912f080e7Smrj 				DTRACE_PROBE2(rootnex__sync__dev,
385012f080e7Smrj 				    dev_info_t *, dma->dp_dip, size_t, psize);
385112f080e7Smrj 
385212f080e7Smrj 			/*
385312f080e7Smrj 			 * if we're copying for the cpu/kernel, we are going to
385412f080e7Smrj 			 * copy from the rootnex allocated copy buffer to the
385512f080e7Smrj 			 * driver's buffer.
385612f080e7Smrj 			 */
385712f080e7Smrj 			} else {
385812f080e7Smrj 				fromaddr = cbpage->pm_cbaddr + poff;
385912f080e7Smrj 				toaddr = cbpage->pm_kaddr + poff;
386012f080e7Smrj 				DTRACE_PROBE2(rootnex__sync__cpu,
386112f080e7Smrj 				    dev_info_t *, dma->dp_dip, size_t, psize);
386212f080e7Smrj 			}
386312f080e7Smrj 
386412f080e7Smrj 			bcopy(fromaddr, toaddr, psize);
386512f080e7Smrj 		}
386612f080e7Smrj 
386712f080e7Smrj 		/*
386812f080e7Smrj 		 * decrement size until we're done, update our offset into the
386912f080e7Smrj 		 * buffer, and get the next page size.
387012f080e7Smrj 		 */
387112f080e7Smrj 		size -= psize;
387212f080e7Smrj 		offset += psize;
387312f080e7Smrj 		psize = MIN(MMU_PAGESIZE, size);
387412f080e7Smrj 
387512f080e7Smrj 		/* page offset is zero for the rest of this loop */
387612f080e7Smrj 		poff = 0;
387712f080e7Smrj 	}
387812f080e7Smrj 
387912f080e7Smrj 	return (DDI_SUCCESS);
388012f080e7Smrj }
388112f080e7Smrj 
388212f080e7Smrj 
388312f080e7Smrj /*
388412f080e7Smrj  * rootnex_valid_sync_parms()
388512f080e7Smrj  *    checks the parameters passed to sync to verify they are correct.
388612f080e7Smrj  */
388712f080e7Smrj static int
388812f080e7Smrj rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win,
388912f080e7Smrj     off_t offset, size_t size, uint_t cache_flags)
389012f080e7Smrj {
389112f080e7Smrj 	off_t woffset;
389212f080e7Smrj 
389312f080e7Smrj 
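	/*
	 * A short summary of the checks below: (offset, size) must fall
	 * entirely within the current window, DDI_DMA_SYNC_FORDEV requires
	 * DDI_DMA_WRITE in dmai_rflags, and DDI_DMA_SYNC_FORCPU or
	 * DDI_DMA_SYNC_FORKERNEL requires DDI_DMA_READ.
	 */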
389412f080e7Smrj 	/*
389512f080e7Smrj 	 * the first part of the test is to make sure the offset passed in is
389612f080e7Smrj 	 * within the window.
389712f080e7Smrj 	 */
389812f080e7Smrj 	if (offset < win->wd_offset) {
389912f080e7Smrj 		return (DDI_FAILURE);
390012f080e7Smrj 	}
390112f080e7Smrj 
390212f080e7Smrj 	/*
390312f080e7Smrj 	 * the second and last part of the test is to make sure the offset and
390412f080e7Smrj 	 * length passed in are within the window.
390512f080e7Smrj 	 */
390612f080e7Smrj 	woffset = offset - win->wd_offset;
390712f080e7Smrj 	if ((woffset + size) > win->wd_size) {
390812f080e7Smrj 		return (DDI_FAILURE);
390912f080e7Smrj 	}
391012f080e7Smrj 
391112f080e7Smrj 	/*
391212f080e7Smrj 	 * if we are sync'ing for the device, the DDI_DMA_WRITE flag should
391312f080e7Smrj 	 * be set too.
391412f080e7Smrj 	 */
391512f080e7Smrj 	if ((cache_flags == DDI_DMA_SYNC_FORDEV) &&
391612f080e7Smrj 	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
391712f080e7Smrj 		return (DDI_SUCCESS);
391812f080e7Smrj 	}
391912f080e7Smrj 
392012f080e7Smrj 	/*
392112f080e7Smrj 	 * at this point, either DDI_DMA_SYNC_FORCPU or DDI_DMA_SYNC_FORKERNEL
392212f080e7Smrj 	 * should be set. Also DDI_DMA_READ should be set in the flags.
392312f080e7Smrj 	 */
392412f080e7Smrj 	if (((cache_flags == DDI_DMA_SYNC_FORCPU) ||
392512f080e7Smrj 	    (cache_flags == DDI_DMA_SYNC_FORKERNEL)) &&
392612f080e7Smrj 	    (hp->dmai_rflags & DDI_DMA_READ)) {
392712f080e7Smrj 		return (DDI_SUCCESS);
392812f080e7Smrj 	}
392912f080e7Smrj 
393012f080e7Smrj 	return (DDI_FAILURE);
393112f080e7Smrj }
393212f080e7Smrj 
393312f080e7Smrj 
393412f080e7Smrj /*
393512f080e7Smrj  * rootnex_dma_win()
393612f080e7Smrj  *    called from ddi_dma_getwin()
393712f080e7Smrj  */
393812f080e7Smrj /*ARGSUSED*/
393912f080e7Smrj static int
394012f080e7Smrj rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
394112f080e7Smrj     uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
394212f080e7Smrj     uint_t *ccountp)
394312f080e7Smrj {
394412f080e7Smrj 	rootnex_window_t *window;
394512f080e7Smrj 	rootnex_trim_t *trim;
394612f080e7Smrj 	ddi_dma_impl_t *hp;
394712f080e7Smrj 	rootnex_dma_t *dma;
394812f080e7Smrj #if !defined(__amd64)
394912f080e7Smrj 	rootnex_sglinfo_t *sinfo;
395012f080e7Smrj 	rootnex_pgmap_t *pmap;
395112f080e7Smrj 	uint_t pidx;
395212f080e7Smrj 	uint_t pcnt;
395312f080e7Smrj 	off_t poff;
395412f080e7Smrj 	int i;
395512f080e7Smrj #endif
395612f080e7Smrj 
395712f080e7Smrj 
395812f080e7Smrj 	hp = (ddi_dma_impl_t *)handle;
395912f080e7Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
396012f080e7Smrj #if !defined(__amd64)
396112f080e7Smrj 	sinfo = &dma->dp_sglinfo;
396212f080e7Smrj #endif
396312f080e7Smrj 
396412f080e7Smrj 	/* If we try to get a window which doesn't exist, return failure */
396512f080e7Smrj 	if (win >= hp->dmai_nwin) {
396612f080e7Smrj 		ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
396712f080e7Smrj 		return (DDI_FAILURE);
396812f080e7Smrj 	}
396912f080e7Smrj 
397012f080e7Smrj 	/*
397112f080e7Smrj 	 * if we don't have any windows, and they're asking for the first
397212f080e7Smrj 	 * window, setup the cookie pointer to the first cookie in the bind.
397312f080e7Smrj 	 * setup our return values, then increment the cookie since we return
397412f080e7Smrj 	 * the first cookie on the stack.
397512f080e7Smrj 	 */
397612f080e7Smrj 	if (dma->dp_window == NULL) {
397712f080e7Smrj 		if (win != 0) {
397812f080e7Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
397912f080e7Smrj 			return (DDI_FAILURE);
398012f080e7Smrj 		}
398112f080e7Smrj 		hp->dmai_cookie = dma->dp_cookies;
398212f080e7Smrj 		*offp = 0;
398312f080e7Smrj 		*lenp = dma->dp_dma.dmao_size;
398412f080e7Smrj 		*ccountp = dma->dp_sglinfo.si_sgl_size;
398512f080e7Smrj 		*cookiep = hp->dmai_cookie[0];
398612f080e7Smrj 		hp->dmai_cookie++;
398712f080e7Smrj 		return (DDI_SUCCESS);
398812f080e7Smrj 	}
398912f080e7Smrj 
399012f080e7Smrj 	/* sync the old window before moving on to the new one */
399112f080e7Smrj 	window = &dma->dp_window[dma->dp_current_win];
399212f080e7Smrj 	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_READ)) {
399312f080e7Smrj 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
399412f080e7Smrj 		    DDI_DMA_SYNC_FORCPU);
399512f080e7Smrj 	}
399612f080e7Smrj 
399712f080e7Smrj #if !defined(__amd64)
399812f080e7Smrj 	/*
399912f080e7Smrj 	 * before we move to the next window, if we need to re-map, unmap all
400012f080e7Smrj 	 * the pages in this window.
400112f080e7Smrj 	 */
400212f080e7Smrj 	if (dma->dp_cb_remaping) {
400312f080e7Smrj 		/*
400412f080e7Smrj 		 * If we switch to this window again, we'll need to map it in
400512f080e7Smrj 		 * on the fly next time.
400612f080e7Smrj 		 */
400712f080e7Smrj 		window->wd_remap_copybuf = B_TRUE;
400812f080e7Smrj 
400912f080e7Smrj 		/*
401012f080e7Smrj 		 * calculate the page index into the buffer where this window
401112f080e7Smrj 		 * starts, and the number of pages this window takes up.
401212f080e7Smrj 		 */
401312f080e7Smrj 		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
401412f080e7Smrj 		    MMU_PAGESHIFT;
401512f080e7Smrj 		poff = (sinfo->si_buf_offset + window->wd_offset) &
401612f080e7Smrj 		    MMU_PAGEOFFSET;
401712f080e7Smrj 		pcnt = mmu_btopr(window->wd_size + poff);
401812f080e7Smrj 		ASSERT((pidx + pcnt) <= sinfo->si_max_pages);
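		/*
		 * Rough worked example (illustrative values only, 4K pages):
		 * with si_buf_offset = 0x200, wd_offset = 0x3000 and
		 * wd_size = 0x2000, pidx = 0x3200 >> MMU_PAGESHIFT = 3,
		 * poff = 0x200 and pcnt = mmu_btopr(0x2200) = 3, so buffer
		 * pages 3 through 5 are checked and unloaded if mapped.
		 */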
401912f080e7Smrj 
402012f080e7Smrj 		/* unmap pages which are currently mapped in this window */
402112f080e7Smrj 		for (i = 0; i < pcnt; i++) {
402212f080e7Smrj 			if (dma->dp_pgmap[pidx].pm_mapped) {
402312f080e7Smrj 				hat_unload(kas.a_hat,
402412f080e7Smrj 				    dma->dp_pgmap[pidx].pm_kaddr, MMU_PAGESIZE,
402512f080e7Smrj 				    HAT_UNLOAD);
402612f080e7Smrj 				dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
402712f080e7Smrj 			}
402812f080e7Smrj 			pidx++;
402912f080e7Smrj 		}
403012f080e7Smrj 	}
403112f080e7Smrj #endif
403212f080e7Smrj 
403312f080e7Smrj 	/*
403412f080e7Smrj 	 * Move to the new window.
403512f080e7Smrj 	 * NOTE: current_win must be set for sync to work right
403612f080e7Smrj 	 */
403712f080e7Smrj 	dma->dp_current_win = win;
403812f080e7Smrj 	window = &dma->dp_window[win];
403912f080e7Smrj 
404012f080e7Smrj 	/* if needed, adjust the first and/or last cookies for trim */
404112f080e7Smrj 	trim = &window->wd_trim;
404212f080e7Smrj 	if (trim->tr_trim_first) {
4043843e1988Sjohnlev 		window->wd_first_cookie->dmac_laddress = trim->tr_first_paddr;
404412f080e7Smrj 		window->wd_first_cookie->dmac_size = trim->tr_first_size;
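		/*
		 * The 32-bit-only assignment below stashes the window's
		 * offset into the object in dmac_type alongside the
		 * ROOTNEX_USES_COPYBUF flag; the obsolete DDI_DMA_SEGTOC
		 * mctl recovers it later by masking that flag back off.
		 */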
404512f080e7Smrj #if !defined(__amd64)
404612f080e7Smrj 		window->wd_first_cookie->dmac_type =
404712f080e7Smrj 		    (window->wd_first_cookie->dmac_type &
404812f080e7Smrj 		    ROOTNEX_USES_COPYBUF) + window->wd_offset;
404912f080e7Smrj #endif
405012f080e7Smrj 		if (trim->tr_first_copybuf_win) {
405112f080e7Smrj 			dma->dp_pgmap[trim->tr_first_pidx].pm_cbaddr =
405212f080e7Smrj 			    trim->tr_first_cbaddr;
405312f080e7Smrj #if !defined(__amd64)
405412f080e7Smrj 			dma->dp_pgmap[trim->tr_first_pidx].pm_kaddr =
405512f080e7Smrj 			    trim->tr_first_kaddr;
405612f080e7Smrj #endif
405712f080e7Smrj 		}
405812f080e7Smrj 	}
405912f080e7Smrj 	if (trim->tr_trim_last) {
4060843e1988Sjohnlev 		trim->tr_last_cookie->dmac_laddress = trim->tr_last_paddr;
406112f080e7Smrj 		trim->tr_last_cookie->dmac_size = trim->tr_last_size;
406212f080e7Smrj 		if (trim->tr_last_copybuf_win) {
406312f080e7Smrj 			dma->dp_pgmap[trim->tr_last_pidx].pm_cbaddr =
406412f080e7Smrj 			    trim->tr_last_cbaddr;
406512f080e7Smrj #if !defined(__amd64)
406612f080e7Smrj 			dma->dp_pgmap[trim->tr_last_pidx].pm_kaddr =
406712f080e7Smrj 			    trim->tr_last_kaddr;
406812f080e7Smrj #endif
406912f080e7Smrj 		}
407012f080e7Smrj 	}
407112f080e7Smrj 
407212f080e7Smrj 	/*
407312f080e7Smrj 	 * setup the cookie pointer to the first cookie in the window. setup
407412f080e7Smrj 	 * our return values, then increment the cookie since we return the
407512f080e7Smrj 	 * first cookie on the stack.
407612f080e7Smrj 	 */
407712f080e7Smrj 	hp->dmai_cookie = window->wd_first_cookie;
407812f080e7Smrj 	*offp = window->wd_offset;
407912f080e7Smrj 	*lenp = window->wd_size;
408012f080e7Smrj 	*ccountp = window->wd_cookie_cnt;
408112f080e7Smrj 	*cookiep = hp->dmai_cookie[0];
408212f080e7Smrj 	hp->dmai_cookie++;
408312f080e7Smrj 
408412f080e7Smrj #if !defined(__amd64)
408512f080e7Smrj 	/* re-map copybuf if required for this window */
408612f080e7Smrj 	if (dma->dp_cb_remaping) {
408712f080e7Smrj 		/*
408812f080e7Smrj 		 * calculate the page index into the buffer where this
408912f080e7Smrj 		 * window starts.
409012f080e7Smrj 		 */
409112f080e7Smrj 		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
409212f080e7Smrj 		    MMU_PAGESHIFT;
409312f080e7Smrj 		ASSERT(pidx < sinfo->si_max_pages);
409412f080e7Smrj 
409512f080e7Smrj 		/*
409612f080e7Smrj 		 * the first page can get unmapped if it's shared with the
409712f080e7Smrj 		 * previous window. Even if the rest of this window is already
409812f080e7Smrj 		 * mapped in, we still need to check this one.
409912f080e7Smrj 		 */
410012f080e7Smrj 		pmap = &dma->dp_pgmap[pidx];
410112f080e7Smrj 		if ((pmap->pm_uses_copybuf) && (pmap->pm_mapped == B_FALSE)) {
410212f080e7Smrj 			if (pmap->pm_pp != NULL) {
410312f080e7Smrj 				pmap->pm_mapped = B_TRUE;
410412f080e7Smrj 				i86_pp_map(pmap->pm_pp, pmap->pm_kaddr);
410512f080e7Smrj 			} else if (pmap->pm_vaddr != NULL) {
410612f080e7Smrj 				pmap->pm_mapped = B_TRUE;
410712f080e7Smrj 				i86_va_map(pmap->pm_vaddr, sinfo->si_asp,
410812f080e7Smrj 				    pmap->pm_kaddr);
410912f080e7Smrj 			}
411012f080e7Smrj 		}
411112f080e7Smrj 		pidx++;
411212f080e7Smrj 
411312f080e7Smrj 		/* map in the rest of the pages if required */
411412f080e7Smrj 		if (window->wd_remap_copybuf) {
411512f080e7Smrj 			window->wd_remap_copybuf = B_FALSE;
411612f080e7Smrj 
411712f080e7Smrj 			/* figure out how many pages this window takes up */
411812f080e7Smrj 			poff = (sinfo->si_buf_offset + window->wd_offset) &
411912f080e7Smrj 			    MMU_PAGEOFFSET;
412012f080e7Smrj 			pcnt = mmu_btopr(window->wd_size + poff);
412112f080e7Smrj 			ASSERT(((pidx - 1) + pcnt) <= sinfo->si_max_pages);
412212f080e7Smrj 
412312f080e7Smrj 			/* map pages which require it */
412412f080e7Smrj 			for (i = 1; i < pcnt; i++) {
412512f080e7Smrj 				pmap = &dma->dp_pgmap[pidx];
412612f080e7Smrj 				if (pmap->pm_uses_copybuf) {
412712f080e7Smrj 					ASSERT(pmap->pm_mapped == B_FALSE);
412812f080e7Smrj 					if (pmap->pm_pp != NULL) {
412912f080e7Smrj 						pmap->pm_mapped = B_TRUE;
413012f080e7Smrj 						i86_pp_map(pmap->pm_pp,
413112f080e7Smrj 						    pmap->pm_kaddr);
413212f080e7Smrj 					} else if (pmap->pm_vaddr != NULL) {
413312f080e7Smrj 						pmap->pm_mapped = B_TRUE;
413412f080e7Smrj 						i86_va_map(pmap->pm_vaddr,
413512f080e7Smrj 						    sinfo->si_asp,
413612f080e7Smrj 						    pmap->pm_kaddr);
413712f080e7Smrj 					}
413812f080e7Smrj 				}
413912f080e7Smrj 				pidx++;
414012f080e7Smrj 			}
414112f080e7Smrj 		}
414212f080e7Smrj 	}
414312f080e7Smrj #endif
414412f080e7Smrj 
414512f080e7Smrj 	/* if the new window uses the copy buffer, sync it for the device */
414612f080e7Smrj 	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_WRITE)) {
414712f080e7Smrj 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
414812f080e7Smrj 		    DDI_DMA_SYNC_FORDEV);
414912f080e7Smrj 	}
415012f080e7Smrj 
415112f080e7Smrj 	return (DDI_SUCCESS);
415212f080e7Smrj }
415312f080e7Smrj 
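/*
 * Illustrative sketch only (not part of this file): a driver whose bind
 * returned DDI_DMA_PARTIAL_MAP typically walks the windows with the
 * standard DDI calls, which is what exercises the trim and sync handling
 * in rootnex_dma_win() above:
 *
 *	uint_t nwin, ccount, win;
 *	ddi_dma_cookie_t cookie;
 *	off_t off;
 *	size_t len;
 *
 *	(void) ddi_dma_numwin(handle, &nwin);
 *	for (win = 0; win < nwin; win++) {
 *		if (ddi_dma_getwin(handle, win, &off, &len, &cookie,
 *		    &ccount) != DDI_SUCCESS)
 *			break;
 *		... program the device with this window's ccount cookies,
 *		    fetching the rest with ddi_dma_nextcookie() ...
 *	}
 */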
415412f080e7Smrj 
415512f080e7Smrj 
415612f080e7Smrj /*
415712f080e7Smrj  * ************************
415812f080e7Smrj  *  obsoleted dma routines
415912f080e7Smrj  * ************************
416012f080e7Smrj  */
416112f080e7Smrj 
416212f080e7Smrj /*
416312f080e7Smrj  * rootnex_dma_map()
416412f080e7Smrj  *    called from ddi_dma_setup()
416512f080e7Smrj  */
416612f080e7Smrj /* ARGSUSED */
416712f080e7Smrj static int
416812f080e7Smrj rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip, struct ddi_dma_req *dmareq,
416912f080e7Smrj     ddi_dma_handle_t *handlep)
417012f080e7Smrj {
417112f080e7Smrj #if defined(__amd64)
417212f080e7Smrj 	/*
417312f080e7Smrj 	 * this interface is not supported in the 64-bit x86 kernel. See the
417412f080e7Smrj 	 * comment in rootnex_dma_mctl()
417512f080e7Smrj 	 */
417612f080e7Smrj 	return (DDI_DMA_NORESOURCES);
417712f080e7Smrj 
417812f080e7Smrj #else /* 32-bit x86 kernel */
417912f080e7Smrj 	ddi_dma_handle_t *lhandlep;
418012f080e7Smrj 	ddi_dma_handle_t lhandle;
418112f080e7Smrj 	ddi_dma_cookie_t cookie;
418212f080e7Smrj 	ddi_dma_attr_t dma_attr;
418312f080e7Smrj 	ddi_dma_lim_t *dma_lim;
418412f080e7Smrj 	uint_t ccnt;
418512f080e7Smrj 	int e;
418612f080e7Smrj 
418712f080e7Smrj 
418812f080e7Smrj 	/*
418912f080e7Smrj 	 * if the driver is just testing to see if it's possible to do the bind,
419012f080e7Smrj 	 * we'll use local state. Otherwise, use the handle pointer passed in.
419112f080e7Smrj 	 */
419212f080e7Smrj 	if (handlep == NULL) {
419312f080e7Smrj 		lhandlep = &lhandle;
419412f080e7Smrj 	} else {
419512f080e7Smrj 		lhandlep = handlep;
419612f080e7Smrj 	}
419712f080e7Smrj 
419812f080e7Smrj 	/* convert the limit structure to a dma_attr one */
419912f080e7Smrj 	dma_lim = dmareq->dmar_limits;
420012f080e7Smrj 	dma_attr.dma_attr_version = DMA_ATTR_V0;
420112f080e7Smrj 	dma_attr.dma_attr_addr_lo = dma_lim->dlim_addr_lo;
420212f080e7Smrj 	dma_attr.dma_attr_addr_hi = dma_lim->dlim_addr_hi;
420312f080e7Smrj 	dma_attr.dma_attr_minxfer = dma_lim->dlim_minxfer;
420412f080e7Smrj 	dma_attr.dma_attr_seg = dma_lim->dlim_adreg_max;
420512f080e7Smrj 	dma_attr.dma_attr_count_max = dma_lim->dlim_ctreg_max;
420612f080e7Smrj 	dma_attr.dma_attr_granular = dma_lim->dlim_granular;
420712f080e7Smrj 	dma_attr.dma_attr_sgllen = dma_lim->dlim_sgllen;
420812f080e7Smrj 	dma_attr.dma_attr_maxxfer = dma_lim->dlim_reqsize;
420912f080e7Smrj 	dma_attr.dma_attr_burstsizes = dma_lim->dlim_burstsizes;
421012f080e7Smrj 	dma_attr.dma_attr_align = MMU_PAGESIZE;
421112f080e7Smrj 	dma_attr.dma_attr_flags = 0;
421212f080e7Smrj 
421312f080e7Smrj 	e = rootnex_dma_allochdl(dip, rdip, &dma_attr, dmareq->dmar_fp,
421412f080e7Smrj 	    dmareq->dmar_arg, lhandlep);
421512f080e7Smrj 	if (e != DDI_SUCCESS) {
421612f080e7Smrj 		return (e);
421712f080e7Smrj 	}
421812f080e7Smrj 
421912f080e7Smrj 	e = rootnex_dma_bindhdl(dip, rdip, *lhandlep, dmareq, &cookie, &ccnt);
422012f080e7Smrj 	if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) {
422112f080e7Smrj 		(void) rootnex_dma_freehdl(dip, rdip, *lhandlep);
422212f080e7Smrj 		return (e);
422312f080e7Smrj 	}
422412f080e7Smrj 
422512f080e7Smrj 	/*
422612f080e7Smrj 	 * if the driver is just testing to see if it's possible to do the bind,
422712f080e7Smrj 	 * free up the local state and return the result.
422812f080e7Smrj 	 */
422912f080e7Smrj 	if (handlep == NULL) {
423012f080e7Smrj 		(void) rootnex_dma_unbindhdl(dip, rdip, *lhandlep);
423112f080e7Smrj 		(void) rootnex_dma_freehdl(dip, rdip, *lhandlep);
423212f080e7Smrj 		if (e == DDI_DMA_MAPPED) {
423312f080e7Smrj 			return (DDI_DMA_MAPOK);
423412f080e7Smrj 		} else {
423512f080e7Smrj 			return (DDI_DMA_NOMAPPING);
423612f080e7Smrj 		}
423712f080e7Smrj 	}
423812f080e7Smrj 
423912f080e7Smrj 	return (e);
424012f080e7Smrj #endif /* defined(__amd64) */
424112f080e7Smrj }
424212f080e7Smrj 
424312f080e7Smrj 
424412f080e7Smrj /*
424512f080e7Smrj  * rootnex_dma_mctl()
424612f080e7Smrj  *
424712f080e7Smrj  */
424812f080e7Smrj /* ARGSUSED */
424912f080e7Smrj static int
425012f080e7Smrj rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
425112f080e7Smrj     enum ddi_dma_ctlops request, off_t *offp, size_t *lenp, caddr_t *objpp,
425212f080e7Smrj     uint_t cache_flags)
425312f080e7Smrj {
425412f080e7Smrj #if defined(__amd64)
425512f080e7Smrj 	/*
425612f080e7Smrj 	 * DDI_DMA_SMEM_ALLOC & DDI_DMA_IOPB_ALLOC were changed to have a
425712f080e7Smrj 	 * common implementation in genunix, so they no longer have x86
425812f080e7Smrj 	 * specific functionality which called into dma_ctl.
425912f080e7Smrj 	 *
426012f080e7Smrj 	 * The rest of the obsoleted interfaces were never supported in the
426112f080e7Smrj 	 * 64-bit x86 kernel. For s10, the obsoleted DDI_DMA_SEGTOC interface
426212f080e7Smrj 	 * was not ported to the x86 64-bit kernel due to serious x86 rootnex
426312f080e7Smrj 	 * implementation issues.
426412f080e7Smrj 	 *
426512f080e7Smrj 	 * If you can't use DDI_DMA_SEGTOC, then DDI_DMA_NEXTSEG, DDI_DMA_FREE,
426612f080e7Smrj 	 * and DDI_DMA_NEXTWIN are useless since you can't get to the cookie, so we
426712f080e7Smrj 	 * reflect that now too...
426812f080e7Smrj 	 *
426912f080e7Smrj 	 * Even though we fixed the pointer problem in DDI_DMA_SEGTOC, we are
427012f080e7Smrj 	 * not going to put this functionality into the 64-bit x86 kernel now.
427112f080e7Smrj 	 * It wasn't ported to the 64-bit kernel for s10, and there's no reason to
427212f080e7Smrj 	 * change that in a future release.
427312f080e7Smrj 	 */
427412f080e7Smrj 	return (DDI_FAILURE);
427512f080e7Smrj 
427612f080e7Smrj #else /* 32-bit x86 kernel */
427712f080e7Smrj 	ddi_dma_cookie_t lcookie;
427812f080e7Smrj 	ddi_dma_cookie_t *cookie;
427912f080e7Smrj 	rootnex_window_t *window;
428012f080e7Smrj 	ddi_dma_impl_t *hp;
428112f080e7Smrj 	rootnex_dma_t *dma;
428212f080e7Smrj 	uint_t nwin;
428312f080e7Smrj 	uint_t ccnt;
428412f080e7Smrj 	size_t len;
428512f080e7Smrj 	off_t off;
428612f080e7Smrj 	int e;
428712f080e7Smrj 
428812f080e7Smrj 
428912f080e7Smrj 	/*
429012f080e7Smrj 	 * DDI_DMA_SEGTOC, DDI_DMA_NEXTSEG, and DDI_DMA_NEXTWIN are a little
429112f080e7Smrj 	 * hacky since we're optimizing for the current interfaces and so we can
429212f080e7Smrj 	 * clean up the mess in genunix. Hopefully we will remove these
429312f080e7Smrj 	 * obsoleted routines someday soon.
429412f080e7Smrj 	 */
429512f080e7Smrj 
429612f080e7Smrj 	switch (request) {
429712f080e7Smrj 
429812f080e7Smrj 	case DDI_DMA_SEGTOC: /* ddi_dma_segtocookie() */
429912f080e7Smrj 		hp = (ddi_dma_impl_t *)handle;
430012f080e7Smrj 		cookie = (ddi_dma_cookie_t *)objpp;
430112f080e7Smrj 
430212f080e7Smrj 		/*
430312f080e7Smrj 		 * convert segment to cookie. We don't distinguish between the
430412f080e7Smrj 		 * two :-)
430512f080e7Smrj 		 */
430612f080e7Smrj 		*cookie = *hp->dmai_cookie;
430712f080e7Smrj 		*lenp = cookie->dmac_size;
430812f080e7Smrj 		*offp = cookie->dmac_type & ~ROOTNEX_USES_COPYBUF;
430912f080e7Smrj 		return (DDI_SUCCESS);
431012f080e7Smrj 
431112f080e7Smrj 	case DDI_DMA_NEXTSEG: /* ddi_dma_nextseg() */
431212f080e7Smrj 		hp = (ddi_dma_impl_t *)handle;
431312f080e7Smrj 		dma = (rootnex_dma_t *)hp->dmai_private;
431412f080e7Smrj 
431512f080e7Smrj 		if ((*lenp != NULL) && ((uintptr_t)*lenp != (uintptr_t)hp)) {
431612f080e7Smrj 			return (DDI_DMA_STALE);
431712f080e7Smrj 		}
431812f080e7Smrj 
431912f080e7Smrj 		/* handle the case where we don't have any windows */
432012f080e7Smrj 		if (dma->dp_window == NULL) {
432112f080e7Smrj 			/*
432212f080e7Smrj 			 * if seg == NULL, and we don't have any windows,
432312f080e7Smrj 			 * return the first cookie in the sgl.
432412f080e7Smrj 			 */
432512f080e7Smrj 			if (*lenp == NULL) {
432612f080e7Smrj 				dma->dp_current_cookie = 0;
432712f080e7Smrj 				hp->dmai_cookie = dma->dp_cookies;
432812f080e7Smrj 				*objpp = (caddr_t)handle;
432912f080e7Smrj 				return (DDI_SUCCESS);
433012f080e7Smrj 
433112f080e7Smrj 			/* if we have more cookies, go to the next cookie */
433212f080e7Smrj 			} else {
433312f080e7Smrj 				if ((dma->dp_current_cookie + 1) >=
433412f080e7Smrj 				    dma->dp_sglinfo.si_sgl_size) {
433512f080e7Smrj 					return (DDI_DMA_DONE);
433612f080e7Smrj 				}
433712f080e7Smrj 				dma->dp_current_cookie++;
433812f080e7Smrj 				hp->dmai_cookie++;
433912f080e7Smrj 				return (DDI_SUCCESS);
434012f080e7Smrj 			}
434112f080e7Smrj 		}
434212f080e7Smrj 
434312f080e7Smrj 		/* We have one or more windows */
434412f080e7Smrj 		window = &dma->dp_window[dma->dp_current_win];
434512f080e7Smrj 
434612f080e7Smrj 		/*
434712f080e7Smrj 		 * if seg == NULL, return the first cookie in the current
434812f080e7Smrj 		 * window
434912f080e7Smrj 		 */
435012f080e7Smrj 		if (*lenp == NULL) {
435112f080e7Smrj 			dma->dp_current_cookie = 0;
4352cf4e9a1dSmrj 			hp->dmai_cookie = window->wd_first_cookie;
435312f080e7Smrj 
435412f080e7Smrj 		/*
435512f080e7Smrj 		 * go to the next cookie in the window then see if we're done with
435612f080e7Smrj 		 * this window.
435712f080e7Smrj 		 */
435812f080e7Smrj 		} else {
435912f080e7Smrj 			if ((dma->dp_current_cookie + 1) >=
436012f080e7Smrj 			    window->wd_cookie_cnt) {
436112f080e7Smrj 				return (DDI_DMA_DONE);
436212f080e7Smrj 			}
436312f080e7Smrj 			dma->dp_current_cookie++;
436412f080e7Smrj 			hp->dmai_cookie++;
436512f080e7Smrj 		}
436612f080e7Smrj 		*objpp = (caddr_t)handle;
436712f080e7Smrj 		return (DDI_SUCCESS);
436812f080e7Smrj 
436912f080e7Smrj 	case DDI_DMA_NEXTWIN: /* ddi_dma_nextwin() */
437012f080e7Smrj 		hp = (ddi_dma_impl_t *)handle;
437112f080e7Smrj 		dma = (rootnex_dma_t *)hp->dmai_private;
437212f080e7Smrj 
437312f080e7Smrj 		if ((*offp != NULL) && ((uintptr_t)*offp != (uintptr_t)hp)) {
437412f080e7Smrj 			return (DDI_DMA_STALE);
437512f080e7Smrj 		}
437612f080e7Smrj 
437712f080e7Smrj 		/* if win == NULL, return the first window in the bind */
437812f080e7Smrj 		if (*offp == NULL) {
437912f080e7Smrj 			nwin = 0;
438012f080e7Smrj 
438112f080e7Smrj 		/*
438212f080e7Smrj 		 * else, go to the next window then see if we're done with all
438312f080e7Smrj 		 * the windows.
438412f080e7Smrj 		 */
438512f080e7Smrj 		} else {
438612f080e7Smrj 			nwin = dma->dp_current_win + 1;
438712f080e7Smrj 			if (nwin >= hp->dmai_nwin) {
438812f080e7Smrj 				return (DDI_DMA_DONE);
438912f080e7Smrj 			}
439012f080e7Smrj 		}
439112f080e7Smrj 
439212f080e7Smrj 		/* switch to the next window */
439312f080e7Smrj 		e = rootnex_dma_win(dip, rdip, handle, nwin, &off, &len,
439412f080e7Smrj 		    &lcookie, &ccnt);
439512f080e7Smrj 		ASSERT(e == DDI_SUCCESS);
439612f080e7Smrj 		if (e != DDI_SUCCESS) {
439712f080e7Smrj 			return (DDI_DMA_STALE);
439812f080e7Smrj 		}
439912f080e7Smrj 
440012f080e7Smrj 		/* reset the cookie back to the first cookie in the window */
440112f080e7Smrj 		if (dma->dp_window != NULL) {
440212f080e7Smrj 			window = &dma->dp_window[dma->dp_current_win];
440312f080e7Smrj 			hp->dmai_cookie = window->wd_first_cookie;
440412f080e7Smrj 		} else {
440512f080e7Smrj 			hp->dmai_cookie = dma->dp_cookies;
440612f080e7Smrj 		}
440712f080e7Smrj 
440812f080e7Smrj 		*objpp = (caddr_t)handle;
440912f080e7Smrj 		return (DDI_SUCCESS);
441012f080e7Smrj 
441112f080e7Smrj 	case DDI_DMA_FREE: /* ddi_dma_free() */
441212f080e7Smrj 		(void) rootnex_dma_unbindhdl(dip, rdip, handle);
441312f080e7Smrj 		(void) rootnex_dma_freehdl(dip, rdip, handle);
441412f080e7Smrj 		if (rootnex_state->r_dvma_call_list_id) {
441512f080e7Smrj 			ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
441612f080e7Smrj 		}
441712f080e7Smrj 		return (DDI_SUCCESS);
441812f080e7Smrj 
441912f080e7Smrj 	case DDI_DMA_IOPB_ALLOC:	/* get contiguous DMA-able memory */
442012f080e7Smrj 	case DDI_DMA_SMEM_ALLOC:	/* get contiguous DMA-able memory */
442112f080e7Smrj 		/* should never get here, handled in genunix */
442212f080e7Smrj 		ASSERT(0);
442312f080e7Smrj 		return (DDI_FAILURE);
442412f080e7Smrj 
442512f080e7Smrj 	case DDI_DMA_KVADDR:
442612f080e7Smrj 	case DDI_DMA_GETERR:
442712f080e7Smrj 	case DDI_DMA_COFF:
442812f080e7Smrj 		return (DDI_FAILURE);
442912f080e7Smrj 	}
443012f080e7Smrj 
443112f080e7Smrj 	return (DDI_FAILURE);
443212f080e7Smrj #endif /* defined(__amd64) */
44337c478bd9Sstevel@tonic-gate }
44347aec1d6eScindi 
443500d0963fSdilpreet 
443600d0963fSdilpreet /*
443700d0963fSdilpreet  * *********
443800d0963fSdilpreet  *  FMA Code
443900d0963fSdilpreet  * *********
444000d0963fSdilpreet  */
444100d0963fSdilpreet 
444200d0963fSdilpreet /*
444300d0963fSdilpreet  * rootnex_fm_init()
444400d0963fSdilpreet  *    FMA init busop
444500d0963fSdilpreet  */
44467aec1d6eScindi /* ARGSUSED */
44477aec1d6eScindi static int
444800d0963fSdilpreet rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap,
444900d0963fSdilpreet     ddi_iblock_cookie_t *ibc)
44507aec1d6eScindi {
445100d0963fSdilpreet 	*ibc = rootnex_state->r_err_ibc;
445200d0963fSdilpreet 
445300d0963fSdilpreet 	return (ddi_system_fmcap);
445400d0963fSdilpreet }
445500d0963fSdilpreet 
445600d0963fSdilpreet /*
445700d0963fSdilpreet  * rootnex_dma_check()
445800d0963fSdilpreet  *    Function called after a DMA fault has occurred to find out whether the
445900d0963fSdilpreet  *    fault address is associated with a driver that is able to handle and
446000d0963fSdilpreet  *    recover from faults.
446100d0963fSdilpreet  */
446200d0963fSdilpreet /* ARGSUSED */
446300d0963fSdilpreet static int
446400d0963fSdilpreet rootnex_dma_check(dev_info_t *dip, const void *handle, const void *addr,
446500d0963fSdilpreet     const void *not_used)
446600d0963fSdilpreet {
446700d0963fSdilpreet 	rootnex_window_t *window;
446800d0963fSdilpreet 	uint64_t start_addr;
446900d0963fSdilpreet 	uint64_t fault_addr;
447000d0963fSdilpreet 	ddi_dma_impl_t *hp;
447100d0963fSdilpreet 	rootnex_dma_t *dma;
447200d0963fSdilpreet 	uint64_t end_addr;
447300d0963fSdilpreet 	size_t csize;
447400d0963fSdilpreet 	int i;
447500d0963fSdilpreet 	int j;
447600d0963fSdilpreet 
447700d0963fSdilpreet 
447800d0963fSdilpreet 	/* The driver has to set DDI_DMA_FLAGERR to recover from dma faults */
447900d0963fSdilpreet 	hp = (ddi_dma_impl_t *)handle;
448000d0963fSdilpreet 	ASSERT(hp);
448100d0963fSdilpreet 
448200d0963fSdilpreet 	dma = (rootnex_dma_t *)hp->dmai_private;
448300d0963fSdilpreet 
448400d0963fSdilpreet 	/* Get the address that we need to search for */
448500d0963fSdilpreet 	fault_addr = *(uint64_t *)addr;
448600d0963fSdilpreet 
448700d0963fSdilpreet 	/*
448800d0963fSdilpreet 	 * if we don't have any windows, we can just walk through all the
448900d0963fSdilpreet 	 * cookies.
449000d0963fSdilpreet 	 */
449100d0963fSdilpreet 	if (dma->dp_window == NULL) {
449200d0963fSdilpreet 		/* for each cookie */
449300d0963fSdilpreet 		for (i = 0; i < dma->dp_sglinfo.si_sgl_size; i++) {
449400d0963fSdilpreet 			/*
449500d0963fSdilpreet 			 * if the faulted address is within the physical address
449600d0963fSdilpreet 			 * range of the cookie, return DDI_FM_NONFATAL.
449700d0963fSdilpreet 			 */
449800d0963fSdilpreet 			if ((fault_addr >= dma->dp_cookies[i].dmac_laddress) &&
449900d0963fSdilpreet 			    (fault_addr <= (dma->dp_cookies[i].dmac_laddress +
450000d0963fSdilpreet 			    dma->dp_cookies[i].dmac_size))) {
450100d0963fSdilpreet 				return (DDI_FM_NONFATAL);
450200d0963fSdilpreet 			}
450300d0963fSdilpreet 		}
450400d0963fSdilpreet 
450500d0963fSdilpreet 		/* fault_addr not within this DMA handle */
450600d0963fSdilpreet 		return (DDI_FM_UNKNOWN);
450700d0963fSdilpreet 	}
450800d0963fSdilpreet 
450900d0963fSdilpreet 	/* we have multiple windows, walk through each window */
451000d0963fSdilpreet 	for (i = 0; i < hp->dmai_nwin; i++) {
451100d0963fSdilpreet 		window = &dma->dp_window[i];
451200d0963fSdilpreet 
451300d0963fSdilpreet 		/* Go through all the cookies in the window */
451400d0963fSdilpreet 		for (j = 0; j < window->wd_cookie_cnt; j++) {
451500d0963fSdilpreet 
451600d0963fSdilpreet 			start_addr = window->wd_first_cookie[j].dmac_laddress;
451700d0963fSdilpreet 			csize = window->wd_first_cookie[j].dmac_size;
451800d0963fSdilpreet 
451900d0963fSdilpreet 			/*
452000d0963fSdilpreet 			 * if we are trimming the first cookie in the window,
452100d0963fSdilpreet 			 * and this is the first cookie, adjust the start
452200d0963fSdilpreet 			 * address and size of the cookie to account for the
452300d0963fSdilpreet 			 * trim.
452400d0963fSdilpreet 			 */
452500d0963fSdilpreet 			if (window->wd_trim.tr_trim_first && (j == 0)) {
452600d0963fSdilpreet 				start_addr = window->wd_trim.tr_first_paddr;
452700d0963fSdilpreet 				csize = window->wd_trim.tr_first_size;
452800d0963fSdilpreet 			}
452900d0963fSdilpreet 
453000d0963fSdilpreet 			/*
453100d0963fSdilpreet 			 * if we are trimming the last cookie in the window,
453200d0963fSdilpreet 			 * and this is the last cookie, adjust the start
453300d0963fSdilpreet 			 * address and size of the cookie to account for the
453400d0963fSdilpreet 			 * trim.
453500d0963fSdilpreet 			 */
453600d0963fSdilpreet 			if (window->wd_trim.tr_trim_last &&
453700d0963fSdilpreet 			    (j == (window->wd_cookie_cnt - 1))) {
453800d0963fSdilpreet 				start_addr = window->wd_trim.tr_last_paddr;
453900d0963fSdilpreet 				csize = window->wd_trim.tr_last_size;
454000d0963fSdilpreet 			}
454100d0963fSdilpreet 
454200d0963fSdilpreet 			end_addr = start_addr + csize;
454300d0963fSdilpreet 
454400d0963fSdilpreet 			/*
454500d0963fSdilpreet 			 * if the faulted address is within the physical address
454600d0963fSdilpreet 			 * range of the cookie, return DDI_FM_NONFATAL.
454700d0963fSdilpreet 			 */
454800d0963fSdilpreet 			if ((fault_addr >= start_addr) &&
454900d0963fSdilpreet 			    (fault_addr <= end_addr)) {
455000d0963fSdilpreet 				return (DDI_FM_NONFATAL);
455100d0963fSdilpreet 			}
455200d0963fSdilpreet 		}
455300d0963fSdilpreet 	}
455400d0963fSdilpreet 
455500d0963fSdilpreet 	/* fault_addr not within this DMA handle */
455600d0963fSdilpreet 	return (DDI_FM_UNKNOWN);
45577aec1d6eScindi }
4558