/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * x86 root nexus driver
 */

#include <sys/sysmacros.h>
#include <sys/conf.h>
#include <sys/autoconf.h>
#include <sys/debug.h>
#include <sys/psw.h>
#include <sys/ddidmareq.h>
#include <sys/promif.h>
#include <sys/devops.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <vm/seg.h>
#include <vm/seg_kmem.h>
#include <vm/seg_dev.h>
#include <sys/vmem.h>
#include <sys/mman.h>
#include <vm/hat.h>
#include <vm/as.h>
#include <vm/page.h>
#include <sys/avintr.h>
#include <sys/errno.h>
#include <sys/modctl.h>
#include <sys/ddi_impldefs.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/mach_intr.h>
#include <sys/psm.h>
#include <sys/ontrap.h>
#include <sys/atomic.h>
#include <sys/sdt.h>
#include <sys/rootnex.h>
#include <vm/hat_i86.h>
#include <sys/ddifm.h>
#include <sys/ddi_isa.h>

#ifdef __xpv
#include <sys/bootinfo.h>
#include <sys/hypervisor.h>
#include <sys/bootconf.h>
#include <vm/kboot_mmu.h>
#else
#include <sys/intel_iommu.h>
#endif


/*
 * enable/disable extra checking of function parameters. Useful for debugging
 * drivers.
 */
#ifdef	DEBUG
int rootnex_alloc_check_parms = 1;
int rootnex_bind_check_parms = 1;
int rootnex_bind_check_inuse = 1;
int rootnex_unbind_verify_buffer = 0;
int rootnex_sync_check_parms = 1;
#else
int rootnex_alloc_check_parms = 0;
int rootnex_bind_check_parms = 0;
int rootnex_bind_check_inuse = 0;
int rootnex_unbind_verify_buffer = 0;
int rootnex_sync_check_parms = 0;
#endif
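
/*
 * A sketch of flipping one of these checks on at runtime (the value is
 * illustrative only), using mdb -kw on a live kernel:
 *
 *	> rootnex_bind_check_parms/W 1
 */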

/* Master Abort and Target Abort panic flag */
int rootnex_fm_ma_ta_panic_flag = 0;

/* Semi-temporary patchables to phase in bug fixes, test drivers, etc. */
int rootnex_bind_fail = 1;
int rootnex_bind_warn = 1;
uint8_t *rootnex_warn_list;
/* bitmasks for rootnex_warn_list. Up to 8 different warnings with uint8_t */
#define	ROOTNEX_BIND_WARNING	(0x1 << 0)
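
/*
 * rootnex_warn_list is indexed by driver major number; see the devcnt-sized
 * allocation in rootnex_dma_init().
 */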

/*
 * Revert to the old broken behavior of always syncing the entire copy
 * buffer. This is useful if we have a buggy driver that doesn't correctly
 * pass the offset and size into ddi_dma_sync().
 */
int rootnex_sync_ignore_params = 0;

/*
 * For the 64-bit kernel, pre-alloc enough cookies for a 256K buffer plus 1
 * page for alignment. For the 32-bit kernel, pre-alloc enough cookies for a
 * 64K buffer plus 1 page for alignment (we have less kernel space in a 32-bit
 * kernel). Allocate enough windows to handle a 256K buffer with a DMA engine
 * whose sgllen is at least 65, and enough copybuf buffer state pages to
 * handle 2 pages (< 8K). We will still need to allocate the copy buffer
 * during bind though (if we need one). These can only be modified in
 * /etc/system before rootnex attach (an example follows below).
 */
#if defined(__amd64)
int rootnex_prealloc_cookies = 65;
int rootnex_prealloc_windows = 4;
int rootnex_prealloc_copybuf = 2;
#else
int rootnex_prealloc_cookies = 33;
int rootnex_prealloc_windows = 4;
int rootnex_prealloc_copybuf = 2;
#endif
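
/*
 * For example (the value is illustrative only), a line such as
 *
 *	set rootnex:rootnex_prealloc_cookies = 129
 *
 * in /etc/system raises the pre-allocated cookie count at the next boot.
 */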

/* driver global state */
static rootnex_state_t *rootnex_state;

/* shortcut to rootnex counters */
static uint64_t *rootnex_cnt;

/*
 * XXX - does x86 even need these or are they left over from the SPARC days?
 */
/* statically defined integer/boolean properties for the root node */
static rootnex_intprop_t rootnex_intprp[] = {
	{ "PAGESIZE",			PAGESIZE },
	{ "MMU_PAGESIZE",		MMU_PAGESIZE },
	{ "MMU_PAGEOFFSET",		MMU_PAGEOFFSET },
	{ DDI_RELATIVE_ADDRESSING,	1 },
};
#define	NROOT_INTPROPS	(sizeof (rootnex_intprp) / sizeof (rootnex_intprop_t))

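/*
 * Under the hypervisor, dom0 owns the real devices and DMA must be
 * programmed with machine addresses (MAs), hence the pa_to_ma()
 * translation below; everywhere else the paddr is used unchanged.
 */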
#ifdef __xpv
typedef maddr_t rootnex_addr_t;
#define	ROOTNEX_PADDR_TO_RBASE(xinfo, pa)	\
	(DOMAIN_IS_INITDOMAIN(xinfo) ? pa_to_ma(pa) : (pa))
#else
typedef paddr_t rootnex_addr_t;
#endif

#if !defined(__xpv)
char _depends_on[] = "mach/pcplusmp misc/iommulib";
#endif

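/*
 * The root nexus exposes no character/block device interface of its own;
 * every cb_ops entry point below is a stub.
 */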
static struct cb_ops rootnex_cb_ops = {
	nodev,		/* open */
	nodev,		/* close */
	nodev,		/* strategy */
	nodev,		/* print */
	nodev,		/* dump */
	nodev,		/* read */
	nodev,		/* write */
	nodev,		/* ioctl */
	nodev,		/* devmap */
	nodev,		/* mmap */
	nodev,		/* segmap */
	nochpoll,	/* chpoll */
	ddi_prop_op,	/* cb_prop_op */
	NULL,		/* struct streamtab */
	D_NEW | D_MP | D_HOTPLUG, /* compatibility flags */
	CB_REV,		/* Rev */
	nodev,		/* cb_aread */
	nodev		/* cb_awrite */
};

static int rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
    off_t offset, off_t len, caddr_t *vaddrp);
static int rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip,
    struct hat *hat, struct seg *seg, caddr_t addr,
    struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock);
static int rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep);
static int rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
    ddi_dma_handle_t *handlep);
static int rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle);
static int rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp);
static int rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle);
static int rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags);
static int rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp);
static int rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t cache_flags);
static int rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip,
    ddi_ctl_enum_t ctlop, void *arg, void *result);
static int rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap,
    ddi_iblock_cookie_t *ibc);
static int rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip,
    ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp, void *result);

static int rootnex_coredma_allochdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
    ddi_dma_handle_t *handlep);
static int rootnex_coredma_freehdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle);
static int rootnex_coredma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp);
static int rootnex_coredma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle);
static void rootnex_coredma_reset_cookies(dev_info_t *dip,
    ddi_dma_handle_t handle);
static int rootnex_coredma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp);
static int rootnex_coredma_sync(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags);
static int rootnex_coredma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp);
static int rootnex_coredma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep);
static int rootnex_coredma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request, off_t *offp,
    size_t *lenp, caddr_t *objpp, uint_t cache_flags);

static struct bus_ops rootnex_bus_ops = {
	BUSO_REV,
	rootnex_map,
	NULL,
	NULL,
	NULL,
	rootnex_map_fault,
	rootnex_dma_map,
	rootnex_dma_allochdl,
	rootnex_dma_freehdl,
	rootnex_dma_bindhdl,
	rootnex_dma_unbindhdl,
	rootnex_dma_sync,
	rootnex_dma_win,
	rootnex_dma_mctl,
	rootnex_ctlops,
	ddi_bus_prop_op,
	i_ddi_rootnex_get_eventcookie,
	i_ddi_rootnex_add_eventcall,
	i_ddi_rootnex_remove_eventcall,
	i_ddi_rootnex_post_event,
	0,			/* bus_intr_ctl */
	0,			/* bus_config */
	0,			/* bus_unconfig */
	rootnex_fm_init,	/* bus_fm_init */
	NULL,			/* bus_fm_fini */
	NULL,			/* bus_fm_access_enter */
	NULL,			/* bus_fm_access_exit */
	NULL,			/* bus_power */
	rootnex_intr_ops	/* bus_intr_op */
};

static int rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);

static struct dev_ops rootnex_ops = {
	DEVO_REV,
	0,
	ddi_no_info,
	nulldev,
	nulldev,
	rootnex_attach,
	rootnex_detach,
	nulldev,
	&rootnex_cb_ops,
	&rootnex_bus_ops
};

static struct modldrv rootnex_modldrv = {
	&mod_driverops,
	"i86pc root nexus",
	&rootnex_ops
};

static struct modlinkage rootnex_modlinkage = {
	MODREV_1,
	(void *)&rootnex_modldrv,
	NULL
};

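/*
 * Ops vector handed to iommulib_nexus_register() in rootnex_attach(); it
 * gives iommulib a path back into the rootnex's native DMA routines.
 */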
static iommulib_nexops_t iommulib_nexops = {
	IOMMU_NEXOPS_VERSION,
	"Rootnex IOMMU ops Vers 1.1",
	NULL,
	rootnex_coredma_allochdl,
	rootnex_coredma_freehdl,
	rootnex_coredma_bindhdl,
	rootnex_coredma_unbindhdl,
	rootnex_coredma_reset_cookies,
	rootnex_coredma_get_cookies,
	rootnex_coredma_sync,
	rootnex_coredma_win,
	rootnex_coredma_map,
	rootnex_coredma_mctl
};

/*
 *  extern hacks
 */
extern struct seg_ops segdev_ops;
extern int ignore_hardware_nodes;	/* force flag from ddi_impl.c */
#ifdef	DDI_MAP_DEBUG
extern int ddi_map_debug_flag;
#define	ddi_map_debug	if (ddi_map_debug_flag) prom_printf
#endif
extern void i86_pp_map(page_t *pp, caddr_t kaddr);
extern void i86_va_map(caddr_t vaddr, struct as *asp, caddr_t kaddr);
extern int (*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *,
    psm_intr_op_t, int *);
extern int impl_ddi_sunbus_initchild(dev_info_t *dip);
extern void impl_ddi_sunbus_removechild(dev_info_t *dip);

/*
 * Use the device arena for device control register mappings.
 * Various kernel memory walkers (debugger, dtrace) need to know
 * to avoid this address range to prevent undesired device activity.
 */
extern void *device_arena_alloc(size_t size, int vm_flag);
extern void device_arena_free(void * vaddr, size_t size);


/*
 *  Internal functions
 */
static int rootnex_dma_init();
static void rootnex_add_props(dev_info_t *);
static int rootnex_ctl_reportdev(dev_info_t *dip);
static struct intrspec *rootnex_get_ispec(dev_info_t *rdip, int inum);
static int rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp);
static int rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp);
static int rootnex_map_handle(ddi_map_req_t *mp);
static void rootnex_clean_dmahdl(ddi_dma_impl_t *hp);
static int rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegsize);
static int rootnex_valid_bind_parms(ddi_dma_req_t *dmareq,
    ddi_dma_attr_t *attr);
static void rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
    rootnex_sglinfo_t *sglinfo);
static int rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
    rootnex_dma_t *dma, ddi_dma_attr_t *attr, int kmflag);
static int rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
    rootnex_dma_t *dma, ddi_dma_attr_t *attr);
static void rootnex_teardown_copybuf(rootnex_dma_t *dma);
static int rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
    ddi_dma_attr_t *attr, int kmflag);
static void rootnex_teardown_windows(rootnex_dma_t *dma);
static void rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
    rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset);
static void rootnex_setup_cookie(ddi_dma_obj_t *dmar_object,
    rootnex_dma_t *dma, ddi_dma_cookie_t *cookie, off_t cur_offset,
    size_t *copybuf_used, page_t **cur_pp);
static int rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp,
    rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie,
    ddi_dma_attr_t *attr, off_t cur_offset);
static int rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp,
    rootnex_dma_t *dma, rootnex_window_t **windowp,
    ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used);
static int rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp,
    rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie);
static int rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win,
    off_t offset, size_t size, uint_t cache_flags);
static int rootnex_verify_buffer(rootnex_dma_t *dma);
static int rootnex_dma_check(dev_info_t *dip, const void *handle,
    const void *comp_addr, const void *not_used);

/*
 * _init()
 *
 */
int
_init(void)
{

	rootnex_state = NULL;
	return (mod_install(&rootnex_modlinkage));
}


/*
 * _info()
 *
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&rootnex_modlinkage, modinfop));
}


/*
 * _fini()
 *
 */
int
_fini(void)
{
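	/*
	 * The root nexus can never be unloaded; always fail the module
	 * unload request.
	 */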
	return (EBUSY);
}


/*
 * rootnex_attach()
 *
 */
static int
rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int fmcap;
	int e;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	/*
	 * We should only have one instance of rootnex. Save it away since we
	 * don't have an easy way to get it back later.
	 */
	ASSERT(rootnex_state == NULL);
	rootnex_state = kmem_zalloc(sizeof (rootnex_state_t), KM_SLEEP);

	rootnex_state->r_dip = dip;
	rootnex_state->r_err_ibc = (ddi_iblock_cookie_t)ipltospl(15);
	rootnex_state->r_reserved_msg_printed = B_FALSE;
	rootnex_cnt = &rootnex_state->r_counters[0];
	rootnex_state->r_intel_iommu_enabled = B_FALSE;

	/*
	 * Set minimum fm capability level for i86pc platforms and then
	 * initialize error handling. Since we're the rootnex, we don't
	 * care what's returned in the fmcap field.
	 */
	ddi_system_fmcap = DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE |
	    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE;
	fmcap = ddi_system_fmcap;
	ddi_fm_init(dip, &fmcap, &rootnex_state->r_err_ibc);

	/* initialize DMA related state */
	e = rootnex_dma_init();
	if (e != DDI_SUCCESS) {
		kmem_free(rootnex_state, sizeof (rootnex_state_t));
		return (DDI_FAILURE);
	}

	/* Add static root node properties */
	rootnex_add_props(dip);

	/* since we can't call ddi_report_dev() */
	cmn_err(CE_CONT, "?root nexus = %s\n", ddi_get_name(dip));

	/* Initialize rootnex event handle */
	i_ddi_rootnex_init_events(dip);

#if !defined(__xpv)
#if defined(__amd64)
	/* probe intel iommu */
	intel_iommu_probe_and_parse();

	/* attach the iommu nodes */
	if (intel_iommu_support) {
		if (intel_iommu_attach_dmar_nodes() == DDI_SUCCESS) {
			rootnex_state->r_intel_iommu_enabled = B_TRUE;
		} else {
			intel_iommu_release_dmar_info();
		}
	}
#endif

	e = iommulib_nexus_register(dip, &iommulib_nexops,
	    &rootnex_state->r_iommulib_handle);

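	/*
	 * Note: the registration result is only sanity-checked on DEBUG
	 * kernels; the ASSERT() below compiles away on non-DEBUG builds.
	 */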
	ASSERT(e == DDI_SUCCESS);
#endif

	return (DDI_SUCCESS);
}


/*
 * rootnex_detach()
 *
 */
/*ARGSUSED*/
static int
rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_SUSPEND:
		break;
	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}


/*
 * rootnex_dma_init()
 *
 */
/*ARGSUSED*/
static int
rootnex_dma_init()
{
	size_t bufsize;


	/*
	 * Size of our cookie/window/copybuf state needed in the DMA bind that
	 * we pre-allocate in dma_alloc_handle.
	 */
	rootnex_state->r_prealloc_cookies = rootnex_prealloc_cookies;
	rootnex_state->r_prealloc_size =
	    (rootnex_state->r_prealloc_cookies * sizeof (ddi_dma_cookie_t)) +
	    (rootnex_prealloc_windows * sizeof (rootnex_window_t)) +
	    (rootnex_prealloc_copybuf * sizeof (rootnex_pgmap_t));

	/*
	 * Set up the DDI DMA handle kmem cache; align each handle on 64
	 * bytes and allocate 16 extra bytes for struct pointer alignment
	 * (p->dmai_private & dma->dp_prealloc_buffer).
	 */
	bufsize = sizeof (ddi_dma_impl_t) + sizeof (rootnex_dma_t) +
	    rootnex_state->r_prealloc_size + 0x10;
	rootnex_state->r_dmahdl_cache = kmem_cache_create("rootnex_dmahdl",
	    bufsize, 64, NULL, NULL, NULL, NULL, NULL, 0);
	if (rootnex_state->r_dmahdl_cache == NULL) {
		return (DDI_FAILURE);
	}

	/*
	 * allocate array to track which major numbers we have printed warnings
	 * for.
	 */
	rootnex_warn_list = kmem_zalloc(devcnt * sizeof (*rootnex_warn_list),
	    KM_SLEEP);

	return (DDI_SUCCESS);
}


/*
 * rootnex_add_props()
 *
 */
static void
rootnex_add_props(dev_info_t *dip)
{
	rootnex_intprop_t *rpp;
	int i;

	/* Add static integer/boolean properties to the root node */
	rpp = rootnex_intprp;
	for (i = 0; i < NROOT_INTPROPS; i++) {
		(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip,
		    rpp[i].prop_name, rpp[i].prop_value);
	}
}



/*
 * *************************
 *  ctlops related routines
 * *************************
 */

/*
 * rootnex_ctlops()
 *
 */
/*ARGSUSED*/
static int
rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
    void *arg, void *result)
{
	int n, *ptr;
	struct ddi_parent_private_data *pdp;

	switch (ctlop) {
	case DDI_CTLOPS_DMAPMAPC:
		/*
		 * Return 'partial' to indicate that dma mapping
		 * has to be done in the main MMU.
		 */
		return (DDI_DMA_PARTIAL);

	case DDI_CTLOPS_BTOP:
		/*
		 * Convert byte count input to physical page units.
		 * (byte counts that are not a page-size multiple
		 * are rounded down)
		 */
		*(ulong_t *)result = btop(*(ulong_t *)arg);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_PTOB:
		/*
		 * Convert size in physical pages to bytes
		 */
		*(ulong_t *)result = ptob(*(ulong_t *)arg);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_BTOPR:
		/*
		 * Convert byte count input to physical page units
		 * (byte counts that are not a page-size multiple
		 * are rounded up)
		 */
		*(ulong_t *)result = btopr(*(ulong_t *)arg);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_INITCHILD:
		return (impl_ddi_sunbus_initchild(arg));

	case DDI_CTLOPS_UNINITCHILD:
		impl_ddi_sunbus_removechild(arg);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REPORTDEV:
		return (rootnex_ctl_reportdev(rdip));

	case DDI_CTLOPS_IOMIN:
		/*
		 * Nothing to do here but reflect the request back.
		 */
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REGSIZE:
	case DDI_CTLOPS_NREGS:
		break;

	case DDI_CTLOPS_SIDDEV:
		if (ndi_dev_is_prom_node(rdip))
			return (DDI_SUCCESS);
		if (ndi_dev_is_persistent_node(rdip))
			return (DDI_SUCCESS);
		return (DDI_FAILURE);

	case DDI_CTLOPS_POWER:
		return ((*pm_platform_power)((power_req_t *)arg));

	case DDI_CTLOPS_RESERVED0: /* Was DDI_CTLOPS_NINTRS, obsolete */
	case DDI_CTLOPS_RESERVED1: /* Was DDI_CTLOPS_POKE_INIT, obsolete */
	case DDI_CTLOPS_RESERVED2: /* Was DDI_CTLOPS_POKE_FLUSH, obsolete */
	case DDI_CTLOPS_RESERVED3: /* Was DDI_CTLOPS_POKE_FINI, obsolete */
	case DDI_CTLOPS_RESERVED4: /* Was DDI_CTLOPS_INTR_HILEVEL, obsolete */
	case DDI_CTLOPS_RESERVED5: /* Was DDI_CTLOPS_XLATE_INTRS, obsolete */
		if (!rootnex_state->r_reserved_msg_printed) {
			rootnex_state->r_reserved_msg_printed = B_TRUE;
			cmn_err(CE_WARN, "Failing ddi_ctlops call(s) for "
			    "1 or more reserved/obsolete operations.");
		}
		return (DDI_FAILURE);

	default:
		return (DDI_FAILURE);
	}
	/*
	 * The rest are for "hardware" properties
	 */
	if ((pdp = ddi_get_parent_data(rdip)) == NULL)
		return (DDI_FAILURE);

	if (ctlop == DDI_CTLOPS_NREGS) {
		ptr = (int *)result;
		*ptr = pdp->par_nreg;
	} else {
		off_t *size = (off_t *)result;

		ptr = (int *)arg;
		n = *ptr;
		if (n >= pdp->par_nreg) {
			return (DDI_FAILURE);
		}
		*size = (off_t)pdp->par_reg[n].regspec_size;
	}
	return (DDI_SUCCESS);
}


/*
 * rootnex_ctl_reportdev()
 *
 */
static int
rootnex_ctl_reportdev(dev_info_t *dev)
{
	int i, n, len, f_len = 0;
	char *buf;

	buf = kmem_alloc(REPORTDEV_BUFSIZE, KM_SLEEP);
	f_len += snprintf(buf, REPORTDEV_BUFSIZE,
	    "%s%d at root", ddi_driver_name(dev), ddi_get_instance(dev));
	len = strlen(buf);

	for (i = 0; i < sparc_pd_getnreg(dev); i++) {

		struct regspec *rp = sparc_pd_getreg(dev, i);

		if (i == 0)
			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
			    ": ");
		else
			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
			    " and ");
		len = strlen(buf);

		switch (rp->regspec_bustype) {

		case BTEISA:
			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
			    "%s 0x%x", DEVI_EISA_NEXNAME, rp->regspec_addr);
			break;

		case BTISA:
			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
			    "%s 0x%x", DEVI_ISA_NEXNAME, rp->regspec_addr);
			break;

		default:
			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
			    "space %x offset %x",
			    rp->regspec_bustype, rp->regspec_addr);
			break;
		}
		len = strlen(buf);
	}
	for (i = 0, n = sparc_pd_getnintr(dev); i < n; i++) {
		int pri;

		if (i != 0) {
			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
			    ",");
			len = strlen(buf);
		}
		pri = INT_IPL(sparc_pd_getintr(dev, i)->intrspec_pri);
		f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
		    " sparc ipl %d", pri);
		len = strlen(buf);
	}
#ifdef DEBUG
	if (f_len + 1 >= REPORTDEV_BUFSIZE) {
		cmn_err(CE_NOTE, "next message is truncated: "
		    "printed length 1024, real length %d", f_len);
	}
#endif /* DEBUG */
	cmn_err(CE_CONT, "?%s\n", buf);
	kmem_free(buf, REPORTDEV_BUFSIZE);
	return (DDI_SUCCESS);
}


/*
 * ******************
 *  map related code
 * ******************
 */

/*
 * rootnex_map()
 *
 */
static int
rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, off_t offset,
    off_t len, caddr_t *vaddrp)
{
	struct regspec *rp, tmp_reg;
	ddi_map_req_t mr = *mp;		/* Get private copy of request */
	int error;

	mp = &mr;

	switch (mp->map_op)  {
	case DDI_MO_MAP_LOCKED:
	case DDI_MO_UNMAP:
	case DDI_MO_MAP_HANDLE:
		break;
	default:
#ifdef	DDI_MAP_DEBUG
		cmn_err(CE_WARN, "rootnex_map: unimplemented map op %d.",
		    mp->map_op);
#endif	/* DDI_MAP_DEBUG */
		return (DDI_ME_UNIMPLEMENTED);
	}

	if (mp->map_flags & DDI_MF_USER_MAPPING)  {
#ifdef	DDI_MAP_DEBUG
		cmn_err(CE_WARN, "rootnex_map: unimplemented map type: user.");
#endif	/* DDI_MAP_DEBUG */
		return (DDI_ME_UNIMPLEMENTED);
	}

	/*
	 * First, if given an rnumber, convert it to a regspec...
	 * (Presumably, this is on behalf of a child of the root node?)
	 */

	if (mp->map_type == DDI_MT_RNUMBER)  {

		int rnumber = mp->map_obj.rnumber;
#ifdef	DDI_MAP_DEBUG
		static char *out_of_range =
		    "rootnex_map: Out of range rnumber <%d>, device <%s>";
#endif	/* DDI_MAP_DEBUG */

		rp = i_ddi_rnumber_to_regspec(rdip, rnumber);
		if (rp == NULL)  {
#ifdef	DDI_MAP_DEBUG
			cmn_err(CE_WARN, out_of_range, rnumber,
			    ddi_get_name(rdip));
#endif	/* DDI_MAP_DEBUG */
			return (DDI_ME_RNUMBER_RANGE);
		}

		/*
		 * Convert the given ddi_map_req_t from rnumber to regspec...
		 */

		mp->map_type = DDI_MT_REGSPEC;
		mp->map_obj.rp = rp;
	}

	/*
	 * Adjust offset and length corresponding to the called values...
	 * XXX: A non-zero length means override the one in the regspec
	 * XXX: (regardless of what's in the parent's range?)
	 */

	tmp_reg = *(mp->map_obj.rp);		/* Preserve underlying data */
	rp = mp->map_obj.rp = &tmp_reg;		/* Use tmp_reg in request */

#ifdef	DDI_MAP_DEBUG
	cmn_err(CE_CONT, "rootnex: <%s,%s> <0x%x, 0x%x, 0x%x> offset %d len %d "
	    "handle 0x%x\n", ddi_get_name(dip), ddi_get_name(rdip),
	    rp->regspec_bustype, rp->regspec_addr, rp->regspec_size, offset,
	    len, mp->map_handlep);
#endif	/* DDI_MAP_DEBUG */

	/*
	 * I/O or memory mapping:
	 *
	 *	<bustype=0, addr=x, len=x>: memory
	 *	<bustype=1, addr=x, len=x>: i/o
	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
	 */

	if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) {
		cmn_err(CE_WARN, "<%s,%s> invalid register spec"
		    " <0x%x, 0x%x, 0x%x>", ddi_get_name(dip),
		    ddi_get_name(rdip), rp->regspec_bustype,
		    rp->regspec_addr, rp->regspec_size);
		return (DDI_ME_INVAL);
	}

	if (rp->regspec_bustype > 1 && rp->regspec_addr == 0) {
		/*
		 * compatibility i/o mapping
		 */
		rp->regspec_bustype += (uint_t)offset;
	} else {
		/*
		 * Normal memory or i/o mapping
		 */
		rp->regspec_addr += (uint_t)offset;
	}

	if (len != 0)
		rp->regspec_size = (uint_t)len;

#ifdef	DDI_MAP_DEBUG
	cmn_err(CE_CONT, "             <%s,%s> <0x%x, 0x%x, 0x%x> offset %d "
	    "len %d handle 0x%x\n", ddi_get_name(dip), ddi_get_name(rdip),
	    rp->regspec_bustype, rp->regspec_addr, rp->regspec_size,
	    offset, len, mp->map_handlep);
#endif	/* DDI_MAP_DEBUG */

	/*
	 * Apply any parent ranges at this level, if applicable.
	 * (This is where nexus specific regspec translation takes place.
	 * Use of this function is implicit agreement that translation is
	 * provided via ddi_apply_range.)
	 */

#ifdef	DDI_MAP_DEBUG
	ddi_map_debug("applying range of parent <%s> to child <%s>...\n",
	    ddi_get_name(dip), ddi_get_name(rdip));
#endif	/* DDI_MAP_DEBUG */

	if ((error = i_ddi_apply_range(dip, rdip, mp->map_obj.rp)) != 0)
		return (error);

	switch (mp->map_op)  {
	case DDI_MO_MAP_LOCKED:

		/*
		 * Set up the locked down kernel mapping to the regspec...
		 */

		return (rootnex_map_regspec(mp, vaddrp));

	case DDI_MO_UNMAP:

		/*
		 * Release mapping...
		 */

		return (rootnex_unmap_regspec(mp, vaddrp));

	case DDI_MO_MAP_HANDLE:

		return (rootnex_map_handle(mp));

	default:
		return (DDI_ME_UNIMPLEMENTED);
	}
}


/*
 * rootnex_map_fault()
 *
 *	fault in mappings for requestors
 */
/*ARGSUSED*/
static int
rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip, struct hat *hat,
    struct seg *seg, caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot,
    uint_t lock)
{

#ifdef	DDI_MAP_DEBUG
	ddi_map_debug("rootnex_map_fault: address <%x> pfn <%x>", addr, pfn);
	ddi_map_debug(" Seg <%s>\n",
	    seg->s_ops == &segdev_ops ? "segdev" :
	    seg == &kvseg ? "segkmem" : "NONE!");
#endif	/* DDI_MAP_DEBUG */

	/*
	 * This is all terribly broken, but it is a start
	 *
	 * XXX	Note that this test means that segdev_ops
	 *	must be exported from seg_dev.c.
	 * XXX	What about devices with their own segment drivers?
	 */
	if (seg->s_ops == &segdev_ops) {
		struct segdev_data *sdp = (struct segdev_data *)seg->s_data;

		if (hat == NULL) {
			/*
			 * This is one plausible interpretation of
			 * a null hat i.e. use the first hat on the
			 * address space hat list which by convention is
			 * the hat of the system MMU.  An alternative
			 * would be to panic ... which might well be better ...
			 */
			ASSERT(AS_READ_HELD(seg->s_as, &seg->s_as->a_lock));
			hat = seg->s_as->a_hat;
			cmn_err(CE_NOTE, "rootnex_map_fault: nil hat");
		}
		hat_devload(hat, addr, MMU_PAGESIZE, pfn, prot | sdp->hat_attr,
		    (lock ? HAT_LOAD_LOCK : HAT_LOAD));
	} else if (seg == &kvseg && dp == NULL) {
		hat_devload(kas.a_hat, addr, MMU_PAGESIZE, pfn, prot,
		    HAT_LOAD_LOCK);
	} else
		return (DDI_FAILURE);
	return (DDI_SUCCESS);
}


/*
 * rootnex_map_regspec()
 *     we don't support mapping of I/O cards above 4Gb
 */
static int
rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp)
{
	rootnex_addr_t rbase;
	void *cvaddr;
	uint_t npages, pgoffset;
	struct regspec *rp;
	ddi_acc_hdl_t *hp;
	ddi_acc_impl_t *ap;
	uint_t	hat_acc_flags;
	paddr_t pbase;

	rp = mp->map_obj.rp;
	hp = mp->map_handlep;

#ifdef	DDI_MAP_DEBUG
	ddi_map_debug(
	    "rootnex_map_regspec: <0x%x 0x%x 0x%x> handle 0x%x\n",
	    rp->regspec_bustype, rp->regspec_addr,
	    rp->regspec_size, mp->map_handlep);
#endif	/* DDI_MAP_DEBUG */

	/*
	 * I/O or memory mapping
	 *
	 *	<bustype=0, addr=x, len=x>: memory
	 *	<bustype=1, addr=x, len=x>: i/o
	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
	 */

	if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) {
		cmn_err(CE_WARN, "rootnex: invalid register spec"
		    " <0x%x, 0x%x, 0x%x>", rp->regspec_bustype,
		    rp->regspec_addr, rp->regspec_size);
		return (DDI_FAILURE);
	}

	if (rp->regspec_bustype != 0) {
		/*
		 * I/O space - needs a handle.
		 */
		if (hp == NULL) {
			return (DDI_FAILURE);
		}
		ap = (ddi_acc_impl_t *)hp->ah_platform_private;
		ap->ahi_acc_attr |= DDI_ACCATTR_IO_SPACE;
		impl_acc_hdl_init(hp);

		if (mp->map_flags & DDI_MF_DEVICE_MAPPING) {
#ifdef  DDI_MAP_DEBUG
			ddi_map_debug("rootnex_map_regspec: mmap() "
			    "to I/O space is not supported.\n");
#endif  /* DDI_MAP_DEBUG */
			return (DDI_ME_INVAL);
		} else {
			/*
			 * 1275-compliant vs. compatibility i/o mapping
			 */
			*vaddrp =
			    (rp->regspec_bustype > 1 && rp->regspec_addr == 0) ?
			    ((caddr_t)(uintptr_t)rp->regspec_bustype) :
			    ((caddr_t)(uintptr_t)rp->regspec_addr);
#ifdef __xpv
			if (DOMAIN_IS_INITDOMAIN(xen_info)) {
				hp->ah_pfn = xen_assign_pfn(
				    mmu_btop((ulong_t)rp->regspec_addr &
				    MMU_PAGEMASK));
			} else {
				hp->ah_pfn = mmu_btop(
				    (ulong_t)rp->regspec_addr & MMU_PAGEMASK);
			}
#else
			hp->ah_pfn = mmu_btop((ulong_t)rp->regspec_addr &
			    MMU_PAGEMASK);
#endif
			hp->ah_pnum = mmu_btopr(rp->regspec_size +
			    (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET);
		}

#ifdef	DDI_MAP_DEBUG
		ddi_map_debug(
	    "rootnex_map_regspec: \"Mapping\" %d bytes I/O space at 0x%x\n",
		    rp->regspec_size, *vaddrp);
#endif	/* DDI_MAP_DEBUG */
		return (DDI_SUCCESS);
	}

	/*
	 * Memory space
	 */

	if (hp != NULL) {
		/*
		 * hat layer ignores
		 * hp->ah_acc.devacc_attr_endian_flags.
		 */
		switch (hp->ah_acc.devacc_attr_dataorder) {
		case DDI_STRICTORDER_ACC:
			hat_acc_flags = HAT_STRICTORDER;
			break;
		case DDI_UNORDERED_OK_ACC:
			hat_acc_flags = HAT_UNORDERED_OK;
			break;
		case DDI_MERGING_OK_ACC:
			hat_acc_flags = HAT_MERGING_OK;
			break;
		case DDI_LOADCACHING_OK_ACC:
			hat_acc_flags = HAT_LOADCACHING_OK;
			break;
		case DDI_STORECACHING_OK_ACC:
			hat_acc_flags = HAT_STORECACHING_OK;
			break;
		}
		ap = (ddi_acc_impl_t *)hp->ah_platform_private;
		ap->ahi_acc_attr |= DDI_ACCATTR_CPU_VADDR;
		impl_acc_hdl_init(hp);
		hp->ah_hat_flags = hat_acc_flags;
	} else {
		hat_acc_flags = HAT_STRICTORDER;
	}

	rbase = (rootnex_addr_t)(rp->regspec_addr & MMU_PAGEMASK);
#ifdef __xpv
	/*
	 * If we're dom0, we're using a real device so we need to translate
	 * the MA to a PA.
	 */
	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
		pbase = pfn_to_pa(xen_assign_pfn(mmu_btop(rbase)));
	} else {
		pbase = rbase;
	}
#else
	pbase = rbase;
#endif
	pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET;

	if (rp->regspec_size == 0) {
#ifdef  DDI_MAP_DEBUG
		ddi_map_debug("rootnex_map_regspec: zero regspec_size\n");
#endif  /* DDI_MAP_DEBUG */
		return (DDI_ME_INVAL);
	}

	if (mp->map_flags & DDI_MF_DEVICE_MAPPING) {
		/* extra cast to make gcc happy */
		*vaddrp = (caddr_t)((uintptr_t)mmu_btop(pbase));
	} else {
		npages = mmu_btopr(rp->regspec_size + pgoffset);

#ifdef	DDI_MAP_DEBUG
		ddi_map_debug("rootnex_map_regspec: Mapping %d pages "
		    "physical %llx", npages, pbase);
#endif	/* DDI_MAP_DEBUG */

		cvaddr = device_arena_alloc(ptob(npages), VM_NOSLEEP);
		if (cvaddr == NULL)
			return (DDI_ME_NORESOURCES);

		/*
		 * Now map in the pages we've allocated...
		 */
		hat_devload(kas.a_hat, cvaddr, mmu_ptob(npages),
		    mmu_btop(pbase), mp->map_prot | hat_acc_flags,
		    HAT_LOAD_LOCK);
		*vaddrp = (caddr_t)cvaddr + pgoffset;

		/* save away pfn and npages for FMA */
		hp = mp->map_handlep;
		if (hp) {
			hp->ah_pfn = mmu_btop(pbase);
			hp->ah_pnum = npages;
		}
	}

#ifdef	DDI_MAP_DEBUG
	ddi_map_debug("at virtual 0x%x\n", *vaddrp);
#endif	/* DDI_MAP_DEBUG */
	return (DDI_SUCCESS);
}
117812f080e7Smrj 
117912f080e7Smrj 
118012f080e7Smrj /*
118112f080e7Smrj  * rootnex_unmap_regspec()
11827c478bd9Sstevel@tonic-gate  *
11837c478bd9Sstevel@tonic-gate  */
11847c478bd9Sstevel@tonic-gate static int
118512f080e7Smrj rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp)
11867c478bd9Sstevel@tonic-gate {
118712f080e7Smrj 	caddr_t addr = (caddr_t)*vaddrp;
118812f080e7Smrj 	uint_t npages, pgoffset;
118912f080e7Smrj 	struct regspec *rp;
11907c478bd9Sstevel@tonic-gate 
119112f080e7Smrj 	if (mp->map_flags & DDI_MF_DEVICE_MAPPING)
119212f080e7Smrj 		return (0);
11937c478bd9Sstevel@tonic-gate 
119412f080e7Smrj 	rp = mp->map_obj.rp;
11957c478bd9Sstevel@tonic-gate 
119612f080e7Smrj 	if (rp->regspec_size == 0) {
119712f080e7Smrj #ifdef  DDI_MAP_DEBUG
119812f080e7Smrj 		ddi_map_debug("rootnex_unmap_regspec: zero regspec_size\n");
119912f080e7Smrj #endif  /* DDI_MAP_DEBUG */
120012f080e7Smrj 		return (DDI_ME_INVAL);
12017c478bd9Sstevel@tonic-gate 	}
12027c478bd9Sstevel@tonic-gate 
12037c478bd9Sstevel@tonic-gate 	/*
120412f080e7Smrj 	 * I/O or memory mapping:
12057c478bd9Sstevel@tonic-gate 	 *
120612f080e7Smrj 	 *	<bustype=0, addr=x, len=x>: memory
120712f080e7Smrj 	 *	<bustype=1, addr=x, len=x>: i/o
120812f080e7Smrj 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
12097c478bd9Sstevel@tonic-gate 	 */
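	/*
	 * For illustration (hypothetical register values): a memory-mapped
	 * device range such as <bustype=0, addr=0xFED00000, len=0x1000> is
	 * memory space, while a legacy UART at <bustype=1, addr=0x3F8,
	 * len=0x8> is i/o space and takes the early return below.
	 */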
121012f080e7Smrj 	if (rp->regspec_bustype != 0) {
12117c478bd9Sstevel@tonic-gate 		/*
121212f080e7Smrj 		 * This is I/O space, which requires no particular
121312f080e7Smrj 		 * processing on unmap since it isn't mapped in the
121412f080e7Smrj 		 * first place.
12157c478bd9Sstevel@tonic-gate 		 */
12167c478bd9Sstevel@tonic-gate 		return (DDI_SUCCESS);
12177c478bd9Sstevel@tonic-gate 	}
12187c478bd9Sstevel@tonic-gate 
12197c478bd9Sstevel@tonic-gate 	/*
122012f080e7Smrj 	 * Memory space
12217c478bd9Sstevel@tonic-gate 	 */
122212f080e7Smrj 	pgoffset = (uintptr_t)addr & MMU_PAGEOFFSET;
122312f080e7Smrj 	npages = mmu_btopr(rp->regspec_size + pgoffset);
122412f080e7Smrj 	hat_unload(kas.a_hat, addr - pgoffset, ptob(npages), HAT_UNLOAD_UNLOCK);
122512f080e7Smrj 	device_arena_free(addr - pgoffset, ptob(npages));
12267c478bd9Sstevel@tonic-gate 
12277c478bd9Sstevel@tonic-gate 	/*
122812f080e7Smrj 	 * Destroy the pointer - the mapping has logically gone
12297c478bd9Sstevel@tonic-gate 	 */
123012f080e7Smrj 	*vaddrp = NULL;
12317c478bd9Sstevel@tonic-gate 
12327c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
12337c478bd9Sstevel@tonic-gate }
12347c478bd9Sstevel@tonic-gate 
123512f080e7Smrj 
123612f080e7Smrj /*
123712f080e7Smrj  * rootnex_map_handle()
123812f080e7Smrj  *
123912f080e7Smrj  */
12407c478bd9Sstevel@tonic-gate static int
124112f080e7Smrj rootnex_map_handle(ddi_map_req_t *mp)
12427c478bd9Sstevel@tonic-gate {
1243843e1988Sjohnlev 	rootnex_addr_t rbase;
124412f080e7Smrj 	ddi_acc_hdl_t *hp;
124512f080e7Smrj 	uint_t pgoffset;
124612f080e7Smrj 	struct regspec *rp;
1247843e1988Sjohnlev 	paddr_t pbase;
12487c478bd9Sstevel@tonic-gate 
124912f080e7Smrj 	rp = mp->map_obj.rp;
12507c478bd9Sstevel@tonic-gate 
125112f080e7Smrj #ifdef	DDI_MAP_DEBUG
125212f080e7Smrj 	ddi_map_debug(
125312f080e7Smrj 	    "rootnex_map_handle: <0x%x 0x%x 0x%x> handle 0x%x\n",
125412f080e7Smrj 	    rp->regspec_bustype, rp->regspec_addr,
125512f080e7Smrj 	    rp->regspec_size, mp->map_handlep);
125612f080e7Smrj #endif	/* DDI_MAP_DEBUG */
12577c478bd9Sstevel@tonic-gate 
12587c478bd9Sstevel@tonic-gate 	/*
125912f080e7Smrj 	 * I/O or memory mapping:
126012f080e7Smrj 	 *
126112f080e7Smrj 	 *	<bustype=0, addr=x, len=x>: memory
126212f080e7Smrj 	 *	<bustype=1, addr=x, len=x>: i/o
126312f080e7Smrj 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
12647c478bd9Sstevel@tonic-gate 	 */
126512f080e7Smrj 	if (rp->regspec_bustype != 0) {
126612f080e7Smrj 		/*
126712f080e7Smrj 		 * This refers to I/O space, and we don't support "mapping"
126812f080e7Smrj 		 * I/O space to a user.
126912f080e7Smrj 		 */
12707c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
12717c478bd9Sstevel@tonic-gate 	}
12727c478bd9Sstevel@tonic-gate 
12737c478bd9Sstevel@tonic-gate 	/*
127412f080e7Smrj 	 * Set up the hat_flags for the mapping.
12757c478bd9Sstevel@tonic-gate 	 */
127612f080e7Smrj 	hp = mp->map_handlep;
12777c478bd9Sstevel@tonic-gate 
127812f080e7Smrj 	switch (hp->ah_acc.devacc_attr_endian_flags) {
127912f080e7Smrj 	case DDI_NEVERSWAP_ACC:
128012f080e7Smrj 		hp->ah_hat_flags = HAT_NEVERSWAP | HAT_STRICTORDER;
12817c478bd9Sstevel@tonic-gate 		break;
128212f080e7Smrj 	case DDI_STRUCTURE_LE_ACC:
128312f080e7Smrj 		hp->ah_hat_flags = HAT_STRUCTURE_LE;
12847c478bd9Sstevel@tonic-gate 		break;
128512f080e7Smrj 	case DDI_STRUCTURE_BE_ACC:
12867c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
12877c478bd9Sstevel@tonic-gate 	default:
128812f080e7Smrj 		return (DDI_REGS_ACC_CONFLICT);
12897c478bd9Sstevel@tonic-gate 	}
12907c478bd9Sstevel@tonic-gate 
129112f080e7Smrj 	switch (hp->ah_acc.devacc_attr_dataorder) {
129212f080e7Smrj 	case DDI_STRICTORDER_ACC:
12937c478bd9Sstevel@tonic-gate 		break;
129412f080e7Smrj 	case DDI_UNORDERED_OK_ACC:
129512f080e7Smrj 		hp->ah_hat_flags |= HAT_UNORDERED_OK;
12967c478bd9Sstevel@tonic-gate 		break;
129712f080e7Smrj 	case DDI_MERGING_OK_ACC:
129812f080e7Smrj 		hp->ah_hat_flags |= HAT_MERGING_OK;
12997c478bd9Sstevel@tonic-gate 		break;
130012f080e7Smrj 	case DDI_LOADCACHING_OK_ACC:
130112f080e7Smrj 		hp->ah_hat_flags |= HAT_LOADCACHING_OK;
130212f080e7Smrj 		break;
130312f080e7Smrj 	case DDI_STORECACHING_OK_ACC:
130412f080e7Smrj 		hp->ah_hat_flags |= HAT_STORECACHING_OK;
130512f080e7Smrj 		break;
13067c478bd9Sstevel@tonic-gate 	default:
13077c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
13087c478bd9Sstevel@tonic-gate 	}
13097c478bd9Sstevel@tonic-gate 
1310843e1988Sjohnlev 	rbase = (rootnex_addr_t)rp->regspec_addr &
1311843e1988Sjohnlev 	    (~(rootnex_addr_t)MMU_PAGEOFFSET);
1312843e1988Sjohnlev 	pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET;
13137c478bd9Sstevel@tonic-gate 
131412f080e7Smrj 	if (rp->regspec_size == 0)
131512f080e7Smrj 		return (DDI_ME_INVAL);
13167c478bd9Sstevel@tonic-gate 
1317843e1988Sjohnlev #ifdef __xpv
1318843e1988Sjohnlev 	/*
1319843e1988Sjohnlev 	 * If we're dom0, we're using a real device so we need to translate
1320843e1988Sjohnlev 	 * the MA to a PA.
1321843e1988Sjohnlev 	 */
1322843e1988Sjohnlev 	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
1323843e1988Sjohnlev 		pbase = pfn_to_pa(xen_assign_pfn(mmu_btop(rbase))) |
1324843e1988Sjohnlev 		    (rbase & MMU_PAGEOFFSET);
1325843e1988Sjohnlev 	} else {
1326843e1988Sjohnlev 		pbase = rbase;
1327843e1988Sjohnlev 	}
1328843e1988Sjohnlev #else
1329843e1988Sjohnlev 	pbase = rbase;
1330843e1988Sjohnlev #endif
1331843e1988Sjohnlev 
1332843e1988Sjohnlev 	hp->ah_pfn = mmu_btop(pbase);
133312f080e7Smrj 	hp->ah_pnum = mmu_btopr(rp->regspec_size + pgoffset);
13347c478bd9Sstevel@tonic-gate 
13357c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
13367c478bd9Sstevel@tonic-gate }
13377c478bd9Sstevel@tonic-gate 
133812f080e7Smrj 
133912f080e7Smrj 
13407c478bd9Sstevel@tonic-gate /*
134112f080e7Smrj  * ************************
134212f080e7Smrj  *  interrupt related code
134312f080e7Smrj  * ************************
13447c478bd9Sstevel@tonic-gate  */
13457c478bd9Sstevel@tonic-gate 
13467c478bd9Sstevel@tonic-gate /*
134712f080e7Smrj  * rootnex_intr_ops()
13487c478bd9Sstevel@tonic-gate  *	bus_intr_op() function for interrupt support
13497c478bd9Sstevel@tonic-gate  */
13507c478bd9Sstevel@tonic-gate /* ARGSUSED */
13517c478bd9Sstevel@tonic-gate static int
13527c478bd9Sstevel@tonic-gate rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op,
13537c478bd9Sstevel@tonic-gate     ddi_intr_handle_impl_t *hdlp, void *result)
13547c478bd9Sstevel@tonic-gate {
13557c478bd9Sstevel@tonic-gate 	struct intrspec			*ispec;
13567c478bd9Sstevel@tonic-gate 	struct ddi_parent_private_data	*pdp;
13577c478bd9Sstevel@tonic-gate 
13587c478bd9Sstevel@tonic-gate 	DDI_INTR_NEXDBG((CE_CONT,
13597c478bd9Sstevel@tonic-gate 	    "rootnex_intr_ops: pdip = %p, rdip = %p, intr_op = %x, hdlp = %p\n",
13607c478bd9Sstevel@tonic-gate 	    (void *)pdip, (void *)rdip, intr_op, (void *)hdlp));
13617c478bd9Sstevel@tonic-gate 
13627c478bd9Sstevel@tonic-gate 	/* Process the interrupt operation */
13637c478bd9Sstevel@tonic-gate 	switch (intr_op) {
13647c478bd9Sstevel@tonic-gate 	case DDI_INTROP_GETCAP:
13657c478bd9Sstevel@tonic-gate 		/* First check with pcplusmp */
13667c478bd9Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
13677c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13687c478bd9Sstevel@tonic-gate 
13697c478bd9Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_CAP, result)) {
13707c478bd9Sstevel@tonic-gate 			*(int *)result = 0;
13717c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13727c478bd9Sstevel@tonic-gate 		}
13737c478bd9Sstevel@tonic-gate 		break;
13747c478bd9Sstevel@tonic-gate 	case DDI_INTROP_SETCAP:
13757c478bd9Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
13767c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13777c478bd9Sstevel@tonic-gate 
13787c478bd9Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_CAP, result))
13797c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13807c478bd9Sstevel@tonic-gate 		break;
13817c478bd9Sstevel@tonic-gate 	case DDI_INTROP_ALLOC:
13827c478bd9Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
13837c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13847c478bd9Sstevel@tonic-gate 		hdlp->ih_pri = ispec->intrspec_pri;
13857c478bd9Sstevel@tonic-gate 		*(int *)result = hdlp->ih_scratch1;
13867c478bd9Sstevel@tonic-gate 		break;
13877c478bd9Sstevel@tonic-gate 	case DDI_INTROP_FREE:
13887c478bd9Sstevel@tonic-gate 		pdp = ddi_get_parent_data(rdip);
13897c478bd9Sstevel@tonic-gate 		/*
13907c478bd9Sstevel@tonic-gate 		 * Special case for the 'pcic' driver only.
13917c478bd9Sstevel@tonic-gate 		 * If an intrspec was created for it, clean it up here.
13927c478bd9Sstevel@tonic-gate 		 * See detailed comments on this in the function
13937c478bd9Sstevel@tonic-gate 		 * rootnex_get_ispec().
13947c478bd9Sstevel@tonic-gate 		 */
13957c478bd9Sstevel@tonic-gate 		if (pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) {
13967c478bd9Sstevel@tonic-gate 			kmem_free(pdp->par_intr, sizeof (struct intrspec) *
13977c478bd9Sstevel@tonic-gate 			    pdp->par_nintr);
13987c478bd9Sstevel@tonic-gate 			/*
13997c478bd9Sstevel@tonic-gate 			 * Set it to NULL so that the
14007c478bd9Sstevel@tonic-gate 			 * DDI framework doesn't free it again.
14017c478bd9Sstevel@tonic-gate 			 */
14027c478bd9Sstevel@tonic-gate 			pdp->par_intr = NULL;
14037c478bd9Sstevel@tonic-gate 			pdp->par_nintr = 0;
14047c478bd9Sstevel@tonic-gate 		}
14057c478bd9Sstevel@tonic-gate 		break;
14067c478bd9Sstevel@tonic-gate 	case DDI_INTROP_GETPRI:
14077c478bd9Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
14087c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14097c478bd9Sstevel@tonic-gate 		*(int *)result = ispec->intrspec_pri;
14107c478bd9Sstevel@tonic-gate 		break;
14117c478bd9Sstevel@tonic-gate 	case DDI_INTROP_SETPRI:
14127c478bd9Sstevel@tonic-gate 		/* Validate the interrupt priority passed to us */
14137c478bd9Sstevel@tonic-gate 		if (*(int *)result > LOCK_LEVEL)
14147c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14157c478bd9Sstevel@tonic-gate 
14167c478bd9Sstevel@tonic-gate 		/* Ensure that PSM is all initialized and ispec is ok */
14177c478bd9Sstevel@tonic-gate 		if ((psm_intr_ops == NULL) ||
14187c478bd9Sstevel@tonic-gate 		    ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL))
14197c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14207c478bd9Sstevel@tonic-gate 
14217c478bd9Sstevel@tonic-gate 		/* Change the priority */
14227c478bd9Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_PRI, result) ==
14237c478bd9Sstevel@tonic-gate 		    PSM_FAILURE)
14247c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14257c478bd9Sstevel@tonic-gate 
14267c478bd9Sstevel@tonic-gate 		/* update the ispec with the new priority */
14277c478bd9Sstevel@tonic-gate 		ispec->intrspec_pri =  *(int *)result;
14287c478bd9Sstevel@tonic-gate 		break;
14297c478bd9Sstevel@tonic-gate 	case DDI_INTROP_ADDISR:
14307c478bd9Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
14317c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14327c478bd9Sstevel@tonic-gate 		ispec->intrspec_func = hdlp->ih_cb_func;
14337c478bd9Sstevel@tonic-gate 		break;
14347c478bd9Sstevel@tonic-gate 	case DDI_INTROP_REMISR:
14357c478bd9Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
14367c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14377c478bd9Sstevel@tonic-gate 		ispec->intrspec_func = (uint_t (*)()) 0;
14387c478bd9Sstevel@tonic-gate 		break;
14397c478bd9Sstevel@tonic-gate 	case DDI_INTROP_ENABLE:
14407c478bd9Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
14417c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14427c478bd9Sstevel@tonic-gate 
14437c478bd9Sstevel@tonic-gate 		/* Call psm_intr_ops() to translate the irq with the dip */
14447c478bd9Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14457c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14467c478bd9Sstevel@tonic-gate 
14477a364d25Sschwartz 		((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
14487c478bd9Sstevel@tonic-gate 		(void) (*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_XLATE_VECTOR,
14497c478bd9Sstevel@tonic-gate 		    (int *)&hdlp->ih_vector);
14507c478bd9Sstevel@tonic-gate 
14517c478bd9Sstevel@tonic-gate 		/* Add the interrupt handler */
14527c478bd9Sstevel@tonic-gate 		if (!add_avintr((void *)hdlp, ispec->intrspec_pri,
14537c478bd9Sstevel@tonic-gate 		    hdlp->ih_cb_func, DEVI(rdip)->devi_name, hdlp->ih_vector,
14547a364d25Sschwartz 		    hdlp->ih_cb_arg1, hdlp->ih_cb_arg2, NULL, rdip))
14557c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14567c478bd9Sstevel@tonic-gate 		break;
14577c478bd9Sstevel@tonic-gate 	case DDI_INTROP_DISABLE:
14587c478bd9Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
14597c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14607c478bd9Sstevel@tonic-gate 
14617c478bd9Sstevel@tonic-gate 		/* Call psm_ops() to translate irq with the dip */
14627c478bd9Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14637c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14647c478bd9Sstevel@tonic-gate 
14657a364d25Sschwartz 		((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
14667c478bd9Sstevel@tonic-gate 		(void) (*psm_intr_ops)(rdip, hdlp,
14677c478bd9Sstevel@tonic-gate 		    PSM_INTR_OP_XLATE_VECTOR, (int *)&hdlp->ih_vector);
14687c478bd9Sstevel@tonic-gate 
14697c478bd9Sstevel@tonic-gate 		/* Remove the interrupt handler */
14707c478bd9Sstevel@tonic-gate 		rem_avintr((void *)hdlp, ispec->intrspec_pri,
14717c478bd9Sstevel@tonic-gate 		    hdlp->ih_cb_func, hdlp->ih_vector);
14727c478bd9Sstevel@tonic-gate 		break;
14737c478bd9Sstevel@tonic-gate 	case DDI_INTROP_SETMASK:
14747c478bd9Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14757c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14767c478bd9Sstevel@tonic-gate 
14777c478bd9Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_MASK, NULL))
14787c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14797c478bd9Sstevel@tonic-gate 		break;
14807c478bd9Sstevel@tonic-gate 	case DDI_INTROP_CLRMASK:
14817c478bd9Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14827c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14837c478bd9Sstevel@tonic-gate 
14847c478bd9Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_CLEAR_MASK, NULL))
14857c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14867c478bd9Sstevel@tonic-gate 		break;
14877c478bd9Sstevel@tonic-gate 	case DDI_INTROP_GETPENDING:
14887c478bd9Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14897c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14907c478bd9Sstevel@tonic-gate 
14917c478bd9Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_PENDING,
14927c478bd9Sstevel@tonic-gate 		    result)) {
14937c478bd9Sstevel@tonic-gate 			*(int *)result = 0;
14947c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14957c478bd9Sstevel@tonic-gate 		}
14967c478bd9Sstevel@tonic-gate 		break;
1497a54f81fbSanish 	case DDI_INTROP_NAVAIL:
14987c478bd9Sstevel@tonic-gate 	case DDI_INTROP_NINTRS:
1499a54f81fbSanish 		*(int *)result = i_ddi_get_intx_nintrs(rdip);
1500a54f81fbSanish 		if (*(int *)result == 0) {
15017c478bd9Sstevel@tonic-gate 			/*
15027c478bd9Sstevel@tonic-gate 			 * Special case for the 'pcic' driver only. This
15037c478bd9Sstevel@tonic-gate 			 * driver is a child of the 'isa' and 'rootnex' drivers.
15047c478bd9Sstevel@tonic-gate 			 *
15057c478bd9Sstevel@tonic-gate 			 * See detailed comments on this in the function
15067c478bd9Sstevel@tonic-gate 			 * rootnex_get_ispec().
15077c478bd9Sstevel@tonic-gate 			 *
15087c478bd9Sstevel@tonic-gate 			 * Children of 'pcic' send the 'NINTRS' request all the
15097c478bd9Sstevel@tonic-gate 			 * way to the rootnex driver. But the 'pdp->par_nintr'
15107c478bd9Sstevel@tonic-gate 			 * field may not be initialized. So, we fake it here
15117c478bd9Sstevel@tonic-gate 			 * to return 1 (a la what PCMCIA nexus does).
15127c478bd9Sstevel@tonic-gate 			 */
15137c478bd9Sstevel@tonic-gate 			if (strcmp(ddi_get_name(rdip), "pcic") == 0)
15147c478bd9Sstevel@tonic-gate 				*(int *)result = 1;
1515a54f81fbSanish 			else
1516a54f81fbSanish 				return (DDI_FAILURE);
15177c478bd9Sstevel@tonic-gate 		}
15187c478bd9Sstevel@tonic-gate 		break;
15197c478bd9Sstevel@tonic-gate 	case DDI_INTROP_SUPPORTED_TYPES:
1520a54f81fbSanish 		*(int *)result = DDI_INTR_TYPE_FIXED;	/* Always ... */
15217c478bd9Sstevel@tonic-gate 		break;
15227c478bd9Sstevel@tonic-gate 	default:
15237c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
15247c478bd9Sstevel@tonic-gate 	}
15257c478bd9Sstevel@tonic-gate 
15267c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
15277c478bd9Sstevel@tonic-gate }
15287c478bd9Sstevel@tonic-gate 
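/*
 * For context, a minimal sketch (illustrative, for a typical FIXED-interrupt
 * leaf driver) of how the DDI interrupt calls funnel into rootnex_intr_ops()
 * through the bus_intr_op interface:
 *
 *	ddi_intr_alloc(dip, &hdl, DDI_INTR_TYPE_FIXED, 0, 1, &actual, 0);
 *						-> DDI_INTROP_ALLOC
 *	ddi_intr_add_handler(hdl, my_isr, arg1, arg2);
 *						-> DDI_INTROP_ADDISR
 *	ddi_intr_enable(hdl);			-> DDI_INTROP_ENABLE
 *	...
 *	ddi_intr_disable(hdl);			-> DDI_INTROP_DISABLE
 *	ddi_intr_remove_handler(hdl);		-> DDI_INTROP_REMISR
 *	ddi_intr_free(hdl);			-> DDI_INTROP_FREE
 */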
15297c478bd9Sstevel@tonic-gate 
15307c478bd9Sstevel@tonic-gate /*
153112f080e7Smrj  * rootnex_get_ispec()
153212f080e7Smrj  *	convert an interrupt number to an interrupt specification.
153312f080e7Smrj  *	The interrupt number determines which interrupt spec will be
153412f080e7Smrj  *	returned if more than one exists.
153512f080e7Smrj  *
153612f080e7Smrj  *	Look into the parent private data area of the 'rdip' to find out
153712f080e7Smrj  *	the interrupt specification.  First check to make sure there is
153812f080e7Smrj 	 *	one that matches "inumber" and then return a pointer to it.
153912f080e7Smrj  *
154012f080e7Smrj  *	Return NULL if one could not be found.
154112f080e7Smrj  *
154212f080e7Smrj  *	NOTE: This is needed for rootnex_intr_ops()
15437c478bd9Sstevel@tonic-gate  */
154412f080e7Smrj static struct intrspec *
154512f080e7Smrj rootnex_get_ispec(dev_info_t *rdip, int inum)
15467c478bd9Sstevel@tonic-gate {
154712f080e7Smrj 	struct ddi_parent_private_data *pdp = ddi_get_parent_data(rdip);
15487c478bd9Sstevel@tonic-gate 
15497c478bd9Sstevel@tonic-gate 	/*
155012f080e7Smrj 	 * Special case handling for drivers that provide their own
155112f080e7Smrj 	 * intrspec structures instead of relying on the DDI framework.
155212f080e7Smrj 	 *
155312f080e7Smrj 	 * A broken hardware driver in ON could potentially provide its
155412f080e7Smrj 	 * own intrspec structure, instead of relying on the DDI framework.
155512f080e7Smrj 	 * If these drivers are children of 'rootnex' then we need to
155612f080e7Smrj 	 * continue to provide backward compatibility to them here.
155712f080e7Smrj 	 *
155812f080e7Smrj 	 * The following check is a special case for the 'pcic' driver, which
155912f080e7Smrj 	 * was found to have broken hardware and thereby provides its own intrspec.
156012f080e7Smrj 	 *
156112f080e7Smrj 	 * Verbatim comments from this driver are shown here:
156212f080e7Smrj 	 * "Don't use the ddi_add_intr since we don't have a
156312f080e7Smrj 	 * default intrspec in all cases."
156412f080e7Smrj 	 *
156512f080e7Smrj 	 * Since an 'ispec' may not always be created for it,
156612f080e7Smrj 	 * check for that and create one if needed.
156712f080e7Smrj 	 *
156812f080e7Smrj 	 * NOTE: Currently 'pcic' is the only driver found to do this.
15697c478bd9Sstevel@tonic-gate 	 */
157012f080e7Smrj 	if (!pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) {
157112f080e7Smrj 		pdp->par_nintr = 1;
157212f080e7Smrj 		pdp->par_intr = kmem_zalloc(sizeof (struct intrspec) *
157312f080e7Smrj 		    pdp->par_nintr, KM_SLEEP);
157412f080e7Smrj 	}
157512f080e7Smrj 
157612f080e7Smrj 	/* Validate the interrupt number */
157712f080e7Smrj 	if (inum >= pdp->par_nintr)
157812f080e7Smrj 		return (NULL);
157912f080e7Smrj 
158012f080e7Smrj 	/* Get the interrupt structure pointer and return that */
158112f080e7Smrj 	return ((struct intrspec *)&pdp->par_intr[inum]);
158212f080e7Smrj }
158312f080e7Smrj 
158412f080e7Smrj 
158512f080e7Smrj /*
158612f080e7Smrj  * ******************
158712f080e7Smrj  *  dma related code
158812f080e7Smrj  * ******************
158912f080e7Smrj  */
159012f080e7Smrj 
159112f080e7Smrj /*ARGSUSED*/
159212f080e7Smrj static int
1593*20906b23SVikram Hegde rootnex_coredma_allochdl(dev_info_t *dip, dev_info_t *rdip,
1594*20906b23SVikram Hegde     ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
1595*20906b23SVikram Hegde     ddi_dma_handle_t *handlep)
159612f080e7Smrj {
159712f080e7Smrj 	uint64_t maxsegmentsize_ll;
159812f080e7Smrj 	uint_t maxsegmentsize;
159912f080e7Smrj 	ddi_dma_impl_t *hp;
160012f080e7Smrj 	rootnex_dma_t *dma;
160112f080e7Smrj 	uint64_t count_max;
160212f080e7Smrj 	uint64_t seg;
160312f080e7Smrj 	int kmflag;
160412f080e7Smrj 	int e;
160512f080e7Smrj 
160612f080e7Smrj 
160712f080e7Smrj 	/* convert our sleep flags */
160812f080e7Smrj 	if (waitfp == DDI_DMA_SLEEP) {
160912f080e7Smrj 		kmflag = KM_SLEEP;
161012f080e7Smrj 	} else {
161112f080e7Smrj 		kmflag = KM_NOSLEEP;
161212f080e7Smrj 	}
161312f080e7Smrj 
161412f080e7Smrj 	/*
161512f080e7Smrj 	 * We try to do only one memory allocation here. We'll do a little
161612f080e7Smrj 	 * pointer manipulation later. If the bind ends up taking more than
161712f080e7Smrj 	 * our prealloc's space, we'll have to allocate more memory in the
161812f080e7Smrj 	 * bind operation. Not great, but much better than before and the
161912f080e7Smrj 	 * best we can do with the current bind interfaces.
162012f080e7Smrj 	 */
162112f080e7Smrj 	hp = kmem_cache_alloc(rootnex_state->r_dmahdl_cache, kmflag);
162212f080e7Smrj 	if (hp == NULL) {
162312f080e7Smrj 		if (waitfp != DDI_DMA_DONTWAIT) {
162412f080e7Smrj 			ddi_set_callback(waitfp, arg,
162512f080e7Smrj 			    &rootnex_state->r_dvma_call_list_id);
162612f080e7Smrj 		}
162712f080e7Smrj 		return (DDI_DMA_NORESOURCES);
162812f080e7Smrj 	}
162912f080e7Smrj 
163012f080e7Smrj 	/* Do our pointer manipulation now, align the structures */
163112f080e7Smrj 	hp->dmai_private = (void *)(((uintptr_t)hp +
163212f080e7Smrj 	    (uintptr_t)sizeof (ddi_dma_impl_t) + 0x7) & ~0x7);
163312f080e7Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
163412f080e7Smrj 	dma->dp_prealloc_buffer = (uchar_t *)(((uintptr_t)dma +
163512f080e7Smrj 	    sizeof (rootnex_dma_t) + 0x7) & ~0x7);
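	/*
	 * The (x + 0x7) & ~0x7 idiom rounds an address up to the next 8-byte
	 * boundary. With a hypothetical address, if hp plus the impl size
	 * ends at 0xf013, then (0xf013 + 0x7) & ~0x7 == 0xf018, so both
	 * dmai_private and dp_prealloc_buffer are always 8-byte aligned.
	 */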
163612f080e7Smrj 
163712f080e7Smrj 	/* setup the handle */
163812f080e7Smrj 	rootnex_clean_dmahdl(hp);
163912f080e7Smrj 	dma->dp_dip = rdip;
164012f080e7Smrj 	dma->dp_sglinfo.si_min_addr = attr->dma_attr_addr_lo;
164112f080e7Smrj 	dma->dp_sglinfo.si_max_addr = attr->dma_attr_addr_hi;
164212f080e7Smrj 	hp->dmai_minxfer = attr->dma_attr_minxfer;
164312f080e7Smrj 	hp->dmai_burstsizes = attr->dma_attr_burstsizes;
164412f080e7Smrj 	hp->dmai_rdip = rdip;
164512f080e7Smrj 	hp->dmai_attr = *attr;
164612f080e7Smrj 
164712f080e7Smrj 	/* we don't need to worry about the SPL since we do a tryenter */
164812f080e7Smrj 	mutex_init(&dma->dp_mutex, NULL, MUTEX_DRIVER, NULL);
164912f080e7Smrj 
165012f080e7Smrj 	/*
165112f080e7Smrj 	 * Figure out our maximum segment size. If the segment size is greater
165212f080e7Smrj 	 * than 4G, we will limit it to (4G - 1) since the max size of a dma
165312f080e7Smrj 	 * object (ddi_dma_obj_t.dmao_size) is 32 bits. dma_attr_seg and
165412f080e7Smrj 	 * dma_attr_count_max are size-1 type values.
165512f080e7Smrj 	 *
165612f080e7Smrj 	 * Maximum segment size is the largest physically contiguous chunk of
165712f080e7Smrj 	 * memory that we can return from a bind (i.e. the maximum size of a
165812f080e7Smrj 	 * single cookie).
165912f080e7Smrj 	 */
166012f080e7Smrj 
166112f080e7Smrj 	/* handle the rollover cases */
166212f080e7Smrj 	seg = attr->dma_attr_seg + 1;
166312f080e7Smrj 	if (seg < attr->dma_attr_seg) {
166412f080e7Smrj 		seg = attr->dma_attr_seg;
166512f080e7Smrj 	}
166612f080e7Smrj 	count_max = attr->dma_attr_count_max + 1;
166712f080e7Smrj 	if (count_max < attr->dma_attr_count_max) {
166812f080e7Smrj 		count_max = attr->dma_attr_count_max;
166912f080e7Smrj 	}
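	/*
	 * Illustration: a device that can cross any boundary advertises
	 * dma_attr_seg == 0xFFFFFFFFFFFFFFFF, so seg + 1 wraps to 0; the
	 * rollover check above catches this and keeps seg at the already
	 * maximal size-1 value instead of letting it become 0.
	 */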
167012f080e7Smrj 
167112f080e7Smrj 	/*
167212f080e7Smrj 	 * granularity may or may not be a power of two. If it isn't, we can't
167312f080e7Smrj 	 * use a simple mask.
167412f080e7Smrj 	 */
167512f080e7Smrj 	if (attr->dma_attr_granular & (attr->dma_attr_granular - 1)) {
167612f080e7Smrj 		dma->dp_granularity_power_2 = B_FALSE;
167712f080e7Smrj 	} else {
167812f080e7Smrj 		dma->dp_granularity_power_2 = B_TRUE;
167912f080e7Smrj 	}
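	/*
	 * The x & (x - 1) test clears the lowest set bit and is zero only
	 * for powers of two. E.g. (illustrative values) 0x1000 & 0xFFF == 0,
	 * a power of two, so a simple mask works; 0x180 & 0x17F == 0x100,
	 * not a power of two, so the modulo path below must be used.
	 */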
168012f080e7Smrj 
168112f080e7Smrj 	/*
168212f080e7Smrj 	 * maxxfer should be a whole multiple of granularity. If we're going to
168312f080e7Smrj 	 * break up a window because we're greater than maxxfer, we might as
168412f080e7Smrj 	 * well make sure maxxfer is a whole multiple so we don't have to
168512f080e7Smrj 	 * worry about trimming the window later on for this case.
168612f080e7Smrj 	 */
168712f080e7Smrj 	if (attr->dma_attr_granular > 1) {
168812f080e7Smrj 		if (dma->dp_granularity_power_2) {
168912f080e7Smrj 			dma->dp_maxxfer = attr->dma_attr_maxxfer -
169012f080e7Smrj 			    (attr->dma_attr_maxxfer &
169112f080e7Smrj 			    (attr->dma_attr_granular - 1));
169212f080e7Smrj 		} else {
169312f080e7Smrj 			dma->dp_maxxfer = attr->dma_attr_maxxfer -
169412f080e7Smrj 			    (attr->dma_attr_maxxfer % attr->dma_attr_granular);
169512f080e7Smrj 		}
169612f080e7Smrj 	} else {
169712f080e7Smrj 		dma->dp_maxxfer = attr->dma_attr_maxxfer;
169812f080e7Smrj 	}
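	/*
	 * Worked example with hypothetical attributes: a power-of-two
	 * granularity of 0x200 and dma_attr_maxxfer == 0x10300 trims to
	 * 0x10300 - (0x10300 & 0x1FF) == 0x10200 (0x81 whole granules);
	 * a non-power-of-two granularity of 0x180 with maxxfer == 0x10000
	 * trims to 0x10000 - (0x10000 % 0x180) == 0xFF00 (170 granules).
	 */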
169912f080e7Smrj 
170012f080e7Smrj 	maxsegmentsize_ll = MIN(seg, dma->dp_maxxfer);
170112f080e7Smrj 	maxsegmentsize_ll = MIN(maxsegmentsize_ll, count_max);
170212f080e7Smrj 	if (maxsegmentsize_ll == 0 || (maxsegmentsize_ll > 0xFFFFFFFF)) {
170312f080e7Smrj 		maxsegmentsize = 0xFFFFFFFF;
170412f080e7Smrj 	} else {
170512f080e7Smrj 		maxsegmentsize = maxsegmentsize_ll;
170612f080e7Smrj 	}
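	/*
	 * Illustration with hypothetical attributes: seg == 2^40 after the
	 * rollover fixup, dp_maxxfer == 2^31 and count_max == 2^24 give
	 * maxsegmentsize_ll == 2^24 == 0x1000000, which fits in 32 bits and
	 * is used as-is; anything over 0xFFFFFFFF is clamped because a dma
	 * object's dmao_size is only 32 bits.
	 */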
170712f080e7Smrj 	dma->dp_sglinfo.si_max_cookie_size = maxsegmentsize;
170812f080e7Smrj 	dma->dp_sglinfo.si_segmask = attr->dma_attr_seg;
170912f080e7Smrj 
171012f080e7Smrj 	/* check the ddi_dma_attr arg to make sure it makes a little sense */
171112f080e7Smrj 	if (rootnex_alloc_check_parms) {
171212f080e7Smrj 		e = rootnex_valid_alloc_parms(attr, maxsegmentsize);
171312f080e7Smrj 		if (e != DDI_SUCCESS) {
171412f080e7Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ALLOC_FAIL]);
171512f080e7Smrj 			(void) rootnex_dma_freehdl(dip, rdip,
171612f080e7Smrj 			    (ddi_dma_handle_t)hp);
171712f080e7Smrj 			return (e);
171812f080e7Smrj 		}
171912f080e7Smrj 	}
172012f080e7Smrj 
172112f080e7Smrj 	*handlep = (ddi_dma_handle_t)hp;
172212f080e7Smrj 
172312f080e7Smrj 	ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
172412f080e7Smrj 	DTRACE_PROBE1(rootnex__alloc__handle, uint64_t,
172512f080e7Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
172612f080e7Smrj 
172712f080e7Smrj 	return (DDI_SUCCESS);
172812f080e7Smrj }
172912f080e7Smrj 
173012f080e7Smrj 
173112f080e7Smrj /*
1732*20906b23SVikram Hegde  * rootnex_dma_allochdl()
1733*20906b23SVikram Hegde  *    called from ddi_dma_alloc_handle().
173412f080e7Smrj  */
1735*20906b23SVikram Hegde static int
1736*20906b23SVikram Hegde rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
1737*20906b23SVikram Hegde     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
1738*20906b23SVikram Hegde {
1739*20906b23SVikram Hegde #if !defined(__xpv)
1740*20906b23SVikram Hegde 	uint_t error = ENOTSUP;
1741*20906b23SVikram Hegde 	int retval;
1742*20906b23SVikram Hegde 
1743*20906b23SVikram Hegde 	retval = iommulib_nex_open(rdip, &error);
1744*20906b23SVikram Hegde 
1745*20906b23SVikram Hegde 	if (retval != DDI_SUCCESS && error == ENOTSUP) {
1746*20906b23SVikram Hegde 		/* No IOMMU */
1747*20906b23SVikram Hegde 		return (rootnex_coredma_allochdl(dip, rdip, attr, waitfp, arg,
1748*20906b23SVikram Hegde 		    handlep));
1749*20906b23SVikram Hegde 	} else if (retval != DDI_SUCCESS) {
1750*20906b23SVikram Hegde 		return (DDI_FAILURE);
1751*20906b23SVikram Hegde 	}
1752*20906b23SVikram Hegde 
1753*20906b23SVikram Hegde 	ASSERT(IOMMU_USED(rdip));
1754*20906b23SVikram Hegde 
1755*20906b23SVikram Hegde 	/* has an IOMMU */
1756*20906b23SVikram Hegde 	return (iommulib_nexdma_allochdl(dip, rdip, attr,
1757*20906b23SVikram Hegde 	    waitfp, arg, handlep));
1758*20906b23SVikram Hegde #else
1759*20906b23SVikram Hegde 	return (rootnex_coredma_allochdl(dip, rdip, attr, waitfp, arg,
1760*20906b23SVikram Hegde 	    handlep));
1761*20906b23SVikram Hegde #endif
1762*20906b23SVikram Hegde }
1763*20906b23SVikram Hegde 
176412f080e7Smrj /*ARGSUSED*/
176512f080e7Smrj static int
1766*20906b23SVikram Hegde rootnex_coredma_freehdl(dev_info_t *dip, dev_info_t *rdip,
1767*20906b23SVikram Hegde     ddi_dma_handle_t handle)
176812f080e7Smrj {
176912f080e7Smrj 	ddi_dma_impl_t *hp;
177012f080e7Smrj 	rootnex_dma_t *dma;
177112f080e7Smrj 
177212f080e7Smrj 
177312f080e7Smrj 	hp = (ddi_dma_impl_t *)handle;
177412f080e7Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
177512f080e7Smrj 
177612f080e7Smrj 	/* unbind should have been called first */
177712f080e7Smrj 	ASSERT(!dma->dp_inuse);
177812f080e7Smrj 
177912f080e7Smrj 	mutex_destroy(&dma->dp_mutex);
178012f080e7Smrj 	kmem_cache_free(rootnex_state->r_dmahdl_cache, hp);
178112f080e7Smrj 
178212f080e7Smrj 	ROOTNEX_PROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
178312f080e7Smrj 	DTRACE_PROBE1(rootnex__free__handle, uint64_t,
178412f080e7Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
178512f080e7Smrj 
178612f080e7Smrj 	if (rootnex_state->r_dvma_call_list_id)
178712f080e7Smrj 		ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
178812f080e7Smrj 
178912f080e7Smrj 	return (DDI_SUCCESS);
179012f080e7Smrj }
179112f080e7Smrj 
179212f080e7Smrj /*
1793*20906b23SVikram Hegde  * rootnex_dma_freehdl()
1794*20906b23SVikram Hegde  *    called from ddi_dma_free_handle().
179512f080e7Smrj  */
1796*20906b23SVikram Hegde static int
1797*20906b23SVikram Hegde rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
1798*20906b23SVikram Hegde {
1799*20906b23SVikram Hegde #if !defined(__xpv)
1800*20906b23SVikram Hegde 	if (IOMMU_USED(rdip)) {
1801*20906b23SVikram Hegde 		return (iommulib_nexdma_freehdl(dip, rdip, handle));
1802*20906b23SVikram Hegde 	}
1803*20906b23SVikram Hegde #endif
1804*20906b23SVikram Hegde 	return (rootnex_coredma_freehdl(dip, rdip, handle));
1805*20906b23SVikram Hegde }
1806*20906b23SVikram Hegde 
1807*20906b23SVikram Hegde 
180812f080e7Smrj /*ARGSUSED*/
180912f080e7Smrj static int
1810*20906b23SVikram Hegde rootnex_coredma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
1811*20906b23SVikram Hegde     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
1812*20906b23SVikram Hegde     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
181312f080e7Smrj {
181412f080e7Smrj 	rootnex_sglinfo_t *sinfo;
181512f080e7Smrj 	ddi_dma_attr_t *attr;
181612f080e7Smrj 	ddi_dma_impl_t *hp;
181712f080e7Smrj 	rootnex_dma_t *dma;
181812f080e7Smrj 	int kmflag;
181912f080e7Smrj 	int e;
182012f080e7Smrj 
182112f080e7Smrj 
182212f080e7Smrj 	hp = (ddi_dma_impl_t *)handle;
182312f080e7Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
182412f080e7Smrj 	sinfo = &dma->dp_sglinfo;
182512f080e7Smrj 	attr = &hp->dmai_attr;
182612f080e7Smrj 
182712f080e7Smrj 	hp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS;
182812f080e7Smrj 
182912f080e7Smrj 	/*
183012f080e7Smrj 	 * This is useful for debugging a driver. Not as useful in a production
183112f080e7Smrj 	 * system. The only time this will fail is if you have a driver bug.
183212f080e7Smrj 	 */
183312f080e7Smrj 	if (rootnex_bind_check_inuse) {
183412f080e7Smrj 		/*
183512f080e7Smrj 		 * No one else should ever have this lock unless someone else
183612f080e7Smrj 		 * is trying to use this handle. So contention on the lock
183712f080e7Smrj 		 * is the same as inuse being set.
183812f080e7Smrj 		 */
183912f080e7Smrj 		e = mutex_tryenter(&dma->dp_mutex);
184012f080e7Smrj 		if (e == 0) {
184112f080e7Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
184212f080e7Smrj 			return (DDI_DMA_INUSE);
184312f080e7Smrj 		}
184412f080e7Smrj 		if (dma->dp_inuse) {
184512f080e7Smrj 			mutex_exit(&dma->dp_mutex);
184612f080e7Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
184712f080e7Smrj 			return (DDI_DMA_INUSE);
184812f080e7Smrj 		}
184912f080e7Smrj 		dma->dp_inuse = B_TRUE;
185012f080e7Smrj 		mutex_exit(&dma->dp_mutex);
185112f080e7Smrj 	}
185212f080e7Smrj 
185312f080e7Smrj 	/* check the ddi_dma_attr arg to make sure it makes a little sense */
185412f080e7Smrj 	if (rootnex_bind_check_parms) {
185512f080e7Smrj 		e = rootnex_valid_bind_parms(dmareq, attr);
185612f080e7Smrj 		if (e != DDI_SUCCESS) {
185712f080e7Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
185812f080e7Smrj 			rootnex_clean_dmahdl(hp);
185912f080e7Smrj 			return (e);
186012f080e7Smrj 		}
186112f080e7Smrj 	}
186212f080e7Smrj 
186312f080e7Smrj 	/* save away the original bind info */
186412f080e7Smrj 	dma->dp_dma = dmareq->dmar_object;
186512f080e7Smrj 
1866*20906b23SVikram Hegde #if !defined(__xpv)
186786c1f4dcSVikram Hegde 	if (rootnex_state->r_intel_iommu_enabled) {
186886c1f4dcSVikram Hegde 		e = intel_iommu_map_sgl(handle, dmareq,
186986c1f4dcSVikram Hegde 		    rootnex_state->r_prealloc_cookies);
187086c1f4dcSVikram Hegde 
187186c1f4dcSVikram Hegde 		switch (e) {
187286c1f4dcSVikram Hegde 		case IOMMU_SGL_SUCCESS:
187386c1f4dcSVikram Hegde 			goto rootnex_sgl_end;
187486c1f4dcSVikram Hegde 
187586c1f4dcSVikram Hegde 		case IOMMU_SGL_DISABLE:
187686c1f4dcSVikram Hegde 			goto rootnex_sgl_start;
187786c1f4dcSVikram Hegde 
187886c1f4dcSVikram Hegde 		case IOMMU_SGL_NORESOURCES:
187986c1f4dcSVikram Hegde 			cmn_err(CE_WARN, "iommu map sgl failed for %s",
188086c1f4dcSVikram Hegde 			    ddi_node_name(dma->dp_dip));
188186c1f4dcSVikram Hegde 			rootnex_clean_dmahdl(hp);
188286c1f4dcSVikram Hegde 			return (DDI_DMA_NORESOURCES);
188386c1f4dcSVikram Hegde 
188486c1f4dcSVikram Hegde 		default:
188586c1f4dcSVikram Hegde 			cmn_err(CE_WARN,
188686c1f4dcSVikram Hegde 			    "undefined value returned from"
188786c1f4dcSVikram Hegde 			    " intel_iommu_map_sgl: %d",
188886c1f4dcSVikram Hegde 			    e);
188986c1f4dcSVikram Hegde 			rootnex_clean_dmahdl(hp);
189086c1f4dcSVikram Hegde 			return (DDI_DMA_NORESOURCES);
189186c1f4dcSVikram Hegde 		}
189286c1f4dcSVikram Hegde 	}
1893*20906b23SVikram Hegde #endif
189486c1f4dcSVikram Hegde 
189586c1f4dcSVikram Hegde rootnex_sgl_start:
189612f080e7Smrj 	/*
189712f080e7Smrj 	 * Figure out a rough estimate of what maximum number of pages this
189812f080e7Smrj 	 * buffer could use (a high estimate of course).
189912f080e7Smrj 	 */
190012f080e7Smrj 	sinfo->si_max_pages = mmu_btopr(dma->dp_dma.dmao_size) + 1;
190112f080e7Smrj 
190212f080e7Smrj 	/*
190312f080e7Smrj 	 * We'll use the pre-allocated cookies for any bind that will *always*
190412f080e7Smrj 	 * fit (it's more important to be consistent; we don't want to create
190512f080e7Smrj 	 * additional degenerate cases).
190612f080e7Smrj 	 */
190712f080e7Smrj 	if (sinfo->si_max_pages <= rootnex_state->r_prealloc_cookies) {
190812f080e7Smrj 		dma->dp_cookies = (ddi_dma_cookie_t *)dma->dp_prealloc_buffer;
190912f080e7Smrj 		dma->dp_need_to_free_cookie = B_FALSE;
191012f080e7Smrj 		DTRACE_PROBE2(rootnex__bind__prealloc, dev_info_t *, rdip,
191112f080e7Smrj 		    uint_t, sinfo->si_max_pages);
191212f080e7Smrj 
191312f080e7Smrj 	/*
191412f080e7Smrj 	 * For anything larger than that, we'll go ahead and allocate the
191512f080e7Smrj 	 * maximum number of pages we expect to see. Hopefully, we won't be
191612f080e7Smrj 	 * seeing this path in the fast path for high performance devices very
191712f080e7Smrj 	 * frequently.
191812f080e7Smrj 	 *
191912f080e7Smrj 	 * a ddi bind interface that allowed the driver to provide storage to
192012f080e7Smrj 	 * the bind interface would speed this case up.
192112f080e7Smrj 	 */
192212f080e7Smrj 	} else {
192312f080e7Smrj 		/* convert the sleep flags */
192412f080e7Smrj 		if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
192512f080e7Smrj 			kmflag =  KM_SLEEP;
192612f080e7Smrj 		} else {
192712f080e7Smrj 			kmflag =  KM_NOSLEEP;
192812f080e7Smrj 		}
192912f080e7Smrj 
193012f080e7Smrj 		/*
193112f080e7Smrj 		 * Save away how much memory we allocated. If we're doing a
193212f080e7Smrj 		 * nosleep, the alloc could fail...
193312f080e7Smrj 		 */
193412f080e7Smrj 		dma->dp_cookie_size = sinfo->si_max_pages *
193512f080e7Smrj 		    sizeof (ddi_dma_cookie_t);
193612f080e7Smrj 		dma->dp_cookies = kmem_alloc(dma->dp_cookie_size, kmflag);
193712f080e7Smrj 		if (dma->dp_cookies == NULL) {
193812f080e7Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
193912f080e7Smrj 			rootnex_clean_dmahdl(hp);
194012f080e7Smrj 			return (DDI_DMA_NORESOURCES);
194112f080e7Smrj 		}
194212f080e7Smrj 		dma->dp_need_to_free_cookie = B_TRUE;
194312f080e7Smrj 		DTRACE_PROBE2(rootnex__bind__alloc, dev_info_t *, rdip, uint_t,
194412f080e7Smrj 		    sinfo->si_max_pages);
194512f080e7Smrj 	}
194612f080e7Smrj 	hp->dmai_cookie = dma->dp_cookies;
194712f080e7Smrj 
194812f080e7Smrj 	/*
194912f080e7Smrj 	 * Get the real sgl. rootnex_get_sgl will fill in the cookie array while
195012f080e7Smrj 	 * looking at the constraints in the dma structure. It will then put some
195112f080e7Smrj 	 * additional state about the sgl in the dma struct (i.e. is the sgl
195212f080e7Smrj 	 * clean, or do we need to do some munging; how many pages need to be
195312f080e7Smrj 	 * copied, etc.)
195412f080e7Smrj 	 */
195512f080e7Smrj 	rootnex_get_sgl(&dmareq->dmar_object, dma->dp_cookies,
195612f080e7Smrj 	    &dma->dp_sglinfo);
195712f080e7Smrj 
195886c1f4dcSVikram Hegde rootnex_sgl_end:
195986c1f4dcSVikram Hegde 	ASSERT(sinfo->si_sgl_size <= sinfo->si_max_pages);
196012f080e7Smrj 	/* if we don't need a copy buffer, we don't need to sync */
196112f080e7Smrj 	if (sinfo->si_copybuf_req == 0) {
196212f080e7Smrj 		hp->dmai_rflags |= DMP_NOSYNC;
196312f080e7Smrj 	}
196412f080e7Smrj 
196512f080e7Smrj 	/*
196612f080e7Smrj 	 * if we don't need the copybuf and we don't need to do a partial,  we
196712f080e7Smrj 	 * hit the fast path. All the high performance devices should be trying
196812f080e7Smrj 	 * to hit this path. To hit this path, a device should be able to reach
196912f080e7Smrj 	 * all of memory, shouldn't try to bind more than it can transfer, and
197012f080e7Smrj 	 * the buffer shouldn't require more cookies than the driver/device can
197112f080e7Smrj 	 * handle [sgllen].
197212f080e7Smrj 	 */
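	/*
	 * Illustrative fast-path candidate (hypothetical attributes): a
	 * 64-bit capable device (addr_lo == 0, addr_hi == 2^64 - 1) binding
	 * an 8K buffer with dma_attr_sgllen >= 3 needs no copy buffer and
	 * at most 3 cookies, so it returns below without creating windows.
	 */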
197312f080e7Smrj 	if ((sinfo->si_copybuf_req == 0) &&
197412f080e7Smrj 	    (sinfo->si_sgl_size <= attr->dma_attr_sgllen) &&
197512f080e7Smrj 	    (dma->dp_dma.dmao_size < dma->dp_maxxfer)) {
197612f080e7Smrj 		/*
197785c8e0e8Sstephh 		 * If the driver supports FMA, insert the handle in the FMA DMA
197885c8e0e8Sstephh 		 * handle cache.
197985c8e0e8Sstephh 		 */
198085c8e0e8Sstephh 		if (attr->dma_attr_flags & DDI_DMA_FLAGERR) {
198185c8e0e8Sstephh 			hp->dmai_error.err_cf = rootnex_dma_check;
198285c8e0e8Sstephh 			(void) ndi_fmc_insert(rdip, DMA_HANDLE, hp, NULL);
198385c8e0e8Sstephh 		}
198485c8e0e8Sstephh 
198585c8e0e8Sstephh 		/*
198612f080e7Smrj 		 * copy out the first cookie and ccountp, set the cookie
198712f080e7Smrj 		 * pointer to the second cookie. The first cookie is passed
198812f080e7Smrj 		 * back on the stack. Additional cookies are accessed via
198912f080e7Smrj 		 * ddi_dma_nextcookie()
199012f080e7Smrj 		 */
199112f080e7Smrj 		*cookiep = dma->dp_cookies[0];
199212f080e7Smrj 		*ccountp = sinfo->si_sgl_size;
199312f080e7Smrj 		hp->dmai_cookie++;
199412f080e7Smrj 		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
199512f080e7Smrj 		hp->dmai_nwin = 1;
199612f080e7Smrj 		ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
199712f080e7Smrj 		DTRACE_PROBE3(rootnex__bind__fast, dev_info_t *, rdip, uint64_t,
199812f080e7Smrj 		    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t,
199912f080e7Smrj 		    dma->dp_dma.dmao_size);
200012f080e7Smrj 		return (DDI_DMA_MAPPED);
200112f080e7Smrj 	}
200212f080e7Smrj 
200312f080e7Smrj 	/*
200412f080e7Smrj 	 * go to the slow path, we may need to alloc more memory, create
200512f080e7Smrj 	 * multiple windows, and munge up a sgl to make the device happy.
200612f080e7Smrj 	 */
200712f080e7Smrj 	e = rootnex_bind_slowpath(hp, dmareq, dma, attr, kmflag);
200812f080e7Smrj 	if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) {
200912f080e7Smrj 		if (dma->dp_need_to_free_cookie) {
201012f080e7Smrj 			kmem_free(dma->dp_cookies, dma->dp_cookie_size);
201112f080e7Smrj 		}
201212f080e7Smrj 		ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
201312f080e7Smrj 		rootnex_clean_dmahdl(hp); /* must be after free cookie */
201412f080e7Smrj 		return (e);
201512f080e7Smrj 	}
201612f080e7Smrj 
201785c8e0e8Sstephh 	/*
201885c8e0e8Sstephh 	 * If the driver supports FMA, insert the handle in the FMA DMA handle
201985c8e0e8Sstephh 	 * cache.
202085c8e0e8Sstephh 	 */
202185c8e0e8Sstephh 	if (attr->dma_attr_flags & DDI_DMA_FLAGERR) {
202285c8e0e8Sstephh 		hp->dmai_error.err_cf = rootnex_dma_check;
202385c8e0e8Sstephh 		(void) ndi_fmc_insert(rdip, DMA_HANDLE, hp, NULL);
202485c8e0e8Sstephh 	}
202585c8e0e8Sstephh 
202612f080e7Smrj 	/* if the first window uses the copy buffer, sync it for the device */
202712f080e7Smrj 	if ((dma->dp_window[dma->dp_current_win].wd_dosync) &&
202812f080e7Smrj 	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
202912f080e7Smrj 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
203012f080e7Smrj 		    DDI_DMA_SYNC_FORDEV);
203112f080e7Smrj 	}
203212f080e7Smrj 
203312f080e7Smrj 	/*
203412f080e7Smrj 	 * copy out the first cookie and ccountp, set the cookie pointer to the
203512f080e7Smrj 	 * second cookie. Make sure the partial flag is set/cleared correctly.
203612f080e7Smrj 	 * If we have a partial map (i.e. multiple windows), the number of
203712f080e7Smrj 	 * cookies we return is the number of cookies in the first window.
203812f080e7Smrj 	 */
203912f080e7Smrj 	if (e == DDI_DMA_MAPPED) {
204012f080e7Smrj 		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
204112f080e7Smrj 		*ccountp = sinfo->si_sgl_size;
204212f080e7Smrj 	} else {
204312f080e7Smrj 		hp->dmai_rflags |= DDI_DMA_PARTIAL;
204412f080e7Smrj 		*ccountp = dma->dp_window[dma->dp_current_win].wd_cookie_cnt;
204512f080e7Smrj 		ASSERT(hp->dmai_nwin <= dma->dp_max_win);
204612f080e7Smrj 	}
204712f080e7Smrj 	*cookiep = dma->dp_cookies[0];
204812f080e7Smrj 	hp->dmai_cookie++;
204912f080e7Smrj 
205012f080e7Smrj 	ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
205112f080e7Smrj 	DTRACE_PROBE3(rootnex__bind__slow, dev_info_t *, rdip, uint64_t,
205212f080e7Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t,
205312f080e7Smrj 	    dma->dp_dma.dmao_size);
205412f080e7Smrj 	return (e);
205512f080e7Smrj }
205612f080e7Smrj 
205712f080e7Smrj 
205812f080e7Smrj /*
2059*20906b23SVikram Hegde  * rootnex_dma_bindhdl()
2060*20906b23SVikram Hegde  *    called from ddi_dma_addr_bind_handle() and ddi_dma_buf_bind_handle().
206112f080e7Smrj  */
2062*20906b23SVikram Hegde static int
2063*20906b23SVikram Hegde rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
2064*20906b23SVikram Hegde     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
2065*20906b23SVikram Hegde     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
2066*20906b23SVikram Hegde {
2067*20906b23SVikram Hegde #if !defined(__xpv)
2068*20906b23SVikram Hegde 	if (IOMMU_USED(rdip)) {
2069*20906b23SVikram Hegde 		return (iommulib_nexdma_bindhdl(dip, rdip, handle, dmareq,
2070*20906b23SVikram Hegde 		    cookiep, ccountp));
2071*20906b23SVikram Hegde 	}
2072*20906b23SVikram Hegde #endif
2073*20906b23SVikram Hegde 	return (rootnex_coredma_bindhdl(dip, rdip, handle, dmareq,
2074*20906b23SVikram Hegde 	    cookiep, ccountp));
2075*20906b23SVikram Hegde }
2076*20906b23SVikram Hegde 
207712f080e7Smrj /*ARGSUSED*/
207812f080e7Smrj static int
2079*20906b23SVikram Hegde rootnex_coredma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
208012f080e7Smrj     ddi_dma_handle_t handle)
208112f080e7Smrj {
208212f080e7Smrj 	ddi_dma_impl_t *hp;
208312f080e7Smrj 	rootnex_dma_t *dma;
208412f080e7Smrj 	int e;
208512f080e7Smrj 
208612f080e7Smrj 	hp = (ddi_dma_impl_t *)handle;
208712f080e7Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
208812f080e7Smrj 
208912f080e7Smrj 	/* make sure the buffer wasn't free'd before calling unbind */
209012f080e7Smrj 	if (rootnex_unbind_verify_buffer) {
209112f080e7Smrj 		e = rootnex_verify_buffer(dma);
209212f080e7Smrj 		if (e != DDI_SUCCESS) {
209312f080e7Smrj 			ASSERT(0);
209412f080e7Smrj 			return (DDI_FAILURE);
209512f080e7Smrj 		}
209612f080e7Smrj 	}
209712f080e7Smrj 
209812f080e7Smrj 	/* sync the current window before unbinding the buffer */
209912f080e7Smrj 	if (dma->dp_window && dma->dp_window[dma->dp_current_win].wd_dosync &&
210012f080e7Smrj 	    (hp->dmai_rflags & DDI_DMA_READ)) {
210112f080e7Smrj 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
210212f080e7Smrj 		    DDI_DMA_SYNC_FORCPU);
210312f080e7Smrj 	}
210412f080e7Smrj 
210512f080e7Smrj 	/*
210600d0963fSdilpreet 	 * If the driver supports FMA, remove the handle in the FMA DMA handle
210700d0963fSdilpreet 	 * cache.
210800d0963fSdilpreet 	 */
210900d0963fSdilpreet 	if (hp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) {
211000d0963fSdilpreet 		if ((DEVI(rdip)->devi_fmhdl != NULL) &&
211100d0963fSdilpreet 		    (DDI_FM_DMA_ERR_CAP(DEVI(rdip)->devi_fmhdl->fh_cap))) {
211200d0963fSdilpreet 			(void) ndi_fmc_remove(rdip, DMA_HANDLE, hp);
211300d0963fSdilpreet 		}
211400d0963fSdilpreet 	}
211500d0963fSdilpreet 
211600d0963fSdilpreet 	/*
211712f080e7Smrj 	 * clean up the copy buffer or window state. If we didn't use the copy
211812f080e7Smrj 	 * buffer or windows, there won't be much to do :-)
211912f080e7Smrj 	 */
212012f080e7Smrj 	rootnex_teardown_copybuf(dma);
212112f080e7Smrj 	rootnex_teardown_windows(dma);
212212f080e7Smrj 
2123*20906b23SVikram Hegde #if !defined(__xpv)
212412f080e7Smrj 	/*
212586c1f4dcSVikram Hegde 	 * If the intel iommu is enabled, clean up the page tables and free the dvma
212686c1f4dcSVikram Hegde 	 */
212786c1f4dcSVikram Hegde 	if (rootnex_state->r_intel_iommu_enabled) {
212886c1f4dcSVikram Hegde 		intel_iommu_unmap_sgl(handle);
212986c1f4dcSVikram Hegde 	}
2130*20906b23SVikram Hegde #endif
213186c1f4dcSVikram Hegde 
213286c1f4dcSVikram Hegde 	/*
213312f080e7Smrj 	 * If we had to allocate space for the worst case sgl (it didn't
213412f080e7Smrj 	 * fit into our pre-allocated buffer), free that up now.
213512f080e7Smrj 	 */
213612f080e7Smrj 	if (dma->dp_need_to_free_cookie) {
213712f080e7Smrj 		kmem_free(dma->dp_cookies, dma->dp_cookie_size);
213812f080e7Smrj 	}
213912f080e7Smrj 
214012f080e7Smrj 	/*
214112f080e7Smrj 	 * clean up the handle so it's ready for the next bind (i.e. if the
214212f080e7Smrj 	 * handle is reused).
214312f080e7Smrj 	 */
214412f080e7Smrj 	rootnex_clean_dmahdl(hp);
214512f080e7Smrj 
214612f080e7Smrj 	if (rootnex_state->r_dvma_call_list_id)
214712f080e7Smrj 		ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
214812f080e7Smrj 
214912f080e7Smrj 	ROOTNEX_PROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
215012f080e7Smrj 	DTRACE_PROBE1(rootnex__unbind, uint64_t,
215112f080e7Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
215212f080e7Smrj 
215312f080e7Smrj 	return (DDI_SUCCESS);
215412f080e7Smrj }
215512f080e7Smrj 
2156*20906b23SVikram Hegde /*
2157*20906b23SVikram Hegde  * rootnex_dma_unbindhdl()
2158*20906b23SVikram Hegde  *    called from ddi_dma_unbind_handle()
2159*20906b23SVikram Hegde  */
2160*20906b23SVikram Hegde /*ARGSUSED*/
2161*20906b23SVikram Hegde static int
2162*20906b23SVikram Hegde rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
2163*20906b23SVikram Hegde     ddi_dma_handle_t handle)
2164*20906b23SVikram Hegde {
2165*20906b23SVikram Hegde #if !defined(__xpv)
2166*20906b23SVikram Hegde 	if (IOMMU_USED(rdip)) {
2167*20906b23SVikram Hegde 		return (iommulib_nexdma_unbindhdl(dip, rdip, handle));
2168*20906b23SVikram Hegde 	}
2169*20906b23SVikram Hegde #endif
2170*20906b23SVikram Hegde 	return (rootnex_coredma_unbindhdl(dip, rdip, handle));
2171*20906b23SVikram Hegde }
2172*20906b23SVikram Hegde 
2173*20906b23SVikram Hegde /*ARGSUSED*/
2174*20906b23SVikram Hegde static void
2175*20906b23SVikram Hegde rootnex_coredma_reset_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
2176*20906b23SVikram Hegde {
2177*20906b23SVikram Hegde 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
2178*20906b23SVikram Hegde 	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
2179*20906b23SVikram Hegde 
2180*20906b23SVikram Hegde 	hp->dmai_cookie = &dma->dp_cookies[0];
2181*20906b23SVikram Hegde 	hp->dmai_cookie++;
2182*20906b23SVikram Hegde }
2183*20906b23SVikram Hegde 
2184*20906b23SVikram Hegde /*ARGSUSED*/
2185*20906b23SVikram Hegde static int
2186*20906b23SVikram Hegde rootnex_coredma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
2187*20906b23SVikram Hegde     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
2188*20906b23SVikram Hegde {
2189*20906b23SVikram Hegde 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
2190*20906b23SVikram Hegde 	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
2191*20906b23SVikram Hegde 
2192*20906b23SVikram Hegde 
2193*20906b23SVikram Hegde 	if (hp->dmai_rflags & DDI_DMA_PARTIAL) {
2194*20906b23SVikram Hegde 		*ccountp = dma->dp_window[dma->dp_current_win].wd_cookie_cnt;
2195*20906b23SVikram Hegde 	} else {
2196*20906b23SVikram Hegde 		*ccountp = dma->dp_sglinfo.si_sgl_size;
2197*20906b23SVikram Hegde 	}
2198*20906b23SVikram Hegde 	*cookiep = dma->dp_cookies[0];
2199*20906b23SVikram Hegde 
2200*20906b23SVikram Hegde 	/* reset the cookies */
2201*20906b23SVikram Hegde 	hp->dmai_cookie = &dma->dp_cookies[0];
2202*20906b23SVikram Hegde 	hp->dmai_cookie++;
2203*20906b23SVikram Hegde 
2204*20906b23SVikram Hegde 	return (DDI_SUCCESS);
2205*20906b23SVikram Hegde }
220612f080e7Smrj 
220712f080e7Smrj /*
220812f080e7Smrj  * rootnex_verify_buffer()
220912f080e7Smrj  *   verify buffer wasn't free'd
221012f080e7Smrj  */
221112f080e7Smrj static int
221212f080e7Smrj rootnex_verify_buffer(rootnex_dma_t *dma)
221312f080e7Smrj {
221412f080e7Smrj 	page_t **pplist;
221512f080e7Smrj 	caddr_t vaddr;
221612f080e7Smrj 	uint_t pcnt;
221712f080e7Smrj 	uint_t poff;
221812f080e7Smrj 	page_t *pp;
221900d0963fSdilpreet 	char b;
222012f080e7Smrj 	int i;
222112f080e7Smrj 
222212f080e7Smrj 	/* Figure out how many pages this buffer occupies */
222312f080e7Smrj 	if (dma->dp_dma.dmao_type == DMA_OTYP_PAGES) {
222412f080e7Smrj 		poff = dma->dp_dma.dmao_obj.pp_obj.pp_offset & MMU_PAGEOFFSET;
222512f080e7Smrj 	} else {
222612f080e7Smrj 		vaddr = dma->dp_dma.dmao_obj.virt_obj.v_addr;
222712f080e7Smrj 		poff = (uintptr_t)vaddr & MMU_PAGEOFFSET;
222812f080e7Smrj 	}
222912f080e7Smrj 	pcnt = mmu_btopr(dma->dp_dma.dmao_size + poff);
223012f080e7Smrj 
223112f080e7Smrj 	switch (dma->dp_dma.dmao_type) {
223212f080e7Smrj 	case DMA_OTYP_PAGES:
223312f080e7Smrj 		/*
223412f080e7Smrj 		 * for a linked list of pp's walk through them to make sure
223512f080e7Smrj 		 * they're locked and not free.
223612f080e7Smrj 		 */
223712f080e7Smrj 		pp = dma->dp_dma.dmao_obj.pp_obj.pp_pp;
223812f080e7Smrj 		for (i = 0; i < pcnt; i++) {
223912f080e7Smrj 			if (PP_ISFREE(pp) || !PAGE_LOCKED(pp)) {
224012f080e7Smrj 				return (DDI_FAILURE);
224112f080e7Smrj 			}
22427c478bd9Sstevel@tonic-gate 			pp = pp->p_next;
22437c478bd9Sstevel@tonic-gate 		}
22447c478bd9Sstevel@tonic-gate 		break;
224512f080e7Smrj 
22467c478bd9Sstevel@tonic-gate 	case DMA_OTYP_VADDR:
22477c478bd9Sstevel@tonic-gate 	case DMA_OTYP_BUFVADDR:
224812f080e7Smrj 		pplist = dma->dp_dma.dmao_obj.virt_obj.v_priv;
224912f080e7Smrj 		/*
225012f080e7Smrj 		 * for an array of pp's walk through them to make sure they're
225112f080e7Smrj 		 * not free. It's possible that they may not be locked.
225212f080e7Smrj 		 */
225312f080e7Smrj 		if (pplist) {
225412f080e7Smrj 			for (i = 0; i < pcnt; i++) {
225512f080e7Smrj 				if (PP_ISFREE(pplist[i])) {
225612f080e7Smrj 					return (DDI_FAILURE);
225712f080e7Smrj 				}
225812f080e7Smrj 			}
225912f080e7Smrj 
226012f080e7Smrj 		/* For a virtual address, try to peek at each page */
226112f080e7Smrj 		} else {
226212f080e7Smrj 			if (dma->dp_sglinfo.si_asp == &kas) {
226312f080e7Smrj 				for (i = 0; i < pcnt; i++) {
226400d0963fSdilpreet 					if (ddi_peek8(NULL, vaddr, &b) ==
226500d0963fSdilpreet 					    DDI_FAILURE)
226612f080e7Smrj 						return (DDI_FAILURE);
226700d0963fSdilpreet 					vaddr += MMU_PAGESIZE;
226812f080e7Smrj 				}
226912f080e7Smrj 			}
227012f080e7Smrj 		}
227112f080e7Smrj 		break;
227212f080e7Smrj 
227312f080e7Smrj 	default:
227412f080e7Smrj 		ASSERT(0);
227512f080e7Smrj 		break;
227612f080e7Smrj 	}
227712f080e7Smrj 
227812f080e7Smrj 	return (DDI_SUCCESS);
227912f080e7Smrj }
228012f080e7Smrj 
228112f080e7Smrj 
228212f080e7Smrj /*
228312f080e7Smrj  * rootnex_clean_dmahdl()
228412f080e7Smrj  *    Clean the dma handle. This should be called on handle alloc and on
228512f080e7Smrj  *    handle unbind. Set the handle state to the default settings.
228612f080e7Smrj  */
228712f080e7Smrj static void
228812f080e7Smrj rootnex_clean_dmahdl(ddi_dma_impl_t *hp)
228912f080e7Smrj {
229012f080e7Smrj 	rootnex_dma_t *dma;
229112f080e7Smrj 
229212f080e7Smrj 
229312f080e7Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
229412f080e7Smrj 
229512f080e7Smrj 	hp->dmai_nwin = 0;
229612f080e7Smrj 	dma->dp_current_cookie = 0;
229712f080e7Smrj 	dma->dp_copybuf_size = 0;
229812f080e7Smrj 	dma->dp_window = NULL;
229912f080e7Smrj 	dma->dp_cbaddr = NULL;
230012f080e7Smrj 	dma->dp_inuse = B_FALSE;
230112f080e7Smrj 	dma->dp_need_to_free_cookie = B_FALSE;
230212f080e7Smrj 	dma->dp_need_to_free_window = B_FALSE;
230312f080e7Smrj 	dma->dp_partial_required = B_FALSE;
230412f080e7Smrj 	dma->dp_trim_required = B_FALSE;
230512f080e7Smrj 	dma->dp_sglinfo.si_copybuf_req = 0;
230612f080e7Smrj #if !defined(__amd64)
230712f080e7Smrj 	dma->dp_cb_remaping = B_FALSE;
230812f080e7Smrj 	dma->dp_kva = NULL;
230912f080e7Smrj #endif
231012f080e7Smrj 
231112f080e7Smrj 	/* FMA related initialization */
231212f080e7Smrj 	hp->dmai_fault = 0;
231312f080e7Smrj 	hp->dmai_fault_check = NULL;
231412f080e7Smrj 	hp->dmai_fault_notify = NULL;
231512f080e7Smrj 	hp->dmai_error.err_ena = 0;
231612f080e7Smrj 	hp->dmai_error.err_status = DDI_FM_OK;
231712f080e7Smrj 	hp->dmai_error.err_expected = DDI_FM_ERR_UNEXPECTED;
231812f080e7Smrj 	hp->dmai_error.err_ontrap = NULL;
231912f080e7Smrj 	hp->dmai_error.err_fep = NULL;
232000d0963fSdilpreet 	hp->dmai_error.err_cf = NULL;
232112f080e7Smrj }
232212f080e7Smrj 
232312f080e7Smrj 
232412f080e7Smrj /*
232512f080e7Smrj  * rootnex_valid_alloc_parms()
232612f080e7Smrj  *    Called in ddi_dma_alloc_handle path to validate its parameters.
232712f080e7Smrj  */
232812f080e7Smrj static int
232912f080e7Smrj rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegmentsize)
233012f080e7Smrj {
233112f080e7Smrj 	if ((attr->dma_attr_seg < MMU_PAGEOFFSET) ||
233212f080e7Smrj 	    (attr->dma_attr_count_max < MMU_PAGEOFFSET) ||
233312f080e7Smrj 	    (attr->dma_attr_granular > MMU_PAGESIZE) ||
233412f080e7Smrj 	    (attr->dma_attr_maxxfer < MMU_PAGESIZE)) {
233512f080e7Smrj 		return (DDI_DMA_BADATTR);
233612f080e7Smrj 	}
233712f080e7Smrj 
233812f080e7Smrj 	if (attr->dma_attr_addr_hi <= attr->dma_attr_addr_lo) {
233912f080e7Smrj 		return (DDI_DMA_BADATTR);
234012f080e7Smrj 	}
234112f080e7Smrj 
234212f080e7Smrj 	if ((attr->dma_attr_seg & MMU_PAGEOFFSET) != MMU_PAGEOFFSET ||
234312f080e7Smrj 	    MMU_PAGESIZE & (attr->dma_attr_granular - 1) ||
234412f080e7Smrj 	    attr->dma_attr_sgllen <= 0) {
234512f080e7Smrj 		return (DDI_DMA_BADATTR);
234612f080e7Smrj 	}
234712f080e7Smrj 
234812f080e7Smrj 	/* We should be able to DMA into every byte offset in a page */
234912f080e7Smrj 	if (maxsegmentsize < MMU_PAGESIZE) {
235012f080e7Smrj 		return (DDI_DMA_BADATTR);
235112f080e7Smrj 	}
235212f080e7Smrj 
235312f080e7Smrj 	return (DDI_SUCCESS);
235412f080e7Smrj }
235512f080e7Smrj 
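/*
 * Illustrative sketch (hypothetical attribute values, assuming 4K pages): an
 * attr with dma_attr_seg = 0xFFFFFFFF, dma_attr_count_max = 0xFFFFFFFF,
 * dma_attr_granular = 1, dma_attr_maxxfer = 0xFFFFFFFF, dma_attr_addr_lo = 0,
 * dma_attr_addr_hi = 0xFFFFFFFF, and dma_attr_sgllen = 17 passes every check
 * above: seg and count_max cover at least a page's worth of offsets, seg ends
 * on a page-offset boundary ((seg & MMU_PAGEOFFSET) == MMU_PAGEOFFSET), the
 * granularity divides the page size, and maxxfer holds at least one page.
 */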
235612f080e7Smrj 
235712f080e7Smrj /*
235812f080e7Smrj  * rootnex_valid_bind_parms()
235912f080e7Smrj  *    Called in ddi_dma_*_bind_handle path to validate its parameters.
236012f080e7Smrj  */
236112f080e7Smrj /* ARGSUSED */
236212f080e7Smrj static int
236312f080e7Smrj rootnex_valid_bind_parms(ddi_dma_req_t *dmareq, ddi_dma_attr_t *attr)
236412f080e7Smrj {
236512f080e7Smrj #if !defined(__amd64)
236612f080e7Smrj 	/*
236712f080e7Smrj 	 * we only support up to a 2G-1 transfer size on 32-bit kernels so
236812f080e7Smrj 	 * we can track the offset for the obsoleted interfaces.
236912f080e7Smrj 	 */
237012f080e7Smrj 	if (dmareq->dmar_object.dmao_size > 0x7FFFFFFF) {
237112f080e7Smrj 		return (DDI_DMA_TOOBIG);
237212f080e7Smrj 	}
237312f080e7Smrj #endif
237412f080e7Smrj 
237512f080e7Smrj 	return (DDI_SUCCESS);
237612f080e7Smrj }
237712f080e7Smrj 
237812f080e7Smrj 
237912f080e7Smrj /*
238012f080e7Smrj  * rootnex_get_sgl()
238112f080e7Smrj  *    Called in bind fastpath to get the sgl. Most of this will be replaced
238212f080e7Smrj  *    with a call to the vm layer when vm2.0 comes around...
238312f080e7Smrj  */
238412f080e7Smrj static void
238512f080e7Smrj rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
238612f080e7Smrj     rootnex_sglinfo_t *sglinfo)
238712f080e7Smrj {
238812f080e7Smrj 	ddi_dma_atyp_t buftype;
2389843e1988Sjohnlev 	rootnex_addr_t raddr;
239012f080e7Smrj 	uint64_t last_page;
239112f080e7Smrj 	uint64_t offset;
239212f080e7Smrj 	uint64_t addrhi;
239312f080e7Smrj 	uint64_t addrlo;
239412f080e7Smrj 	uint64_t maxseg;
239512f080e7Smrj 	page_t **pplist;
239612f080e7Smrj 	uint64_t paddr;
239712f080e7Smrj 	uint32_t psize;
239812f080e7Smrj 	uint32_t size;
239912f080e7Smrj 	caddr_t vaddr;
240012f080e7Smrj 	uint_t pcnt;
240112f080e7Smrj 	page_t *pp;
240212f080e7Smrj 	uint_t cnt;
240312f080e7Smrj 
240412f080e7Smrj 
240512f080e7Smrj 	/* shortcuts */
240612f080e7Smrj 	pplist = dmar_object->dmao_obj.virt_obj.v_priv;
240712f080e7Smrj 	vaddr = dmar_object->dmao_obj.virt_obj.v_addr;
240812f080e7Smrj 	maxseg = sglinfo->si_max_cookie_size;
240912f080e7Smrj 	buftype = dmar_object->dmao_type;
241012f080e7Smrj 	addrhi = sglinfo->si_max_addr;
241112f080e7Smrj 	addrlo = sglinfo->si_min_addr;
241212f080e7Smrj 	size = dmar_object->dmao_size;
241312f080e7Smrj 
241412f080e7Smrj 	pcnt = 0;
241512f080e7Smrj 	cnt = 0;
241612f080e7Smrj 
241712f080e7Smrj 	/*
241812f080e7Smrj 	 * if we were passed down a linked list of pages, i.e. pointer to
241912f080e7Smrj 	 * page_t, use this to get our physical address and buf offset.
242012f080e7Smrj 	 */
242112f080e7Smrj 	if (buftype == DMA_OTYP_PAGES) {
242212f080e7Smrj 		pp = dmar_object->dmao_obj.pp_obj.pp_pp;
242312f080e7Smrj 		ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
242412f080e7Smrj 		offset =  dmar_object->dmao_obj.pp_obj.pp_offset &
242512f080e7Smrj 		    MMU_PAGEOFFSET;
2426843e1988Sjohnlev 		paddr = pfn_to_pa(pp->p_pagenum) + offset;
242712f080e7Smrj 		psize = MIN(size, (MMU_PAGESIZE - offset));
242812f080e7Smrj 		pp = pp->p_next;
242912f080e7Smrj 		sglinfo->si_asp = NULL;
243012f080e7Smrj 
243112f080e7Smrj 	/*
243212f080e7Smrj 	 * We weren't passed down a linked list of pages, but if we were passed
243312f080e7Smrj 	 * down an array of pages, use this to get our physical address and buf
243412f080e7Smrj 	 * offset.
243512f080e7Smrj 	 */
243612f080e7Smrj 	} else if (pplist != NULL) {
243712f080e7Smrj 		ASSERT((buftype == DMA_OTYP_VADDR) ||
243812f080e7Smrj 		    (buftype == DMA_OTYP_BUFVADDR));
243912f080e7Smrj 
244012f080e7Smrj 		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
244112f080e7Smrj 		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
244212f080e7Smrj 		if (sglinfo->si_asp == NULL) {
244312f080e7Smrj 			sglinfo->si_asp = &kas;
244412f080e7Smrj 		}
244512f080e7Smrj 
244612f080e7Smrj 		ASSERT(!PP_ISFREE(pplist[pcnt]));
2447843e1988Sjohnlev 		paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
244812f080e7Smrj 		paddr += offset;
244912f080e7Smrj 		psize = MIN(size, (MMU_PAGESIZE - offset));
245012f080e7Smrj 		pcnt++;
245112f080e7Smrj 
245212f080e7Smrj 	/*
245312f080e7Smrj 	 * All we have is a virtual address; we'll need to call into the VM
245412f080e7Smrj 	 * to get the physical address.
245512f080e7Smrj 	 */
245612f080e7Smrj 	} else {
245712f080e7Smrj 		ASSERT((buftype == DMA_OTYP_VADDR) ||
245812f080e7Smrj 		    (buftype == DMA_OTYP_BUFVADDR));
245912f080e7Smrj 
246012f080e7Smrj 		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
246112f080e7Smrj 		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
246212f080e7Smrj 		if (sglinfo->si_asp == NULL) {
246312f080e7Smrj 			sglinfo->si_asp = &kas;
246412f080e7Smrj 		}
246512f080e7Smrj 
2466843e1988Sjohnlev 		paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, vaddr));
246712f080e7Smrj 		paddr += offset;
246812f080e7Smrj 		psize = MIN(size, (MMU_PAGESIZE - offset));
246912f080e7Smrj 		vaddr += psize;
247012f080e7Smrj 	}
247112f080e7Smrj 
2472843e1988Sjohnlev #ifdef __xpv
2473843e1988Sjohnlev 	/*
2474843e1988Sjohnlev 	 * If we're dom0, we're using a real device so we need to load
2475843e1988Sjohnlev 	 * the cookies with MFNs instead of PFNs.
2476843e1988Sjohnlev 	 */
2477843e1988Sjohnlev 	raddr = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
2478843e1988Sjohnlev #else
2479843e1988Sjohnlev 	raddr = paddr;
2480843e1988Sjohnlev #endif
2481843e1988Sjohnlev 
248212f080e7Smrj 	/*
248312f080e7Smrj 	 * Setup the first cookie with the physical address of the page and the
248412f080e7Smrj 	 * size of the page (which takes into account the initial offset into
248512f080e7Smrj 	 * the page.
248612f080e7Smrj 	 * the page).
2487843e1988Sjohnlev 	sgl[cnt].dmac_laddress = raddr;
248812f080e7Smrj 	sgl[cnt].dmac_size = psize;
248912f080e7Smrj 	sgl[cnt].dmac_type = 0;
249012f080e7Smrj 
249112f080e7Smrj 	/*
249212f080e7Smrj 	 * Save away the buffer offset into the page. We'll need this later in
249312f080e7Smrj 	 * the copy buffer code to help figure out the page index within the
249412f080e7Smrj 	 * buffer and the offset into the current page.
249512f080e7Smrj 	 */
249612f080e7Smrj 	sglinfo->si_buf_offset = offset;
249712f080e7Smrj 
249812f080e7Smrj 	/*
249912f080e7Smrj 	 * If the DMA engine can't reach the physical address, increase how
250012f080e7Smrj 	 * much copy buffer we need. We always increase by pagesize so we don't
250112f080e7Smrj 	 * have to worry about converting offsets. Set a flag in the cookie's
250212f080e7Smrj 	 * dmac_type to indicate that it uses the copy buffer. If this isn't the
250312f080e7Smrj 	 * last cookie, go to the next cookie (since we separate each page which
250412f080e7Smrj 	 * uses the copy buffer, in case the copy buffer is not physically
250512f080e7Smrj 	 * contiguous).
250612f080e7Smrj 	 */
2507843e1988Sjohnlev 	if ((raddr < addrlo) || ((raddr + psize) > addrhi)) {
250812f080e7Smrj 		sglinfo->si_copybuf_req += MMU_PAGESIZE;
250912f080e7Smrj 		sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
251012f080e7Smrj 		if ((cnt + 1) < sglinfo->si_max_pages) {
251112f080e7Smrj 			cnt++;
251212f080e7Smrj 			sgl[cnt].dmac_laddress = 0;
251312f080e7Smrj 			sgl[cnt].dmac_size = 0;
251412f080e7Smrj 			sgl[cnt].dmac_type = 0;
251512f080e7Smrj 		}
251612f080e7Smrj 	}
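	/*
	 * Illustrative sketch (hypothetical values): for an engine limited to
	 * addrlo = 0 and addrhi = 0x1000000 (a 16MB-style limit), a page at
	 * raddr 0x2000000 fails the range check above, so a page of copy
	 * buffer is reserved and the cookie is tagged ROOTNEX_USES_COPYBUF.
	 */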
251712f080e7Smrj 
251812f080e7Smrj 	/*
251912f080e7Smrj 	 * save this page's physical address so we can figure out if the next
252012f080e7Smrj 	 * page is physically contiguous. Keep decrementing size until we are
252112f080e7Smrj 	 * done with the buffer.
252212f080e7Smrj 	 */
2523843e1988Sjohnlev 	last_page = raddr & MMU_PAGEMASK;
252412f080e7Smrj 	size -= psize;
252512f080e7Smrj 
252612f080e7Smrj 	while (size > 0) {
252712f080e7Smrj 		/* Get the size for this page (i.e. partial or full page) */
252812f080e7Smrj 		psize = MIN(size, MMU_PAGESIZE);
252912f080e7Smrj 
253012f080e7Smrj 		if (buftype == DMA_OTYP_PAGES) {
253112f080e7Smrj 			/* get the paddr from the page_t */
253212f080e7Smrj 			ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
2533843e1988Sjohnlev 			paddr = pfn_to_pa(pp->p_pagenum);
253412f080e7Smrj 			pp = pp->p_next;
253512f080e7Smrj 		} else if (pplist != NULL) {
253612f080e7Smrj 			/* index into the array of page_t's to get the paddr */
253712f080e7Smrj 			ASSERT(!PP_ISFREE(pplist[pcnt]));
2538843e1988Sjohnlev 			paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
253912f080e7Smrj 			pcnt++;
254012f080e7Smrj 		} else {
254112f080e7Smrj 			/* call into the VM to get the paddr */
2542843e1988Sjohnlev 			paddr =  pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat,
254312f080e7Smrj 			    vaddr));
254412f080e7Smrj 			vaddr += psize;
254512f080e7Smrj 		}
254612f080e7Smrj 
2547843e1988Sjohnlev #ifdef __xpv
2548843e1988Sjohnlev 		/*
2549843e1988Sjohnlev 		 * If we're dom0, we're using a real device so we need to load
2550843e1988Sjohnlev 		 * the cookies with MFNs instead of PFNs.
2551843e1988Sjohnlev 		 */
2552843e1988Sjohnlev 		raddr = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
2553843e1988Sjohnlev #else
2554843e1988Sjohnlev 		raddr = paddr;
2555843e1988Sjohnlev #endif
2556843e1988Sjohnlev 
255712f080e7Smrj 		/* check to see if this page needs the copy buffer */
2558843e1988Sjohnlev 		if ((raddr < addrlo) || ((raddr + psize) > addrhi)) {
255912f080e7Smrj 			sglinfo->si_copybuf_req += MMU_PAGESIZE;
256012f080e7Smrj 
256112f080e7Smrj 			/*
256212f080e7Smrj 			 * if there is something in the current cookie, go to
256312f080e7Smrj 			 * the next one. We only want one page in a cookie which
256412f080e7Smrj 			 * uses the copybuf since the copybuf doesn't have to
256512f080e7Smrj 			 * be physically contiguous.
256612f080e7Smrj 			 */
256712f080e7Smrj 			if (sgl[cnt].dmac_size != 0) {
256812f080e7Smrj 				cnt++;
256912f080e7Smrj 			}
2570843e1988Sjohnlev 			sgl[cnt].dmac_laddress = raddr;
257112f080e7Smrj 			sgl[cnt].dmac_size = psize;
257212f080e7Smrj #if defined(__amd64)
257312f080e7Smrj 			sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
257412f080e7Smrj #else
257512f080e7Smrj 			/*
257612f080e7Smrj 			 * save the buf offset for 32-bit kernel. used in the
257712f080e7Smrj 			 * obsoleted interfaces.
257812f080e7Smrj 			 */
257912f080e7Smrj 			sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF |
258012f080e7Smrj 			    (dmar_object->dmao_size - size);
258112f080e7Smrj #endif
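			/*
			 * Illustrative sketch (hypothetical sizes): for a
			 * dmao_size of 0x3000 with 0x1000 bytes still left to
			 * map, dmac_type carries the buffer offset
			 * 0x3000 - 0x1000 = 0x2000 alongside the
			 * ROOTNEX_USES_COPYBUF flag.
			 */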
258212f080e7Smrj 			/* if this isn't the last cookie, go to the next one */
258312f080e7Smrj 			if ((cnt + 1) < sglinfo->si_max_pages) {
258412f080e7Smrj 				cnt++;
258512f080e7Smrj 				sgl[cnt].dmac_laddress = 0;
258612f080e7Smrj 				sgl[cnt].dmac_size = 0;
258712f080e7Smrj 				sgl[cnt].dmac_type = 0;
258812f080e7Smrj 			}
258912f080e7Smrj 
259012f080e7Smrj 		/*
259112f080e7Smrj 		 * this page didn't need the copy buffer. Start a new cookie if
259212f080e7Smrj 		 * it's not physically contiguous, or it would put us over a
259312f080e7Smrj 		 * segment boundary, or it puts us over the max cookie size, or
259412f080e7Smrj 		 * the current sgl doesn't have anything in it.
259512f080e7Smrj 		 */
2596843e1988Sjohnlev 		} else if (((last_page + MMU_PAGESIZE) != raddr) ||
2597843e1988Sjohnlev 		    !(raddr & sglinfo->si_segmask) ||
259812f080e7Smrj 		    ((sgl[cnt].dmac_size + psize) > maxseg) ||
259912f080e7Smrj 		    (sgl[cnt].dmac_size == 0)) {
260012f080e7Smrj 			/*
260112f080e7Smrj 			 * if we're not already in a new cookie, go to the next
260212f080e7Smrj 			 * cookie.
260312f080e7Smrj 			 */
260412f080e7Smrj 			if (sgl[cnt].dmac_size != 0) {
260512f080e7Smrj 				cnt++;
260612f080e7Smrj 			}
260712f080e7Smrj 
260812f080e7Smrj 			/* save the cookie information */
2609843e1988Sjohnlev 			sgl[cnt].dmac_laddress = raddr;
261012f080e7Smrj 			sgl[cnt].dmac_size = psize;
261112f080e7Smrj #if defined(__amd64)
261212f080e7Smrj 			sgl[cnt].dmac_type = 0;
261312f080e7Smrj #else
261412f080e7Smrj 			/*
261512f080e7Smrj 			 * save the buf offset for 32-bit kernel. used in the
261612f080e7Smrj 			 * obsoleted interfaces.
261712f080e7Smrj 			 */
261812f080e7Smrj 			sgl[cnt].dmac_type = dmar_object->dmao_size - size;
261912f080e7Smrj #endif
262012f080e7Smrj 
262112f080e7Smrj 		/*
262212f080e7Smrj 		 * this page didn't need the copy buffer, it is physically
262312f080e7Smrj 		 * contiguous with the last page, and it's <= the max cookie
262412f080e7Smrj 		 * size.
262512f080e7Smrj 		 */
262612f080e7Smrj 		} else {
262712f080e7Smrj 			sgl[cnt].dmac_size += psize;
262812f080e7Smrj 
262912f080e7Smrj 			/*
263012f080e7Smrj 			 * if this exactly == the maximum cookie size, and
263112f080e7Smrj 			 * it isn't the last cookie, go to the next cookie.
263212f080e7Smrj 			 */
263312f080e7Smrj 			if (((sgl[cnt].dmac_size + psize) == maxseg) &&
263412f080e7Smrj 			    ((cnt + 1) < sglinfo->si_max_pages)) {
263512f080e7Smrj 				cnt++;
263612f080e7Smrj 				sgl[cnt].dmac_laddress = 0;
263712f080e7Smrj 				sgl[cnt].dmac_size = 0;
263812f080e7Smrj 				sgl[cnt].dmac_type = 0;
263912f080e7Smrj 			}
264012f080e7Smrj 		}
264112f080e7Smrj 
264212f080e7Smrj 		/*
264312f080e7Smrj 		 * save this page's physical address so we can figure out if the
264412f080e7Smrj 		 * next page is physically contiguous. Keep decrementing size
264512f080e7Smrj 		 * until we are done with the buffer.
264612f080e7Smrj 		 */
2647843e1988Sjohnlev 		last_page = raddr;
264812f080e7Smrj 		size -= psize;
264912f080e7Smrj 	}
265012f080e7Smrj 
265112f080e7Smrj 	/* we're done, save away how many cookies the sgl has */
265212f080e7Smrj 	if (sgl[cnt].dmac_size == 0) {
265312f080e7Smrj 		ASSERT(cnt < sglinfo->si_max_pages);
265412f080e7Smrj 		sglinfo->si_sgl_size = cnt;
265512f080e7Smrj 	} else {
265612f080e7Smrj 		sglinfo->si_sgl_size = cnt + 1;
265712f080e7Smrj 	}
265812f080e7Smrj }
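/*
 * Illustrative sketch of rootnex_get_sgl() (hypothetical layout): for a
 * 0x4000-byte buffer starting 0x800 bytes into its first page, the first
 * cookie begins with psize = MMU_PAGESIZE - 0x800 = 0x800. If the remaining
 * pages are physically contiguous, fall within [addrlo, addrhi], and never
 * cross a segment boundary or exceed maxseg, every page merges into that same
 * cookie and the sgl ends up with si_sgl_size == 1 and one 0x4000-byte cookie.
 */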
265912f080e7Smrj 
266012f080e7Smrj 
266112f080e7Smrj /*
266212f080e7Smrj  * rootnex_bind_slowpath()
266312f080e7Smrj  *    Called in the bind path if the calling driver can't use the sgl without
266412f080e7Smrj  *    modifying it. We either need to use the copy buffer and/or we will end up
266512f080e7Smrj  *    with a partial bind.
266612f080e7Smrj  */
266712f080e7Smrj static int
266812f080e7Smrj rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
266912f080e7Smrj     rootnex_dma_t *dma, ddi_dma_attr_t *attr, int kmflag)
267012f080e7Smrj {
267112f080e7Smrj 	rootnex_sglinfo_t *sinfo;
267212f080e7Smrj 	rootnex_window_t *window;
267312f080e7Smrj 	ddi_dma_cookie_t *cookie;
267412f080e7Smrj 	size_t copybuf_used;
267512f080e7Smrj 	size_t dmac_size;
267612f080e7Smrj 	boolean_t partial;
267712f080e7Smrj 	off_t cur_offset;
267812f080e7Smrj 	page_t *cur_pp;
267912f080e7Smrj 	major_t mnum;
268012f080e7Smrj 	int e;
268112f080e7Smrj 	int i;
268212f080e7Smrj 
268312f080e7Smrj 
268412f080e7Smrj 	sinfo = &dma->dp_sglinfo;
268512f080e7Smrj 	copybuf_used = 0;
268612f080e7Smrj 	partial = B_FALSE;
268712f080e7Smrj 
268812f080e7Smrj 	/*
268912f080e7Smrj 	 * If we're using the copybuf, set the copybuf state in dma struct.
269012f080e7Smrj 	 * Needs to be first since it sets the copy buffer size.
269112f080e7Smrj 	 */
269212f080e7Smrj 	if (sinfo->si_copybuf_req != 0) {
269312f080e7Smrj 		e = rootnex_setup_copybuf(hp, dmareq, dma, attr);
269412f080e7Smrj 		if (e != DDI_SUCCESS) {
269512f080e7Smrj 			return (e);
269612f080e7Smrj 		}
269712f080e7Smrj 	} else {
269812f080e7Smrj 		dma->dp_copybuf_size = 0;
269912f080e7Smrj 	}
270012f080e7Smrj 
270112f080e7Smrj 	/*
270212f080e7Smrj 	 * Figure out if we need to do a partial mapping. If so, figure out
270312f080e7Smrj 	 * if we need to trim the buffers when we munge the sgl.
270412f080e7Smrj 	 */
270512f080e7Smrj 	if ((dma->dp_copybuf_size < sinfo->si_copybuf_req) ||
270612f080e7Smrj 	    (dma->dp_dma.dmao_size > dma->dp_maxxfer) ||
270712f080e7Smrj 	    (attr->dma_attr_sgllen < sinfo->si_sgl_size)) {
270812f080e7Smrj 		dma->dp_partial_required = B_TRUE;
270912f080e7Smrj 		if (attr->dma_attr_granular != 1) {
271012f080e7Smrj 			dma->dp_trim_required = B_TRUE;
271112f080e7Smrj 		}
271212f080e7Smrj 	} else {
271312f080e7Smrj 		dma->dp_partial_required = B_FALSE;
271412f080e7Smrj 		dma->dp_trim_required = B_FALSE;
271512f080e7Smrj 	}
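	/*
	 * Illustrative sketch (hypothetical values): if get_sgl() produced 40
	 * cookies but dma_attr_sgllen is only 17, a partial mapping is
	 * required; and if dma_attr_granular is, say, 0x200 rather than 1,
	 * the window edges must also be trimmed to that granularity.
	 */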
271612f080e7Smrj 
271712f080e7Smrj 	/* If we need to do a partial bind, make sure the driver supports it */
271812f080e7Smrj 	if (dma->dp_partial_required &&
271912f080e7Smrj 	    !(dmareq->dmar_flags & DDI_DMA_PARTIAL)) {
272012f080e7Smrj 
272112f080e7Smrj 		mnum = ddi_driver_major(dma->dp_dip);
272212f080e7Smrj 		/*
272312f080e7Smrj 		 * patchable variable which allows us to print one warning per
272412f080e7Smrj 		 * number.
272512f080e7Smrj 		 */
272612f080e7Smrj 		if ((rootnex_bind_warn) &&
272712f080e7Smrj 		    ((rootnex_warn_list[mnum] & ROOTNEX_BIND_WARNING) == 0)) {
272812f080e7Smrj 			rootnex_warn_list[mnum] |= ROOTNEX_BIND_WARNING;
272912f080e7Smrj 			cmn_err(CE_WARN, "!%s: coding error detected, the "
273012f080e7Smrj 			    "driver is using ddi_dma_attr(9S) incorrectly. "
273112f080e7Smrj 			    "There is a small risk of data corruption in "
273212f080e7Smrj 			    "particular with large I/Os. The driver should be "
273312f080e7Smrj 			    "replaced with a corrected version for proper "
273412f080e7Smrj 			    "system operation. To disable this warning, add "
273512f080e7Smrj 			    "'set rootnex:rootnex_bind_warn=0' to "
273612f080e7Smrj 			    "/etc/system(4).", ddi_driver_name(dma->dp_dip));
273712f080e7Smrj 		}
273812f080e7Smrj 		return (DDI_DMA_TOOBIG);
273912f080e7Smrj 	}
274012f080e7Smrj 
274112f080e7Smrj 	/*
274212f080e7Smrj 	 * we might need multiple windows, setup state to handle them. In this
274312f080e7Smrj 	 * code path, we will have at least one window.
274412f080e7Smrj 	 */
274512f080e7Smrj 	e = rootnex_setup_windows(hp, dma, attr, kmflag);
274612f080e7Smrj 	if (e != DDI_SUCCESS) {
274712f080e7Smrj 		rootnex_teardown_copybuf(dma);
274812f080e7Smrj 		return (e);
274912f080e7Smrj 	}
275012f080e7Smrj 
275112f080e7Smrj 	window = &dma->dp_window[0];
275212f080e7Smrj 	cookie = &dma->dp_cookies[0];
275312f080e7Smrj 	cur_offset = 0;
275412f080e7Smrj 	rootnex_init_win(hp, dma, window, cookie, cur_offset);
275512f080e7Smrj 	if (dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) {
275612f080e7Smrj 		cur_pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp;
275712f080e7Smrj 	}
275812f080e7Smrj 
275912f080e7Smrj 	/* loop through all the cookies we got back from get_sgl() */
276012f080e7Smrj 	for (i = 0; i < sinfo->si_sgl_size; i++) {
276112f080e7Smrj 		/*
276212f080e7Smrj 		 * If we're using the copy buffer, check this cookie and setup
276312f080e7Smrj 		 * its associated copy buffer state. If this cookie uses the
276412f080e7Smrj 		 * copy buffer, make sure we sync this window during dma_sync.
276512f080e7Smrj 		 */
276612f080e7Smrj 		if (dma->dp_copybuf_size > 0) {
276712f080e7Smrj 			rootnex_setup_cookie(&dmareq->dmar_object, dma, cookie,
276812f080e7Smrj 			    cur_offset, &copybuf_used, &cur_pp);
276912f080e7Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
277012f080e7Smrj 				window->wd_dosync = B_TRUE;
277112f080e7Smrj 			}
277212f080e7Smrj 		}
277312f080e7Smrj 
277412f080e7Smrj 		/*
277512f080e7Smrj 		 * save away the cookie size, since it could be modified in
277612f080e7Smrj 		 * the windowing code.
277712f080e7Smrj 		 */
277812f080e7Smrj 		dmac_size = cookie->dmac_size;
277912f080e7Smrj 
278012f080e7Smrj 		/* if we went over max copybuf size */
278112f080e7Smrj 		if (dma->dp_copybuf_size &&
278212f080e7Smrj 		    (copybuf_used > dma->dp_copybuf_size)) {
278312f080e7Smrj 			partial = B_TRUE;
278412f080e7Smrj 			e = rootnex_copybuf_window_boundary(hp, dma, &window,
278512f080e7Smrj 			    cookie, cur_offset, &copybuf_used);
278612f080e7Smrj 			if (e != DDI_SUCCESS) {
278712f080e7Smrj 				rootnex_teardown_copybuf(dma);
278812f080e7Smrj 				rootnex_teardown_windows(dma);
278912f080e7Smrj 				return (e);
279012f080e7Smrj 			}
279112f080e7Smrj 
279212f080e7Smrj 			/*
279312f080e7Smrj 			 * if the cookie uses the copy buffer, make sure the
279412f080e7Smrj 			 * new window we just moved to is set to sync.
279512f080e7Smrj 			 */
279612f080e7Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
279712f080e7Smrj 				window->wd_dosync = B_TRUE;
279812f080e7Smrj 			}
279912f080e7Smrj 			DTRACE_PROBE1(rootnex__copybuf__window, dev_info_t *,
280012f080e7Smrj 			    dma->dp_dip);
280112f080e7Smrj 
280212f080e7Smrj 		/* if the cookie cnt == max sgllen, move to the next window */
280312f080e7Smrj 		} else if (window->wd_cookie_cnt >= attr->dma_attr_sgllen) {
280412f080e7Smrj 			partial = B_TRUE;
280512f080e7Smrj 			ASSERT(window->wd_cookie_cnt == attr->dma_attr_sgllen);
280612f080e7Smrj 			e = rootnex_sgllen_window_boundary(hp, dma, &window,
280712f080e7Smrj 			    cookie, attr, cur_offset);
280812f080e7Smrj 			if (e != DDI_SUCCESS) {
280912f080e7Smrj 				rootnex_teardown_copybuf(dma);
281012f080e7Smrj 				rootnex_teardown_windows(dma);
281112f080e7Smrj 				return (e);
281212f080e7Smrj 			}
281312f080e7Smrj 
281412f080e7Smrj 			/*
281512f080e7Smrj 			 * if the cookie uses the copy buffer, make sure the
281612f080e7Smrj 			 * new window we just moved to is set to sync.
281712f080e7Smrj 			 */
281812f080e7Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
281912f080e7Smrj 				window->wd_dosync = B_TRUE;
282012f080e7Smrj 			}
282112f080e7Smrj 			DTRACE_PROBE1(rootnex__sgllen__window, dev_info_t *,
282212f080e7Smrj 			    dma->dp_dip);
282312f080e7Smrj 
282412f080e7Smrj 		/* else if we will be over maxxfer */
282512f080e7Smrj 		} else if ((window->wd_size + dmac_size) >
282612f080e7Smrj 		    dma->dp_maxxfer) {
282712f080e7Smrj 			partial = B_TRUE;
282812f080e7Smrj 			e = rootnex_maxxfer_window_boundary(hp, dma, &window,
282912f080e7Smrj 			    cookie);
283012f080e7Smrj 			if (e != DDI_SUCCESS) {
283112f080e7Smrj 				rootnex_teardown_copybuf(dma);
283212f080e7Smrj 				rootnex_teardown_windows(dma);
283312f080e7Smrj 				return (e);
283412f080e7Smrj 			}
283512f080e7Smrj 
283612f080e7Smrj 			/*
283712f080e7Smrj 			 * if the cookie uses the copy buffer, make sure the
283812f080e7Smrj 			 * new window we just moved to is set to sync.
283912f080e7Smrj 			 */
284012f080e7Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
284112f080e7Smrj 				window->wd_dosync = B_TRUE;
284212f080e7Smrj 			}
284312f080e7Smrj 			DTRACE_PROBE1(rootnex__maxxfer__window, dev_info_t *,
284412f080e7Smrj 			    dma->dp_dip);
284512f080e7Smrj 
284612f080e7Smrj 		/* else this cookie fits in the current window */
284712f080e7Smrj 		} else {
284812f080e7Smrj 			window->wd_cookie_cnt++;
284912f080e7Smrj 			window->wd_size += dmac_size;
285012f080e7Smrj 		}
285112f080e7Smrj 
285212f080e7Smrj 		/* track our offset into the buffer, go to the next cookie */
285312f080e7Smrj 		ASSERT(dmac_size <= dma->dp_dma.dmao_size);
285412f080e7Smrj 		ASSERT(cookie->dmac_size <= dmac_size);
285512f080e7Smrj 		cur_offset += dmac_size;
285612f080e7Smrj 		cookie++;
285712f080e7Smrj 	}
285812f080e7Smrj 
285912f080e7Smrj 	/* if we ended up with a zero sized window in the end, clean it up */
286012f080e7Smrj 	if (window->wd_size == 0) {
286112f080e7Smrj 		hp->dmai_nwin--;
286212f080e7Smrj 		window--;
286312f080e7Smrj 	}
286412f080e7Smrj 
286512f080e7Smrj 	ASSERT(window->wd_trim.tr_trim_last == B_FALSE);
286612f080e7Smrj 
286712f080e7Smrj 	if (!partial) {
286812f080e7Smrj 		return (DDI_DMA_MAPPED);
286912f080e7Smrj 	}
287012f080e7Smrj 
287112f080e7Smrj 	ASSERT(dma->dp_partial_required);
287212f080e7Smrj 	return (DDI_DMA_PARTIAL_MAP);
287312f080e7Smrj }
287412f080e7Smrj 
287512f080e7Smrj 
287612f080e7Smrj /*
287712f080e7Smrj  * rootnex_setup_copybuf()
287812f080e7Smrj  *    Called in bind slowpath. Figures out if we're going to use the copy
287912f080e7Smrj  *    buffer, and if we do, sets up the basic state to handle it.
288012f080e7Smrj  */
288112f080e7Smrj static int
288212f080e7Smrj rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
288312f080e7Smrj     rootnex_dma_t *dma, ddi_dma_attr_t *attr)
288412f080e7Smrj {
288512f080e7Smrj 	rootnex_sglinfo_t *sinfo;
288612f080e7Smrj 	ddi_dma_attr_t lattr;
288712f080e7Smrj 	size_t max_copybuf;
288812f080e7Smrj 	int cansleep;
288912f080e7Smrj 	int e;
289012f080e7Smrj #if !defined(__amd64)
289112f080e7Smrj 	int vmflag;
289212f080e7Smrj #endif
289312f080e7Smrj 
289412f080e7Smrj 
289512f080e7Smrj 	sinfo = &dma->dp_sglinfo;
289612f080e7Smrj 
289736945f79Smrj 	/* read this first so it's consistent through the routine  */
289836945f79Smrj 	max_copybuf = i_ddi_copybuf_size() & MMU_PAGEMASK;
289912f080e7Smrj 
290012f080e7Smrj 	/* We need to call into the rootnex on ddi_dma_sync() */
290112f080e7Smrj 	hp->dmai_rflags &= ~DMP_NOSYNC;
290212f080e7Smrj 
290312f080e7Smrj 	/* make sure the copybuf size <= the max size */
290412f080e7Smrj 	dma->dp_copybuf_size = MIN(sinfo->si_copybuf_req, max_copybuf);
290512f080e7Smrj 	ASSERT((dma->dp_copybuf_size & MMU_PAGEOFFSET) == 0);
290612f080e7Smrj 
290712f080e7Smrj #if !defined(__amd64)
290812f080e7Smrj 	/*
290912f080e7Smrj 	 * if we don't have kva space to copy to/from, allocate the KVA space
291012f080e7Smrj 	 * now. We only do this for the 32-bit kernel. We use seg kpm space for
291112f080e7Smrj 	 * the 64-bit kernel.
291212f080e7Smrj 	 */
291312f080e7Smrj 	if ((dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) ||
291412f080e7Smrj 	    (dmareq->dmar_object.dmao_obj.virt_obj.v_as != NULL)) {
291512f080e7Smrj 
291612f080e7Smrj 		/* convert the sleep flags */
291712f080e7Smrj 		if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
291812f080e7Smrj 			vmflag = VM_SLEEP;
291912f080e7Smrj 		} else {
292012f080e7Smrj 			vmflag = VM_NOSLEEP;
292112f080e7Smrj 		}
292212f080e7Smrj 
292312f080e7Smrj 		/* allocate Kernel VA space that we can bcopy to/from */
292412f080e7Smrj 		dma->dp_kva = vmem_alloc(heap_arena, dma->dp_copybuf_size,
292512f080e7Smrj 		    vmflag);
292612f080e7Smrj 		if (dma->dp_kva == NULL) {
292712f080e7Smrj 			return (DDI_DMA_NORESOURCES);
292812f080e7Smrj 		}
292912f080e7Smrj 	}
293012f080e7Smrj #endif
293112f080e7Smrj 
293212f080e7Smrj 	/* convert the sleep flags */
293312f080e7Smrj 	if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
293412f080e7Smrj 		cansleep = 1;
293512f080e7Smrj 	} else {
293612f080e7Smrj 		cansleep = 0;
293712f080e7Smrj 	}
293812f080e7Smrj 
293912f080e7Smrj 	/*
2940d21b39ddSmrj 	 * Allocate the actual copy buffer. This needs to fit within the DMA
2941d21b39ddSmrj 	 * engine limits, so we can't use kmem_alloc... We don't need
2942d21b39ddSmrj 	 * contiguous memory (sgllen) since we will be forcing windows on
2943d21b39ddSmrj 	 * sgllen anyway.
294412f080e7Smrj 	 */
294512f080e7Smrj 	lattr = *attr;
294612f080e7Smrj 	lattr.dma_attr_align = MMU_PAGESIZE;
2947d21b39ddSmrj 	/*
2948d21b39ddSmrj 	 * this should be < 0 to indicate no limit, but due to a bug in
2949d21b39ddSmrj 	 * the rootnex, we'll set it to the maximum positive int.
2950d21b39ddSmrj 	 */
2951d21b39ddSmrj 	lattr.dma_attr_sgllen = 0x7fffffff;
295212f080e7Smrj 	e = i_ddi_mem_alloc(dma->dp_dip, &lattr, dma->dp_copybuf_size, cansleep,
295312f080e7Smrj 	    0, NULL, &dma->dp_cbaddr, &dma->dp_cbsize, NULL);
295412f080e7Smrj 	if (e != DDI_SUCCESS) {
295512f080e7Smrj #if !defined(__amd64)
295612f080e7Smrj 		if (dma->dp_kva != NULL) {
295712f080e7Smrj 			vmem_free(heap_arena, dma->dp_kva,
295812f080e7Smrj 			    dma->dp_copybuf_size);
295912f080e7Smrj 		}
296012f080e7Smrj #endif
296112f080e7Smrj 		return (DDI_DMA_NORESOURCES);
296212f080e7Smrj 	}
296312f080e7Smrj 
296412f080e7Smrj 	DTRACE_PROBE2(rootnex__alloc__copybuf, dev_info_t *, dma->dp_dip,
296512f080e7Smrj 	    size_t, dma->dp_copybuf_size);
296612f080e7Smrj 
296712f080e7Smrj 	return (DDI_SUCCESS);
296812f080e7Smrj }
296912f080e7Smrj 
297012f080e7Smrj 
297112f080e7Smrj /*
297212f080e7Smrj  * rootnex_setup_windows()
297312f080e7Smrj  *    Called in bind slowpath to setup the window state. We always have windows
297412f080e7Smrj  *    in the slowpath, even if the window count is 1.
297512f080e7Smrj  */
297612f080e7Smrj static int
297712f080e7Smrj rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
297812f080e7Smrj     ddi_dma_attr_t *attr, int kmflag)
297912f080e7Smrj {
298012f080e7Smrj 	rootnex_window_t *windowp;
298112f080e7Smrj 	rootnex_sglinfo_t *sinfo;
298212f080e7Smrj 	size_t copy_state_size;
298312f080e7Smrj 	size_t win_state_size;
298412f080e7Smrj 	size_t state_available;
298512f080e7Smrj 	size_t space_needed;
298612f080e7Smrj 	uint_t copybuf_win;
298712f080e7Smrj 	uint_t maxxfer_win;
298812f080e7Smrj 	size_t space_used;
298912f080e7Smrj 	uint_t sglwin;
299012f080e7Smrj 
299112f080e7Smrj 
299212f080e7Smrj 	sinfo = &dma->dp_sglinfo;
299312f080e7Smrj 
299412f080e7Smrj 	dma->dp_current_win = 0;
299512f080e7Smrj 	hp->dmai_nwin = 0;
299612f080e7Smrj 
299712f080e7Smrj 	/* If we don't need to do a partial, we only have one window */
299812f080e7Smrj 	if (!dma->dp_partial_required) {
299912f080e7Smrj 		dma->dp_max_win = 1;
300012f080e7Smrj 
300112f080e7Smrj 	/*
300212f080e7Smrj 	 * we need multiple windows, need to figure out the worst case number
300312f080e7Smrj 	 * of windows.
300412f080e7Smrj 	 */
30057c478bd9Sstevel@tonic-gate 	} else {
30067c478bd9Sstevel@tonic-gate 		/*
300712f080e7Smrj 		 * if we need windows because we need more copy buffer than
300812f080e7Smrj 		 * we allow, the worst case number of windows we could need
300912f080e7Smrj 		 * here would be (copybuf space required / copybuf space that
301012f080e7Smrj 		 * we have) plus one for remainder, and plus 2 to handle the
301112f080e7Smrj 		 * extra pages on the trim for the first and last pages of the
301212f080e7Smrj 		 * buffer (a page is the minimum window size so under the right
301312f080e7Smrj 		 * attr settings, you could have a window for each page).
301412f080e7Smrj 		 * The last page will only be hit here if the size is not a
301512f080e7Smrj 		 * multiple of the granularity (which theoretically shouldn't
301612f080e7Smrj 		 * be the case but never has been enforced, so we could have
301712f080e7Smrj 		 * broken things without it).
30187c478bd9Sstevel@tonic-gate 		 */
301912f080e7Smrj 		if (sinfo->si_copybuf_req > dma->dp_copybuf_size) {
302012f080e7Smrj 			ASSERT(dma->dp_copybuf_size > 0);
302112f080e7Smrj 			copybuf_win = (sinfo->si_copybuf_req /
302212f080e7Smrj 			    dma->dp_copybuf_size) + 1 + 2;
30237c478bd9Sstevel@tonic-gate 		} else {
302412f080e7Smrj 			copybuf_win = 0;
30257c478bd9Sstevel@tonic-gate 		}
302612f080e7Smrj 
302712f080e7Smrj 		/*
302812f080e7Smrj 		 * if we need windows because we have more cookies than the H/W
302912f080e7Smrj 		 * can handle, the number of windows we would need here would
303012f080e7Smrj 		 * be (cookie count / cookies count H/W supports) plus one for
303112f080e7Smrj 		 * remainder, and plus 2 to handle the extra pages on the trim
303212f080e7Smrj 		 * (see above comment about trim)
303312f080e7Smrj 		 */
303412f080e7Smrj 		if (attr->dma_attr_sgllen < sinfo->si_sgl_size) {
303512f080e7Smrj 			sglwin = ((sinfo->si_sgl_size / attr->dma_attr_sgllen)
303612f080e7Smrj 			    + 1) + 2;
30377c478bd9Sstevel@tonic-gate 		} else {
303812f080e7Smrj 			sglwin = 0;
30397c478bd9Sstevel@tonic-gate 		}
304012f080e7Smrj 
304112f080e7Smrj 		/*
304212f080e7Smrj 		 * if we need windows because we're binding more memory than the
304312f080e7Smrj 		 * H/W can transfer at once, the number of windows we would need
304412f080e7Smrj 		 * here would be (xfer count / max xfer H/W supports) plus one
304512f080e7Smrj 		 * for remainder, and plus 2 to handle the extra pages on the
304612f080e7Smrj 		 * trim (see above comment about trim)
304712f080e7Smrj 		 */
304812f080e7Smrj 		if (dma->dp_dma.dmao_size > dma->dp_maxxfer) {
304912f080e7Smrj 			maxxfer_win = (dma->dp_dma.dmao_size /
305012f080e7Smrj 			    dma->dp_maxxfer) + 1 + 2;
305112f080e7Smrj 		} else {
305212f080e7Smrj 			maxxfer_win = 0;
30537c478bd9Sstevel@tonic-gate 		}
305412f080e7Smrj 		dma->dp_max_win =  copybuf_win + sglwin + maxxfer_win;
305512f080e7Smrj 		ASSERT(dma->dp_max_win > 0);
305612f080e7Smrj 	}
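	/*
	 * Illustrative sketch (hypothetical values): with si_copybuf_req =
	 * 0x6000 and dp_copybuf_size = 0x2000, copybuf_win = 3 + 1 + 2 = 6;
	 * with si_sgl_size = 40 and dma_attr_sgllen = 17, sglwin =
	 * (40 / 17) + 1 + 2 = 5; with dmao_size = 0x100000 and dp_maxxfer =
	 * 0x40000, maxxfer_win = 4 + 1 + 2 = 7; dp_max_win = 6 + 5 + 7 = 18.
	 */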
305712f080e7Smrj 	win_state_size = dma->dp_max_win * sizeof (rootnex_window_t);
305812f080e7Smrj 
305912f080e7Smrj 	/*
306012f080e7Smrj 	 * Get space for window and potential copy buffer state. Before we
306112f080e7Smrj 	 * go and allocate memory, see if we can get away with using what's
306212f080e7Smrj 	 * left in the pre-allocated state or the dynamically allocated sgl.
306312f080e7Smrj 	 */
306412f080e7Smrj 	space_used = (uintptr_t)(sinfo->si_sgl_size *
306512f080e7Smrj 	    sizeof (ddi_dma_cookie_t));
306612f080e7Smrj 
306712f080e7Smrj 	/* if we dynamically allocated space for the cookies */
306812f080e7Smrj 	if (dma->dp_need_to_free_cookie) {
306912f080e7Smrj 		/* if we have more space in the pre-allocated buffer, use it */
307012f080e7Smrj 		ASSERT(space_used <= dma->dp_cookie_size);
307112f080e7Smrj 		if ((dma->dp_cookie_size - space_used) <=
307212f080e7Smrj 		    rootnex_state->r_prealloc_size) {
307312f080e7Smrj 			state_available = rootnex_state->r_prealloc_size;
307412f080e7Smrj 			windowp = (rootnex_window_t *)dma->dp_prealloc_buffer;
307512f080e7Smrj 
307612f080e7Smrj 		/*
307712f080e7Smrj 		 * else, we have more free space in the dynamically allocated
307812f080e7Smrj 		 * buffer, i.e. the buffer wasn't worst case fragmented so we
307912f080e7Smrj 		 * didn't need a lot of cookies.
308012f080e7Smrj 		 */
308112f080e7Smrj 		} else {
308212f080e7Smrj 			state_available = dma->dp_cookie_size - space_used;
308312f080e7Smrj 			windowp = (rootnex_window_t *)
308412f080e7Smrj 			    &dma->dp_cookies[sinfo->si_sgl_size];
308512f080e7Smrj 		}
308612f080e7Smrj 
308712f080e7Smrj 	/* we used the pre-allocated buffer */
308812f080e7Smrj 	} else {
308912f080e7Smrj 		ASSERT(space_used <= rootnex_state->r_prealloc_size);
309012f080e7Smrj 		state_available = rootnex_state->r_prealloc_size - space_used;
309112f080e7Smrj 		windowp = (rootnex_window_t *)
309212f080e7Smrj 		    &dma->dp_cookies[sinfo->si_sgl_size];
309312f080e7Smrj 	}
309412f080e7Smrj 
309512f080e7Smrj 	/*
309612f080e7Smrj 	 * figure out how much state we need to track the copy buffer. Add an
309712f080e7Smrj 	 * additional 8 bytes for pointer alignment later.
309812f080e7Smrj 	 */
309912f080e7Smrj 	if (dma->dp_copybuf_size > 0) {
310012f080e7Smrj 		copy_state_size = sinfo->si_max_pages *
310112f080e7Smrj 		    sizeof (rootnex_pgmap_t);
310212f080e7Smrj 	} else {
310312f080e7Smrj 		copy_state_size = 0;
310412f080e7Smrj 	}
310512f080e7Smrj 	/* add an additional 8 bytes for pointer alignment */
310612f080e7Smrj 	space_needed = win_state_size + copy_state_size + 0x8;
310712f080e7Smrj 
310812f080e7Smrj 	/* if we have enough space already, use it */
310912f080e7Smrj 	if (state_available >= space_needed) {
311012f080e7Smrj 		dma->dp_window = windowp;
311112f080e7Smrj 		dma->dp_need_to_free_window = B_FALSE;
311212f080e7Smrj 
311312f080e7Smrj 	/* not enough space, need to allocate more. */
311412f080e7Smrj 	} else {
311512f080e7Smrj 		dma->dp_window = kmem_alloc(space_needed, kmflag);
311612f080e7Smrj 		if (dma->dp_window == NULL) {
311712f080e7Smrj 			return (DDI_DMA_NORESOURCES);
311812f080e7Smrj 		}
311912f080e7Smrj 		dma->dp_need_to_free_window = B_TRUE;
312012f080e7Smrj 		dma->dp_window_size = space_needed;
312112f080e7Smrj 		DTRACE_PROBE2(rootnex__bind__sp__alloc, dev_info_t *,
312212f080e7Smrj 		    dma->dp_dip, size_t, space_needed);
312312f080e7Smrj 	}
312412f080e7Smrj 
312512f080e7Smrj 	/*
312612f080e7Smrj 	 * we allocate copy buffer state and window state at the same time.
312712f080e7Smrj 	 * setup our copy buffer state pointers. Make sure it's aligned.
312812f080e7Smrj 	 */
312912f080e7Smrj 	if (dma->dp_copybuf_size > 0) {
313012f080e7Smrj 		dma->dp_pgmap = (rootnex_pgmap_t *)(((uintptr_t)
313112f080e7Smrj 		    &dma->dp_window[dma->dp_max_win] + 0x7) & ~0x7);
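		/*
		 * Illustrative sketch: the expression above rounds the end of
		 * the window state up to an 8-byte boundary, e.g. a
		 * hypothetical address of 0x1233 becomes
		 * (0x1233 + 0x7) & ~0x7 = 0x1238.
		 */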
313212f080e7Smrj 
313312f080e7Smrj #if !defined(__amd64)
313412f080e7Smrj 		/*
313512f080e7Smrj 		 * make sure all pm_mapped, pm_vaddr, and pm_pp are set to
313612f080e7Smrj 		 * false/NULL. Should be quicker to bzero vs loop and set.
313712f080e7Smrj 		 */
313812f080e7Smrj 		bzero(dma->dp_pgmap, copy_state_size);
313912f080e7Smrj #endif
314012f080e7Smrj 	} else {
314112f080e7Smrj 		dma->dp_pgmap = NULL;
314212f080e7Smrj 	}
314312f080e7Smrj 
314412f080e7Smrj 	return (DDI_SUCCESS);
314512f080e7Smrj }
314612f080e7Smrj 
314712f080e7Smrj 
314812f080e7Smrj /*
314912f080e7Smrj  * rootnex_teardown_copybuf()
315012f080e7Smrj  *    cleans up after rootnex_setup_copybuf()
315112f080e7Smrj  */
315212f080e7Smrj static void
315312f080e7Smrj rootnex_teardown_copybuf(rootnex_dma_t *dma)
315412f080e7Smrj {
315512f080e7Smrj #if !defined(__amd64)
315612f080e7Smrj 	int i;
315712f080e7Smrj 
315812f080e7Smrj 	/*
315912f080e7Smrj 	 * if we allocated kernel heap VMEM space, go through all the pages and
316012f080e7Smrj 	 * map out any of the ones that were mapped into the kernel heap VMEM
316112f080e7Smrj 	 * arena. Then free the VMEM space.
316212f080e7Smrj 	 */
316312f080e7Smrj 	if (dma->dp_kva != NULL) {
316412f080e7Smrj 		for (i = 0; i < dma->dp_sglinfo.si_max_pages; i++) {
316512f080e7Smrj 			if (dma->dp_pgmap[i].pm_mapped) {
316612f080e7Smrj 				hat_unload(kas.a_hat, dma->dp_pgmap[i].pm_kaddr,
316712f080e7Smrj 				    MMU_PAGESIZE, HAT_UNLOAD);
316812f080e7Smrj 				dma->dp_pgmap[i].pm_mapped = B_FALSE;
316912f080e7Smrj 			}
317012f080e7Smrj 		}
317112f080e7Smrj 
317212f080e7Smrj 		vmem_free(heap_arena, dma->dp_kva, dma->dp_copybuf_size);
317312f080e7Smrj 	}
317412f080e7Smrj 
317512f080e7Smrj #endif
317612f080e7Smrj 
317712f080e7Smrj 	/* if we allocated a copy buffer, free it */
317812f080e7Smrj 	if (dma->dp_cbaddr != NULL) {
31797b93957cSeota 		i_ddi_mem_free(dma->dp_cbaddr, NULL);
318012f080e7Smrj 	}
318112f080e7Smrj }
318212f080e7Smrj 
318312f080e7Smrj 
318412f080e7Smrj /*
318512f080e7Smrj  * rootnex_teardown_windows()
318612f080e7Smrj  *    cleans up after rootnex_setup_windows()
318712f080e7Smrj  */
318812f080e7Smrj static void
318912f080e7Smrj rootnex_teardown_windows(rootnex_dma_t *dma)
319012f080e7Smrj {
319112f080e7Smrj 	/*
319212f080e7Smrj 	 * if we had to allocate window state on the last bind (because we
319312f080e7Smrj 	 * didn't have enough pre-allocated space in the handle), free it.
319412f080e7Smrj 	 */
319512f080e7Smrj 	if (dma->dp_need_to_free_window) {
319612f080e7Smrj 		kmem_free(dma->dp_window, dma->dp_window_size);
319712f080e7Smrj 	}
319812f080e7Smrj }
319912f080e7Smrj 
320012f080e7Smrj 
320112f080e7Smrj /*
320212f080e7Smrj  * rootnex_init_win()
320312f080e7Smrj  *    Called in bind slow path during creation of a new window. Initializes
320412f080e7Smrj  *    window state to default values.
320512f080e7Smrj  */
320612f080e7Smrj /*ARGSUSED*/
320712f080e7Smrj static void
320812f080e7Smrj rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
320912f080e7Smrj     rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset)
321012f080e7Smrj {
321112f080e7Smrj 	hp->dmai_nwin++;
321212f080e7Smrj 	window->wd_dosync = B_FALSE;
321312f080e7Smrj 	window->wd_offset = cur_offset;
321412f080e7Smrj 	window->wd_size = 0;
321512f080e7Smrj 	window->wd_first_cookie = cookie;
321612f080e7Smrj 	window->wd_cookie_cnt = 0;
321712f080e7Smrj 	window->wd_trim.tr_trim_first = B_FALSE;
321812f080e7Smrj 	window->wd_trim.tr_trim_last = B_FALSE;
321912f080e7Smrj 	window->wd_trim.tr_first_copybuf_win = B_FALSE;
322012f080e7Smrj 	window->wd_trim.tr_last_copybuf_win = B_FALSE;
322112f080e7Smrj #if !defined(__amd64)
322212f080e7Smrj 	window->wd_remap_copybuf = dma->dp_cb_remaping;
322312f080e7Smrj #endif
322412f080e7Smrj }
322512f080e7Smrj 
322612f080e7Smrj 
322712f080e7Smrj /*
322812f080e7Smrj  * rootnex_setup_cookie()
322912f080e7Smrj  *    Called in the bind slow path when the sgl uses the copy buffer. If any of
323012f080e7Smrj  *    the sgl uses the copy buffer, we need to go through each cookie, figure
323112f080e7Smrj  *    out if it uses the copy buffer, and if it does, save away everything we'll
323212f080e7Smrj  *    need during sync.
323312f080e7Smrj  */
323412f080e7Smrj static void
323512f080e7Smrj rootnex_setup_cookie(ddi_dma_obj_t *dmar_object, rootnex_dma_t *dma,
323612f080e7Smrj     ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used,
323712f080e7Smrj     page_t **cur_pp)
323812f080e7Smrj {
323912f080e7Smrj 	boolean_t copybuf_sz_power_2;
324012f080e7Smrj 	rootnex_sglinfo_t *sinfo;
3241843e1988Sjohnlev 	paddr_t paddr;
324212f080e7Smrj 	uint_t pidx;
324312f080e7Smrj 	uint_t pcnt;
324412f080e7Smrj 	off_t poff;
324512f080e7Smrj #if defined(__amd64)
324612f080e7Smrj 	pfn_t pfn;
324712f080e7Smrj #else
324812f080e7Smrj 	page_t **pplist;
324912f080e7Smrj #endif
325012f080e7Smrj 
325112f080e7Smrj 	sinfo = &dma->dp_sglinfo;
325212f080e7Smrj 
325312f080e7Smrj 	/*
325412f080e7Smrj 	 * Calculate the page index relative to the start of the buffer. The
325512f080e7Smrj 	 * index to the current page for our buffer is the offset into the
325612f080e7Smrj 	 * first page of the buffer plus our current offset into the buffer
325712f080e7Smrj 	 * itself, shifted of course...
325812f080e7Smrj 	 */
325912f080e7Smrj 	pidx = (sinfo->si_buf_offset + cur_offset) >> MMU_PAGESHIFT;
326012f080e7Smrj 	ASSERT(pidx < sinfo->si_max_pages);
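	/*
	 * Illustrative sketch (hypothetical offsets, 4K pages): with
	 * si_buf_offset = 0x800 and cur_offset = 0x1800, pidx =
	 * (0x800 + 0x1800) >> MMU_PAGESHIFT = 0x2000 >> 12 = 2, i.e. the
	 * third page of the buffer.
	 */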
326112f080e7Smrj 
326212f080e7Smrj 	/* if this cookie uses the copy buffer */
326312f080e7Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
326412f080e7Smrj 		/*
326512f080e7Smrj 		 * NOTE: we know that since this cookie uses the copy buffer, it
326612f080e7Smrj 		 * is <= MMU_PAGESIZE.
326712f080e7Smrj 		 */
326812f080e7Smrj 
326912f080e7Smrj 		/*
327012f080e7Smrj 		 * get the offset into the page. For the 64-bit kernel, get the
327112f080e7Smrj 		 * pfn which we'll use with seg kpm.
327212f080e7Smrj 		 */
3273843e1988Sjohnlev 		poff = cookie->dmac_laddress & MMU_PAGEOFFSET;
327412f080e7Smrj #if defined(__amd64)
3275843e1988Sjohnlev 		/* mfn_to_pfn() is a NOP on i86pc */
3276843e1988Sjohnlev 		pfn = mfn_to_pfn(cookie->dmac_laddress >> MMU_PAGESHIFT);
3277843e1988Sjohnlev #endif /* __amd64 */
327812f080e7Smrj 
327912f080e7Smrj 		/* figure out if the copybuf size is a power of 2 */
328012f080e7Smrj 		if (dma->dp_copybuf_size & (dma->dp_copybuf_size - 1)) {
328112f080e7Smrj 			copybuf_sz_power_2 = B_FALSE;
328212f080e7Smrj 		} else {
328312f080e7Smrj 			copybuf_sz_power_2 = B_TRUE;
328412f080e7Smrj 		}
328512f080e7Smrj 
328612f080e7Smrj 		/* This page uses the copy buffer */
328712f080e7Smrj 		dma->dp_pgmap[pidx].pm_uses_copybuf = B_TRUE;
328812f080e7Smrj 
328912f080e7Smrj 		/*
329012f080e7Smrj 		 * save the copy buffer KVA that we'll use with this page.
329112f080e7Smrj 		 * if we still fit within the copybuf, it's a simple add.
329212f080e7Smrj 		 * otherwise, we need to wrap around using & or % accordingly.
329312f080e7Smrj 		 */
329412f080e7Smrj 		if ((*copybuf_used + MMU_PAGESIZE) <= dma->dp_copybuf_size) {
329512f080e7Smrj 			dma->dp_pgmap[pidx].pm_cbaddr = dma->dp_cbaddr +
329612f080e7Smrj 			    *copybuf_used;
329712f080e7Smrj 		} else {
329812f080e7Smrj 			if (copybuf_sz_power_2) {
329912f080e7Smrj 				dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
330012f080e7Smrj 				    (uintptr_t)dma->dp_cbaddr +
330112f080e7Smrj 				    (*copybuf_used &
330212f080e7Smrj 				    (dma->dp_copybuf_size - 1)));
330312f080e7Smrj 			} else {
330412f080e7Smrj 				dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
330512f080e7Smrj 				    (uintptr_t)dma->dp_cbaddr +
330612f080e7Smrj 				    (*copybuf_used % dma->dp_copybuf_size));
330712f080e7Smrj 			}
330812f080e7Smrj 		}
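		/*
		 * Illustrative sketch (hypothetical sizes): with a 0x2000
		 * (power-of-2) copybuf and *copybuf_used = 0x5000, this page
		 * maps to dp_cbaddr + (0x5000 & 0x1fff) = dp_cbaddr + 0x1000;
		 * with a 0x3000 copybuf it maps to dp_cbaddr +
		 * (0x5000 % 0x3000) = dp_cbaddr + 0x2000.
		 */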
330912f080e7Smrj 
331012f080e7Smrj 		/*
331112f080e7Smrj 		 * overwrite the cookie's physical address with the physical
331212f080e7Smrj 		 * address of the copy buffer page that we will use.
331412f080e7Smrj 		 */
3315843e1988Sjohnlev 		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat,
331612f080e7Smrj 		    dma->dp_pgmap[pidx].pm_cbaddr)) + poff;
331712f080e7Smrj 
3318843e1988Sjohnlev #ifdef __xpv
3319843e1988Sjohnlev 		/*
3320843e1988Sjohnlev 		 * If we're dom0, we're using a real device so we need to load
3321843e1988Sjohnlev 		 * the cookies with MAs instead of PAs.
3322843e1988Sjohnlev 		 */
3323843e1988Sjohnlev 		cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
3324843e1988Sjohnlev #else
3325843e1988Sjohnlev 		cookie->dmac_laddress = paddr;
3326843e1988Sjohnlev #endif
3327843e1988Sjohnlev 
332812f080e7Smrj 		/* if we have a kernel VA, it's easy, just save that address */
332912f080e7Smrj 		if ((dmar_object->dmao_type != DMA_OTYP_PAGES) &&
333012f080e7Smrj 		    (sinfo->si_asp == &kas)) {
333112f080e7Smrj 			/*
333212f080e7Smrj 			 * save away the page aligned virtual address of the
333312f080e7Smrj 			 * driver buffer. Offsets are handled in the sync code.
333412f080e7Smrj 			 */
333512f080e7Smrj 			dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)(((uintptr_t)
333612f080e7Smrj 			    dmar_object->dmao_obj.virt_obj.v_addr + cur_offset)
333712f080e7Smrj 			    & MMU_PAGEMASK);
333812f080e7Smrj #if !defined(__amd64)
333912f080e7Smrj 			/*
334012f080e7Smrj 			 * we didn't need to, and will never need to, map this
334112f080e7Smrj 			 * page.
334212f080e7Smrj 			 */
334312f080e7Smrj 			dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
334412f080e7Smrj #endif
334512f080e7Smrj 
334612f080e7Smrj 		/* we don't have a kernel VA. We need one for the bcopy. */
334712f080e7Smrj 		} else {
334812f080e7Smrj #if defined(__amd64)
334912f080e7Smrj 			/*
335012f080e7Smrj 			 * for the 64-bit kernel, it's easy. We use seg kpm to
335112f080e7Smrj 			 * get a Kernel VA for the corresponding pfn.
335212f080e7Smrj 			 */
335312f080e7Smrj 			dma->dp_pgmap[pidx].pm_kaddr = hat_kpm_pfn2va(pfn);
335412f080e7Smrj #else
335512f080e7Smrj 			/*
335612f080e7Smrj 			 * for the 32-bit kernel, this is a pain. First we'll
335712f080e7Smrj 			 * save away the page_t or user VA for this page. This
335812f080e7Smrj 			 * is needed in rootnex_dma_win() when we switch to a
335912f080e7Smrj 			 * new window which requires us to re-map the copy
336012f080e7Smrj 			 * buffer.
336112f080e7Smrj 			 */
336212f080e7Smrj 			pplist = dmar_object->dmao_obj.virt_obj.v_priv;
336312f080e7Smrj 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
336412f080e7Smrj 				dma->dp_pgmap[pidx].pm_pp = *cur_pp;
336512f080e7Smrj 				dma->dp_pgmap[pidx].pm_vaddr = NULL;
336612f080e7Smrj 			} else if (pplist != NULL) {
336712f080e7Smrj 				dma->dp_pgmap[pidx].pm_pp = pplist[pidx];
336812f080e7Smrj 				dma->dp_pgmap[pidx].pm_vaddr = NULL;
336912f080e7Smrj 			} else {
337012f080e7Smrj 				dma->dp_pgmap[pidx].pm_pp = NULL;
337112f080e7Smrj 				dma->dp_pgmap[pidx].pm_vaddr = (caddr_t)
337212f080e7Smrj 				    (((uintptr_t)
337312f080e7Smrj 				    dmar_object->dmao_obj.virt_obj.v_addr +
337412f080e7Smrj 				    cur_offset) & MMU_PAGEMASK);
337512f080e7Smrj 			}
337612f080e7Smrj 
337712f080e7Smrj 			/*
337812f080e7Smrj 			 * save away the page aligned virtual address which was
337912f080e7Smrj 			 * allocated from the kernel heap arena (taking into
338012f080e7Smrj 			 * account if we need more copy buffer than we alloced
338112f080e7Smrj 			 * and use multiple windows to handle this, i.e. &,%).
338212f080e7Smrj 			 * NOTE: there isn't any physical memory backing this
338312f080e7Smrj 			 * virtual address space currently.
338412f080e7Smrj 			 */
338512f080e7Smrj 			if ((*copybuf_used + MMU_PAGESIZE) <=
338612f080e7Smrj 			    dma->dp_copybuf_size) {
338712f080e7Smrj 				dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
338812f080e7Smrj 				    (((uintptr_t)dma->dp_kva + *copybuf_used) &
338912f080e7Smrj 				    MMU_PAGEMASK);
339012f080e7Smrj 			} else {
339112f080e7Smrj 				if (copybuf_sz_power_2) {
339212f080e7Smrj 					dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
339312f080e7Smrj 					    (((uintptr_t)dma->dp_kva +
339412f080e7Smrj 					    (*copybuf_used &
339512f080e7Smrj 					    (dma->dp_copybuf_size - 1))) &
339612f080e7Smrj 					    MMU_PAGEMASK);
339712f080e7Smrj 				} else {
339812f080e7Smrj 					dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
339912f080e7Smrj 					    (((uintptr_t)dma->dp_kva +
340012f080e7Smrj 					    (*copybuf_used %
340112f080e7Smrj 					    dma->dp_copybuf_size)) &
340212f080e7Smrj 					    MMU_PAGEMASK);
340312f080e7Smrj 				}
340412f080e7Smrj 			}
340512f080e7Smrj 
340612f080e7Smrj 			/*
340712f080e7Smrj 			 * if we haven't used up the available copy buffer yet,
340812f080e7Smrj 			 * map the kva to the physical page.
340912f080e7Smrj 			 */
341012f080e7Smrj 			if (!dma->dp_cb_remaping && ((*copybuf_used +
341112f080e7Smrj 			    MMU_PAGESIZE) <= dma->dp_copybuf_size)) {
341212f080e7Smrj 				dma->dp_pgmap[pidx].pm_mapped = B_TRUE;
341312f080e7Smrj 				if (dma->dp_pgmap[pidx].pm_pp != NULL) {
341412f080e7Smrj 					i86_pp_map(dma->dp_pgmap[pidx].pm_pp,
341512f080e7Smrj 					    dma->dp_pgmap[pidx].pm_kaddr);
341612f080e7Smrj 				} else {
341712f080e7Smrj 					i86_va_map(dma->dp_pgmap[pidx].pm_vaddr,
341812f080e7Smrj 					    sinfo->si_asp,
341912f080e7Smrj 					    dma->dp_pgmap[pidx].pm_kaddr);
342012f080e7Smrj 				}
342112f080e7Smrj 
342212f080e7Smrj 			/*
342312f080e7Smrj 			 * we've used up the available copy buffer, this page
342412f080e7Smrj 			 * will have to be mapped during rootnex_dma_win() when
342512f080e7Smrj 			 * we switch to a new window which requires us to
342612f080e7Smrj 			 * re-map the copy buffer. (32-bit kernel only)
342712f080e7Smrj 			 */
342812f080e7Smrj 			} else {
342912f080e7Smrj 				dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
343012f080e7Smrj 			}
343112f080e7Smrj #endif
343212f080e7Smrj 			/* go to the next page_t */
343312f080e7Smrj 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
343412f080e7Smrj 				*cur_pp = (*cur_pp)->p_next;
343512f080e7Smrj 			}
343612f080e7Smrj 		}
343712f080e7Smrj 
343812f080e7Smrj 		/* add to the copy buffer count */
343912f080e7Smrj 		*copybuf_used += MMU_PAGESIZE;
344012f080e7Smrj 
344112f080e7Smrj 	/*
344212f080e7Smrj 	 * This cookie doesn't use the copy buffer. Walk through the pages this
344312f080e7Smrj 	 * cookie occupies to reflect this.
344412f080e7Smrj 	 */
344512f080e7Smrj 	} else {
344612f080e7Smrj 		/*
344712f080e7Smrj 		 * figure out how many pages the cookie occupies. We need to
344812f080e7Smrj 		 * use the original page offset of the buffer and the cookie's
344912f080e7Smrj 		 * offset in the buffer to do this.
345012f080e7Smrj 		 */
345112f080e7Smrj 		poff = (sinfo->si_buf_offset + cur_offset) & MMU_PAGEOFFSET;
345212f080e7Smrj 		pcnt = mmu_btopr(cookie->dmac_size + poff);
345312f080e7Smrj 
345412f080e7Smrj 		while (pcnt > 0) {
345512f080e7Smrj #if !defined(__amd64)
345612f080e7Smrj 			/*
345712f080e7Smrj 			 * the 32-bit kernel doesn't have seg kpm, so we need
345812f080e7Smrj 			 * to map in the driver buffer (if it didn't come down
345912f080e7Smrj 			 * with a kernel VA) on the fly. Since this page doesn't
346012f080e7Smrj 			 * use the copy buffer, it's not, nor will it ever, have
346112f080e7Smrj 			 * to be mapped in.
346212f080e7Smrj 			 */
346312f080e7Smrj 			dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
346412f080e7Smrj #endif
346512f080e7Smrj 			dma->dp_pgmap[pidx].pm_uses_copybuf = B_FALSE;
346612f080e7Smrj 
346712f080e7Smrj 			/*
346812f080e7Smrj 			 * we need to update pidx and cur_pp or we'll lose
346912f080e7Smrj 			 * track of where we are.
347012f080e7Smrj 			 */
347112f080e7Smrj 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
347212f080e7Smrj 				*cur_pp = (*cur_pp)->p_next;
347312f080e7Smrj 			}
347412f080e7Smrj 			pidx++;
347512f080e7Smrj 			pcnt--;
347612f080e7Smrj 		}
347712f080e7Smrj 	}
347812f080e7Smrj }
347912f080e7Smrj 
348012f080e7Smrj 
348112f080e7Smrj /*
348212f080e7Smrj  * rootnex_sgllen_window_boundary()
348312f080e7Smrj  *    Called in the bind slow path when the next cookie causes us to exceed (in
348412f080e7Smrj  *    this case == since we start at 0 and sgllen starts at 1) the maximum sgl
348512f080e7Smrj  *    length supported by the DMA H/W.
348612f080e7Smrj  */
348712f080e7Smrj static int
348812f080e7Smrj rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
348912f080e7Smrj     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, ddi_dma_attr_t *attr,
349012f080e7Smrj     off_t cur_offset)
349112f080e7Smrj {
349212f080e7Smrj 	off_t new_offset;
349312f080e7Smrj 	size_t trim_sz;
349412f080e7Smrj 	off_t coffset;
349512f080e7Smrj 
349612f080e7Smrj 
349712f080e7Smrj 	/*
349812f080e7Smrj 	 * if we know we'll never have to trim, it's pretty easy. Just move to
349912f080e7Smrj 	 * the next window and init it. We're done.
350012f080e7Smrj 	 */
350112f080e7Smrj 	if (!dma->dp_trim_required) {
350212f080e7Smrj 		(*windowp)++;
350312f080e7Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
350412f080e7Smrj 		(*windowp)->wd_cookie_cnt++;
350512f080e7Smrj 		(*windowp)->wd_size = cookie->dmac_size;
350612f080e7Smrj 		return (DDI_SUCCESS);
350712f080e7Smrj 	}
350812f080e7Smrj 
350912f080e7Smrj 	/* figure out how much we need to trim from the window */
351012f080e7Smrj 	ASSERT(attr->dma_attr_granular != 0);
351112f080e7Smrj 	if (dma->dp_granularity_power_2) {
351212f080e7Smrj 		trim_sz = (*windowp)->wd_size & (attr->dma_attr_granular - 1);
351312f080e7Smrj 	} else {
351412f080e7Smrj 		trim_sz = (*windowp)->wd_size % attr->dma_attr_granular;
351512f080e7Smrj 	}
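	/*
	 * Illustrative sketch (hypothetical values): with wd_size = 0x2700
	 * and a power-of-2 granularity of 0x200, trim_sz = 0x2700 & 0x1ff =
	 * 0x100, i.e. 0x100 bytes must be moved into the next window to keep
	 * this window a whole multiple of the granularity.
	 */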
351612f080e7Smrj 
351712f080e7Smrj 	/* The window's a whole multiple of granularity. We're done */
351812f080e7Smrj 	if (trim_sz == 0) {
351912f080e7Smrj 		(*windowp)++;
352012f080e7Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
352112f080e7Smrj 		(*windowp)->wd_cookie_cnt++;
352212f080e7Smrj 		(*windowp)->wd_size = cookie->dmac_size;
352312f080e7Smrj 		return (DDI_SUCCESS);
352412f080e7Smrj 	}
352512f080e7Smrj 
352612f080e7Smrj 	/*
352712f080e7Smrj 	 * The window's not a whole multiple of granularity. Since we know this
352812f080e7Smrj 	 * is due to the sgllen, we need to go back to the last cookie and trim
352912f080e7Smrj 	 * that one, add the left over part of the old cookie into the new
353012f080e7Smrj 	 * window, and then add in the new cookie into the new window.
353112f080e7Smrj 	 */
353212f080e7Smrj 
353312f080e7Smrj 	/*
353412f080e7Smrj 	 * make sure the driver isn't making us do something bad... Trimming and
353512f080e7Smrj 	 * sgllen == 1 don't go together.
353612f080e7Smrj 	 */
353712f080e7Smrj 	if (attr->dma_attr_sgllen == 1) {
353812f080e7Smrj 		return (DDI_DMA_NOMAPPING);
353912f080e7Smrj 	}
354012f080e7Smrj 
354112f080e7Smrj 	/*
354212f080e7Smrj 	 * first, setup the current window to account for the trim. Need to go
354312f080e7Smrj 	 * back to the last cookie for this.
354412f080e7Smrj 	 */
354512f080e7Smrj 	cookie--;
354612f080e7Smrj 	(*windowp)->wd_trim.tr_trim_last = B_TRUE;
354712f080e7Smrj 	(*windowp)->wd_trim.tr_last_cookie = cookie;
3548843e1988Sjohnlev 	(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
354912f080e7Smrj 	ASSERT(cookie->dmac_size > trim_sz);
355012f080e7Smrj 	(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
355112f080e7Smrj 	(*windowp)->wd_size -= trim_sz;
355212f080e7Smrj 
355312f080e7Smrj 	/* save the buffer offsets for the next window */
355412f080e7Smrj 	coffset = cookie->dmac_size - trim_sz;
355512f080e7Smrj 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
355612f080e7Smrj 
355712f080e7Smrj 	/*
355812f080e7Smrj 	 * set this now in case this is the first window. All other cases are
355912f080e7Smrj 	 * set in dma_win()
356012f080e7Smrj 	 */
356112f080e7Smrj 	cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
356212f080e7Smrj 
356312f080e7Smrj 	/*
356412f080e7Smrj 	 * initialize the next window using what's left over in the previous
356512f080e7Smrj 	 * cookie.
356612f080e7Smrj 	 */
356712f080e7Smrj 	(*windowp)++;
356812f080e7Smrj 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
356912f080e7Smrj 	(*windowp)->wd_cookie_cnt++;
357012f080e7Smrj 	(*windowp)->wd_trim.tr_trim_first = B_TRUE;
3571843e1988Sjohnlev 	(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset;
357212f080e7Smrj 	(*windowp)->wd_trim.tr_first_size = trim_sz;
357312f080e7Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
357412f080e7Smrj 		(*windowp)->wd_dosync = B_TRUE;
357512f080e7Smrj 	}
357612f080e7Smrj 
	/*
	 * Now go back to the current cookie and add it to the new window.
	 * Set the new window size to what was left over from the previous
	 * cookie plus what's in the current cookie.
	 */
358212f080e7Smrj 	cookie++;
358312f080e7Smrj 	(*windowp)->wd_cookie_cnt++;
358412f080e7Smrj 	(*windowp)->wd_size = trim_sz + cookie->dmac_size;
358512f080e7Smrj 
	/*
	 * The trim plus the next cookie could put us over maxxfer (a single
	 * cookie can be at most maxxfer in size). Handle that case.
	 */
359012f080e7Smrj 	if ((*windowp)->wd_size > dma->dp_maxxfer) {
359112f080e7Smrj 		/*
359212f080e7Smrj 		 * maxxfer is already a whole multiple of granularity, and this
359312f080e7Smrj 		 * trim will be <= the previous trim (since a cookie can't be
359412f080e7Smrj 		 * larger than maxxfer). Make things simple here.
359512f080e7Smrj 		 */
359612f080e7Smrj 		trim_sz = (*windowp)->wd_size - dma->dp_maxxfer;
359712f080e7Smrj 		(*windowp)->wd_trim.tr_trim_last = B_TRUE;
359812f080e7Smrj 		(*windowp)->wd_trim.tr_last_cookie = cookie;
3599843e1988Sjohnlev 		(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
360012f080e7Smrj 		(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
360112f080e7Smrj 		(*windowp)->wd_size -= trim_sz;
360212f080e7Smrj 		ASSERT((*windowp)->wd_size == dma->dp_maxxfer);
360312f080e7Smrj 
360412f080e7Smrj 		/* save the buffer offsets for the next window */
360512f080e7Smrj 		coffset = cookie->dmac_size - trim_sz;
360612f080e7Smrj 		new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
360712f080e7Smrj 
360812f080e7Smrj 		/* setup the next window */
360912f080e7Smrj 		(*windowp)++;
361012f080e7Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
361112f080e7Smrj 		(*windowp)->wd_cookie_cnt++;
361212f080e7Smrj 		(*windowp)->wd_trim.tr_trim_first = B_TRUE;
3613843e1988Sjohnlev 		(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress +
361412f080e7Smrj 		    coffset;
361512f080e7Smrj 		(*windowp)->wd_trim.tr_first_size = trim_sz;
361612f080e7Smrj 	}
361712f080e7Smrj 
361812f080e7Smrj 	return (DDI_SUCCESS);
361912f080e7Smrj }
362012f080e7Smrj 
362112f080e7Smrj 
362212f080e7Smrj /*
362312f080e7Smrj  * rootnex_copybuf_window_boundary()
362412f080e7Smrj  *    Called in bind slowpath when we get to a window boundary because we used
362512f080e7Smrj  *    up all the copy buffer that we have.
362612f080e7Smrj  */
362712f080e7Smrj static int
362812f080e7Smrj rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
362912f080e7Smrj     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, off_t cur_offset,
363012f080e7Smrj     size_t *copybuf_used)
363112f080e7Smrj {
363212f080e7Smrj 	rootnex_sglinfo_t *sinfo;
363312f080e7Smrj 	off_t new_offset;
363412f080e7Smrj 	size_t trim_sz;
3635843e1988Sjohnlev 	paddr_t paddr;
363612f080e7Smrj 	off_t coffset;
363712f080e7Smrj 	uint_t pidx;
363812f080e7Smrj 	off_t poff;
363912f080e7Smrj 
364012f080e7Smrj 
364112f080e7Smrj 	sinfo = &dma->dp_sglinfo;
364212f080e7Smrj 
364312f080e7Smrj 	/*
364412f080e7Smrj 	 * the copy buffer should be a whole multiple of page size. We know that
364512f080e7Smrj 	 * this cookie is <= MMU_PAGESIZE.
364612f080e7Smrj 	 */
364712f080e7Smrj 	ASSERT(cookie->dmac_size <= MMU_PAGESIZE);
364812f080e7Smrj 
	/*
	 * From now on, all new windows in this bind need to be re-mapped
	 * during ddi_dma_getwin() (32-bit kernel only), i.e. we ran out of
	 * copybuf space...
	 */
365412f080e7Smrj #if !defined(__amd64)
365512f080e7Smrj 	dma->dp_cb_remaping = B_TRUE;
365612f080e7Smrj #endif
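	/*
	 * (Presumably the 64-bit kernel is exempt because it can always
	 * reach copy buffer pages through its direct mapping, while the
	 * 32-bit kernel has to recycle a limited KVA window and therefore
	 * must remap as windows are switched.)
	 */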
365712f080e7Smrj 
365812f080e7Smrj 	/* reset copybuf used */
365912f080e7Smrj 	*copybuf_used = 0;
366012f080e7Smrj 
366112f080e7Smrj 	/*
366212f080e7Smrj 	 * if we don't have to trim (since granularity is set to 1), go to the
366312f080e7Smrj 	 * next window and add the current cookie to it. We know the current
366412f080e7Smrj 	 * cookie uses the copy buffer since we're in this code path.
366512f080e7Smrj 	 */
366612f080e7Smrj 	if (!dma->dp_trim_required) {
366712f080e7Smrj 		(*windowp)++;
366812f080e7Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
366912f080e7Smrj 
367012f080e7Smrj 		/* Add this cookie to the new window */
367112f080e7Smrj 		(*windowp)->wd_cookie_cnt++;
367212f080e7Smrj 		(*windowp)->wd_size += cookie->dmac_size;
367312f080e7Smrj 		*copybuf_used += MMU_PAGESIZE;
367412f080e7Smrj 		return (DDI_SUCCESS);
367512f080e7Smrj 	}
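	/*
	 * Note the accounting above: a cookie routed through the copy buffer
	 * always consumes one full page of copybuf, even when dmac_size is
	 * smaller than MMU_PAGESIZE.
	 */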
367612f080e7Smrj 
367712f080e7Smrj 	/*
367812f080e7Smrj 	 * *** may need to trim, figure it out.
367912f080e7Smrj 	 */
368012f080e7Smrj 
368112f080e7Smrj 	/* figure out how much we need to trim from the window */
368212f080e7Smrj 	if (dma->dp_granularity_power_2) {
368312f080e7Smrj 		trim_sz = (*windowp)->wd_size &
368412f080e7Smrj 		    (hp->dmai_attr.dma_attr_granular - 1);
368512f080e7Smrj 	} else {
368612f080e7Smrj 		trim_sz = (*windowp)->wd_size % hp->dmai_attr.dma_attr_granular;
368712f080e7Smrj 	}
368812f080e7Smrj 
368912f080e7Smrj 	/*
369012f080e7Smrj 	 * if the window's a whole multiple of granularity, go to the next
369112f080e7Smrj 	 * window, init it, then add in the current cookie. We know the current
369212f080e7Smrj 	 * cookie uses the copy buffer since we're in this code path.
369312f080e7Smrj 	 */
369412f080e7Smrj 	if (trim_sz == 0) {
369512f080e7Smrj 		(*windowp)++;
369612f080e7Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
369712f080e7Smrj 
369812f080e7Smrj 		/* Add this cookie to the new window */
369912f080e7Smrj 		(*windowp)->wd_cookie_cnt++;
370012f080e7Smrj 		(*windowp)->wd_size += cookie->dmac_size;
370112f080e7Smrj 		*copybuf_used += MMU_PAGESIZE;
370212f080e7Smrj 		return (DDI_SUCCESS);
370312f080e7Smrj 	}
370412f080e7Smrj 
	/*
	 * *** We figured it out; we definitely need to trim.
	 */
370812f080e7Smrj 
370912f080e7Smrj 	/*
371012f080e7Smrj 	 * make sure the driver isn't making us do something bad...
371112f080e7Smrj 	 * Trimming and sgllen == 1 don't go together.
371212f080e7Smrj 	 */
371312f080e7Smrj 	if (hp->dmai_attr.dma_attr_sgllen == 1) {
371412f080e7Smrj 		return (DDI_DMA_NOMAPPING);
371512f080e7Smrj 	}
371612f080e7Smrj 
	/*
	 * First, set up the current window to account for the trim. We need
	 * to go back to the last cookie for this. Some of the last cookie
	 * will be in the current window and some of it will be in the new
	 * window; all of the current cookie will be in the new window.
	 */
372312f080e7Smrj 	cookie--;
372412f080e7Smrj 	(*windowp)->wd_trim.tr_trim_last = B_TRUE;
372512f080e7Smrj 	(*windowp)->wd_trim.tr_last_cookie = cookie;
3726843e1988Sjohnlev 	(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
372712f080e7Smrj 	ASSERT(cookie->dmac_size > trim_sz);
372812f080e7Smrj 	(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
372912f080e7Smrj 	(*windowp)->wd_size -= trim_sz;
373012f080e7Smrj 
	/*
	 * We're trimming the last cookie (not the current cookie). That last
	 * cookie may or may not have been using the copy buffer (we know the
	 * cookie passed in uses the copy buffer since we're in this code
	 * path).
	 *
	 * If the last cookie doesn't use the copy buffer, there's nothing
	 * special to do. However, if it does use the copy buffer, it will be
	 * both the last page in the current window and the first page in the
	 * next window. Since we are reusing the copy buffer (and KVA space
	 * on the 32-bit kernel), this page will use the end of the copy
	 * buffer in the current window and the start of the copy buffer in
	 * the next window. Track that info... The cookie physical address
	 * was already set to the copy buffer physical address in
	 * setup_cookie.
	 */
374612f080e7Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
374712f080e7Smrj 		pidx = (sinfo->si_buf_offset + (*windowp)->wd_offset +
374812f080e7Smrj 		    (*windowp)->wd_size) >> MMU_PAGESHIFT;
374912f080e7Smrj 		(*windowp)->wd_trim.tr_last_copybuf_win = B_TRUE;
375012f080e7Smrj 		(*windowp)->wd_trim.tr_last_pidx = pidx;
375112f080e7Smrj 		(*windowp)->wd_trim.tr_last_cbaddr =
375212f080e7Smrj 		    dma->dp_pgmap[pidx].pm_cbaddr;
375312f080e7Smrj #if !defined(__amd64)
375412f080e7Smrj 		(*windowp)->wd_trim.tr_last_kaddr =
375512f080e7Smrj 		    dma->dp_pgmap[pidx].pm_kaddr;
375612f080e7Smrj #endif
375712f080e7Smrj 	}
375812f080e7Smrj 
375912f080e7Smrj 	/* save the buffer offsets for the next window */
376012f080e7Smrj 	coffset = cookie->dmac_size - trim_sz;
376112f080e7Smrj 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
376212f080e7Smrj 
	/*
	 * Set this now in case this is the first window; all other cases are
	 * set in dma_win().
	 */
376712f080e7Smrj 	cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
376812f080e7Smrj 
376912f080e7Smrj 	/*
377012f080e7Smrj 	 * initialize the next window using what's left over in the previous
377112f080e7Smrj 	 * cookie.
377212f080e7Smrj 	 */
377312f080e7Smrj 	(*windowp)++;
377412f080e7Smrj 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
377512f080e7Smrj 	(*windowp)->wd_cookie_cnt++;
377612f080e7Smrj 	(*windowp)->wd_trim.tr_trim_first = B_TRUE;
3777843e1988Sjohnlev 	(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset;
377812f080e7Smrj 	(*windowp)->wd_trim.tr_first_size = trim_sz;
377912f080e7Smrj 
	/*
	 * Again, we're tracking whether the last cookie uses the copy
	 * buffer; read the comment above for more info on why we need to
	 * track additional state.
	 *
	 * For the first cookie in the new window, we need to reset the
	 * physical address to DMA into so it points to the start of the copy
	 * buffer plus any initial page offset which may be present.
	 */
378912f080e7Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
379012f080e7Smrj 		(*windowp)->wd_dosync = B_TRUE;
379112f080e7Smrj 		(*windowp)->wd_trim.tr_first_copybuf_win = B_TRUE;
379212f080e7Smrj 		(*windowp)->wd_trim.tr_first_pidx = pidx;
379312f080e7Smrj 		(*windowp)->wd_trim.tr_first_cbaddr = dma->dp_cbaddr;
379412f080e7Smrj 		poff = (*windowp)->wd_trim.tr_first_paddr & MMU_PAGEOFFSET;
3795843e1988Sjohnlev 
3796843e1988Sjohnlev 		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat, dma->dp_cbaddr)) +
3797843e1988Sjohnlev 		    poff;
3798843e1988Sjohnlev #ifdef __xpv
3799843e1988Sjohnlev 		/*
3800843e1988Sjohnlev 		 * If we're dom0, we're using a real device so we need to load
3801843e1988Sjohnlev 		 * the cookies with MAs instead of PAs.
3802843e1988Sjohnlev 		 */
3803843e1988Sjohnlev 		(*windowp)->wd_trim.tr_first_paddr =
3804843e1988Sjohnlev 		    ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
3805843e1988Sjohnlev #else
3806843e1988Sjohnlev 		(*windowp)->wd_trim.tr_first_paddr = paddr;
3807843e1988Sjohnlev #endif
3808843e1988Sjohnlev 
380912f080e7Smrj #if !defined(__amd64)
381012f080e7Smrj 		(*windowp)->wd_trim.tr_first_kaddr = dma->dp_kva;
381112f080e7Smrj #endif
381212f080e7Smrj 		/* account for the cookie copybuf usage in the new window */
381312f080e7Smrj 		*copybuf_used += MMU_PAGESIZE;
381412f080e7Smrj 
		/*
		 * Every piece of code has to have a hack, and here is this
		 * one's :-)
		 *
		 * There is a complex interaction between setup_cookie and the
		 * copybuf window boundary. The complexity had to live in
		 * either the maxxfer window code or the copybuf window code,
		 * and I chose the copybuf code.
		 *
		 * So in this code path, we have taken the last cookie,
		 * virtually broken it in half due to the trim, and it happens
		 * to use the copybuf, which further complicates life. At the
		 * same time, we have already set up the current cookie, which
		 * is now wrong. More background info: the current cookie uses
		 * the copybuf, so it is at most a page long. So we need to
		 * fix the current cookie's copy buffer address, physical
		 * address, and kva for the 32-bit kernel. We do this by
		 * bumping them by a page size (of course, we can't do this
		 * for the physical address, since the copy buffer may not be
		 * physically contiguous).
		 */
383612f080e7Smrj 		cookie++;
383712f080e7Smrj 		dma->dp_pgmap[pidx + 1].pm_cbaddr += MMU_PAGESIZE;
3838843e1988Sjohnlev 		poff = cookie->dmac_laddress & MMU_PAGEOFFSET;
3839843e1988Sjohnlev 
3840843e1988Sjohnlev 		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat,
384112f080e7Smrj 		    dma->dp_pgmap[pidx + 1].pm_cbaddr)) + poff;
3842843e1988Sjohnlev #ifdef __xpv
3843843e1988Sjohnlev 		/*
3844843e1988Sjohnlev 		 * If we're dom0, we're using a real device so we need to load
3845843e1988Sjohnlev 		 * the cookies with MAs instead of PAs.
3846843e1988Sjohnlev 		 */
3847843e1988Sjohnlev 		cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
3848843e1988Sjohnlev #else
3849843e1988Sjohnlev 		cookie->dmac_laddress = paddr;
3850843e1988Sjohnlev #endif
3851843e1988Sjohnlev 
385212f080e7Smrj #if !defined(__amd64)
385312f080e7Smrj 		ASSERT(dma->dp_pgmap[pidx + 1].pm_mapped == B_FALSE);
385412f080e7Smrj 		dma->dp_pgmap[pidx + 1].pm_kaddr += MMU_PAGESIZE;
385512f080e7Smrj #endif
385612f080e7Smrj 	} else {
385712f080e7Smrj 		/* go back to the current cookie */
385812f080e7Smrj 		cookie++;
385912f080e7Smrj 	}
386012f080e7Smrj 
	/*
	 * Add the current cookie to the new window. Set the new window size
	 * to what was left over from the previous cookie plus what's in the
	 * current cookie.
	 */
386612f080e7Smrj 	(*windowp)->wd_cookie_cnt++;
386712f080e7Smrj 	(*windowp)->wd_size = trim_sz + cookie->dmac_size;
386812f080e7Smrj 	ASSERT((*windowp)->wd_size < dma->dp_maxxfer);
386912f080e7Smrj 
387012f080e7Smrj 	/*
387112f080e7Smrj 	 * we know that the cookie passed in always uses the copy buffer. We
387212f080e7Smrj 	 * wouldn't be here if it didn't.
387312f080e7Smrj 	 */
387412f080e7Smrj 	*copybuf_used += MMU_PAGESIZE;
387512f080e7Smrj 
387612f080e7Smrj 	return (DDI_SUCCESS);
387712f080e7Smrj }
387812f080e7Smrj 
387912f080e7Smrj 
388012f080e7Smrj /*
388112f080e7Smrj  * rootnex_maxxfer_window_boundary()
388212f080e7Smrj  *    Called in bind slowpath when we get to a window boundary because we will
388312f080e7Smrj  *    go over maxxfer.
388412f080e7Smrj  */
388512f080e7Smrj static int
388612f080e7Smrj rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
388712f080e7Smrj     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie)
388812f080e7Smrj {
388912f080e7Smrj 	size_t dmac_size;
389012f080e7Smrj 	off_t new_offset;
389112f080e7Smrj 	size_t trim_sz;
389212f080e7Smrj 	off_t coffset;
389312f080e7Smrj 
389412f080e7Smrj 
	/*
	 * Calculate how much we have to trim off the current cookie to make
	 * the window equal maxxfer. We don't have to account for granularity
	 * here since our maxxfer already takes that into account.
	 */
390012f080e7Smrj 	trim_sz = ((*windowp)->wd_size + cookie->dmac_size) - dma->dp_maxxfer;
390112f080e7Smrj 	ASSERT(trim_sz <= cookie->dmac_size);
390212f080e7Smrj 	ASSERT(trim_sz <= dma->dp_maxxfer);
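	/*
	 * e.g. (hypothetical values): wd_size == 0x3000, dmac_size == 0x2000
	 * and dp_maxxfer == 0x4000 give trim_sz == 0x1000; the last 0x1000
	 * bytes of this cookie must start the next window.
	 */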
390312f080e7Smrj 
390412f080e7Smrj 	/* save cookie size since we need it later and we might change it */
390512f080e7Smrj 	dmac_size = cookie->dmac_size;
390612f080e7Smrj 
390712f080e7Smrj 	/*
390812f080e7Smrj 	 * if we're not trimming the entire cookie, setup the current window to
390912f080e7Smrj 	 * account for the trim.
391012f080e7Smrj 	 */
391112f080e7Smrj 	if (trim_sz < cookie->dmac_size) {
391212f080e7Smrj 		(*windowp)->wd_cookie_cnt++;
391312f080e7Smrj 		(*windowp)->wd_trim.tr_trim_last = B_TRUE;
391412f080e7Smrj 		(*windowp)->wd_trim.tr_last_cookie = cookie;
3915843e1988Sjohnlev 		(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
391612f080e7Smrj 		(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
391712f080e7Smrj 		(*windowp)->wd_size = dma->dp_maxxfer;
391812f080e7Smrj 
		/*
		 * Set the adjusted cookie size now in case this is the first
		 * window. All other windows are taken care of in
		 * ddi_dma_getwin().
		 */
392312f080e7Smrj 		cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
392412f080e7Smrj 	}
392512f080e7Smrj 
	/*
	 * coffset is the current offset within the cookie; new_offset is the
	 * current offset within the entire buffer.
	 */
393012f080e7Smrj 	coffset = dmac_size - trim_sz;
393112f080e7Smrj 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
393212f080e7Smrj 
393312f080e7Smrj 	/* initialize the next window */
393412f080e7Smrj 	(*windowp)++;
393512f080e7Smrj 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
393612f080e7Smrj 	(*windowp)->wd_cookie_cnt++;
393712f080e7Smrj 	(*windowp)->wd_size = trim_sz;
393812f080e7Smrj 	if (trim_sz < dmac_size) {
393912f080e7Smrj 		(*windowp)->wd_trim.tr_trim_first = B_TRUE;
3940843e1988Sjohnlev 		(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress +
394112f080e7Smrj 		    coffset;
394212f080e7Smrj 		(*windowp)->wd_trim.tr_first_size = trim_sz;
394312f080e7Smrj 	}
394412f080e7Smrj 
394512f080e7Smrj 	return (DDI_SUCCESS);
394612f080e7Smrj }
394712f080e7Smrj 
394812f080e7Smrj 
394912f080e7Smrj /*ARGSUSED*/
395012f080e7Smrj static int
3951*20906b23SVikram Hegde rootnex_coredma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
395212f080e7Smrj     off_t off, size_t len, uint_t cache_flags)
395312f080e7Smrj {
395412f080e7Smrj 	rootnex_sglinfo_t *sinfo;
395512f080e7Smrj 	rootnex_pgmap_t *cbpage;
395612f080e7Smrj 	rootnex_window_t *win;
395712f080e7Smrj 	ddi_dma_impl_t *hp;
395812f080e7Smrj 	rootnex_dma_t *dma;
395912f080e7Smrj 	caddr_t fromaddr;
396012f080e7Smrj 	caddr_t toaddr;
396112f080e7Smrj 	uint_t psize;
396212f080e7Smrj 	off_t offset;
396312f080e7Smrj 	uint_t pidx;
396412f080e7Smrj 	size_t size;
396512f080e7Smrj 	off_t poff;
396612f080e7Smrj 	int e;
396712f080e7Smrj 
396812f080e7Smrj 
396912f080e7Smrj 	hp = (ddi_dma_impl_t *)handle;
397012f080e7Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
397112f080e7Smrj 	sinfo = &dma->dp_sglinfo;
397212f080e7Smrj 
397312f080e7Smrj 	/*
397412f080e7Smrj 	 * if we don't have any windows, we don't need to sync. A copybuf
397512f080e7Smrj 	 * will cause us to have at least one window.
397612f080e7Smrj 	 */
397712f080e7Smrj 	if (dma->dp_window == NULL) {
397812f080e7Smrj 		return (DDI_SUCCESS);
397912f080e7Smrj 	}
398012f080e7Smrj 
398112f080e7Smrj 	/* This window may not need to be sync'd */
398212f080e7Smrj 	win = &dma->dp_window[dma->dp_current_win];
398312f080e7Smrj 	if (!win->wd_dosync) {
398412f080e7Smrj 		return (DDI_SUCCESS);
398512f080e7Smrj 	}
398612f080e7Smrj 
398712f080e7Smrj 	/* handle off and len special cases */
398812f080e7Smrj 	if ((off == 0) || (rootnex_sync_ignore_params)) {
398912f080e7Smrj 		offset = win->wd_offset;
399012f080e7Smrj 	} else {
399112f080e7Smrj 		offset = off;
399212f080e7Smrj 	}
399312f080e7Smrj 	if ((len == 0) || (rootnex_sync_ignore_params)) {
399412f080e7Smrj 		size = win->wd_size;
399512f080e7Smrj 	} else {
399612f080e7Smrj 		size = len;
399712f080e7Smrj 	}
399812f080e7Smrj 
399912f080e7Smrj 	/* check the sync args to make sure they make a little sense */
400012f080e7Smrj 	if (rootnex_sync_check_parms) {
400112f080e7Smrj 		e = rootnex_valid_sync_parms(hp, win, offset, size,
400212f080e7Smrj 		    cache_flags);
400312f080e7Smrj 		if (e != DDI_SUCCESS) {
400412f080e7Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_SYNC_FAIL]);
400512f080e7Smrj 			return (DDI_FAILURE);
400612f080e7Smrj 		}
400712f080e7Smrj 	}
400812f080e7Smrj 
400912f080e7Smrj 	/*
401012f080e7Smrj 	 * special case the first page to handle the offset into the page. The
401112f080e7Smrj 	 * offset to the current page for our buffer is the offset into the
401212f080e7Smrj 	 * first page of the buffer plus our current offset into the buffer
401312f080e7Smrj 	 * itself, masked of course.
401412f080e7Smrj 	 */
401512f080e7Smrj 	poff = (sinfo->si_buf_offset + offset) & MMU_PAGEOFFSET;
401612f080e7Smrj 	psize = MIN((MMU_PAGESIZE - poff), size);
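	/*
	 * e.g. (hypothetical values, 4K pages): si_buf_offset + offset ==
	 * 0x1850 gives poff == 0x850, so this first pass copies at most
	 * 0x7b0 bytes, whatever is left of that page.
	 */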
401712f080e7Smrj 
401812f080e7Smrj 	/* go through all the pages that we want to sync */
401912f080e7Smrj 	while (size > 0) {
402012f080e7Smrj 		/*
402112f080e7Smrj 		 * Calculate the page index relative to the start of the buffer.
402212f080e7Smrj 		 * The index to the current page for our buffer is the offset
402312f080e7Smrj 		 * into the first page of the buffer plus our current offset
402412f080e7Smrj 		 * into the buffer itself, shifted of course...
402512f080e7Smrj 		 */
402612f080e7Smrj 		pidx = (sinfo->si_buf_offset + offset) >> MMU_PAGESHIFT;
402712f080e7Smrj 		ASSERT(pidx < sinfo->si_max_pages);
402812f080e7Smrj 
		/*
		 * If this page uses the copy buffer, we need to sync it;
		 * otherwise, go on to the next page.
		 */
403312f080e7Smrj 		cbpage = &dma->dp_pgmap[pidx];
403412f080e7Smrj 		ASSERT((cbpage->pm_uses_copybuf == B_TRUE) ||
403512f080e7Smrj 		    (cbpage->pm_uses_copybuf == B_FALSE));
403612f080e7Smrj 		if (cbpage->pm_uses_copybuf) {
403712f080e7Smrj 			/* cbaddr and kaddr should be page aligned */
403812f080e7Smrj 			ASSERT(((uintptr_t)cbpage->pm_cbaddr &
403912f080e7Smrj 			    MMU_PAGEOFFSET) == 0);
404012f080e7Smrj 			ASSERT(((uintptr_t)cbpage->pm_kaddr &
404112f080e7Smrj 			    MMU_PAGEOFFSET) == 0);
404212f080e7Smrj 
			/*
			 * If we're copying for the device, we copy from the
			 * driver's buffer to the rootnex-allocated copy
			 * buffer.
			 */
404812f080e7Smrj 			if (cache_flags == DDI_DMA_SYNC_FORDEV) {
404912f080e7Smrj 				fromaddr = cbpage->pm_kaddr + poff;
405012f080e7Smrj 				toaddr = cbpage->pm_cbaddr + poff;
405112f080e7Smrj 				DTRACE_PROBE2(rootnex__sync__dev,
405212f080e7Smrj 				    dev_info_t *, dma->dp_dip, size_t, psize);
405312f080e7Smrj 
			/*
			 * If we're copying for the cpu/kernel, we copy from
			 * the rootnex-allocated copy buffer to the driver's
			 * buffer.
			 */
405912f080e7Smrj 			} else {
406012f080e7Smrj 				fromaddr = cbpage->pm_cbaddr + poff;
406112f080e7Smrj 				toaddr = cbpage->pm_kaddr + poff;
406212f080e7Smrj 				DTRACE_PROBE2(rootnex__sync__cpu,
406312f080e7Smrj 				    dev_info_t *, dma->dp_dip, size_t, psize);
406412f080e7Smrj 			}
406512f080e7Smrj 
406612f080e7Smrj 			bcopy(fromaddr, toaddr, psize);
406712f080e7Smrj 		}
406812f080e7Smrj 
406912f080e7Smrj 		/*
407012f080e7Smrj 		 * decrement size until we're done, update our offset into the
407112f080e7Smrj 		 * buffer, and get the next page size.
407212f080e7Smrj 		 */
407312f080e7Smrj 		size -= psize;
407412f080e7Smrj 		offset += psize;
407512f080e7Smrj 		psize = MIN(MMU_PAGESIZE, size);
407612f080e7Smrj 
407712f080e7Smrj 		/* page offset is zero for the rest of this loop */
407812f080e7Smrj 		poff = 0;
407912f080e7Smrj 	}
408012f080e7Smrj 
408112f080e7Smrj 	return (DDI_SUCCESS);
408212f080e7Smrj }
408312f080e7Smrj 
4084*20906b23SVikram Hegde /*
4085*20906b23SVikram Hegde  * rootnex_dma_sync()
4086*20906b23SVikram Hegde  *    called from ddi_dma_sync() if DMP_NOSYNC is not set in hp->dmai_rflags.
4087*20906b23SVikram Hegde  *    We set DMP_NOSYNC if we're not using the copy buffer. If DMP_NOSYNC
4088*20906b23SVikram Hegde  *    is set, ddi_dma_sync() returns immediately passing back success.
4089*20906b23SVikram Hegde  */
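/*
 * Note: this, like the other rootnex_dma_* entry points below, is a thin
 * dispatcher. When an IOMMU has taken over DMA for the requesting device
 * (the IOMMU_USED() check, compiled out under __xpv), the operation is
 * handed off to iommulib; otherwise it falls through to the
 * rootnex_coredma_*() implementation.
 */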
4090*20906b23SVikram Hegde /*ARGSUSED*/
4091*20906b23SVikram Hegde static int
4092*20906b23SVikram Hegde rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
4093*20906b23SVikram Hegde     off_t off, size_t len, uint_t cache_flags)
4094*20906b23SVikram Hegde {
4095*20906b23SVikram Hegde #if !defined(__xpv)
4096*20906b23SVikram Hegde 	if (IOMMU_USED(rdip)) {
4097*20906b23SVikram Hegde 		return (iommulib_nexdma_sync(dip, rdip, handle, off, len,
4098*20906b23SVikram Hegde 		    cache_flags));
4099*20906b23SVikram Hegde 	}
4100*20906b23SVikram Hegde #endif
4101*20906b23SVikram Hegde 	return (rootnex_coredma_sync(dip, rdip, handle, off, len,
4102*20906b23SVikram Hegde 	    cache_flags));
4103*20906b23SVikram Hegde }
410412f080e7Smrj 
410512f080e7Smrj /*
410612f080e7Smrj  * rootnex_valid_sync_parms()
410712f080e7Smrj  *    checks the parameters passed to sync to verify they are correct.
410812f080e7Smrj  */
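/*
 * In short: the offset/size pair must lie within the current window, a
 * FORDEV sync requires a binding made with DDI_DMA_WRITE, and a FORCPU or
 * FORKERNEL sync requires one made with DDI_DMA_READ.
 */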
410912f080e7Smrj static int
411012f080e7Smrj rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win,
411112f080e7Smrj     off_t offset, size_t size, uint_t cache_flags)
411212f080e7Smrj {
411312f080e7Smrj 	off_t woffset;
411412f080e7Smrj 
411512f080e7Smrj 
	/*
	 * This is the first part of the test: make sure the offset passed in
	 * is within the window.
	 */
412012f080e7Smrj 	if (offset < win->wd_offset) {
412112f080e7Smrj 		return (DDI_FAILURE);
412212f080e7Smrj 	}
412312f080e7Smrj 
	/*
	 * This is the second and last part of the test: make sure the offset
	 * plus the length passed in stays within the window.
	 */
412812f080e7Smrj 	woffset = offset - win->wd_offset;
412912f080e7Smrj 	if ((woffset + size) > win->wd_size) {
413012f080e7Smrj 		return (DDI_FAILURE);
413112f080e7Smrj 	}
413212f080e7Smrj 
413312f080e7Smrj 	/*
413412f080e7Smrj 	 * if we are sync'ing for the device, the DDI_DMA_WRITE flag should
413512f080e7Smrj 	 * be set too.
413612f080e7Smrj 	 */
413712f080e7Smrj 	if ((cache_flags == DDI_DMA_SYNC_FORDEV) &&
413812f080e7Smrj 	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
413912f080e7Smrj 		return (DDI_SUCCESS);
414012f080e7Smrj 	}
414112f080e7Smrj 
414212f080e7Smrj 	/*
414312f080e7Smrj 	 * at this point, either DDI_DMA_SYNC_FORCPU or DDI_DMA_SYNC_FORKERNEL
414412f080e7Smrj 	 * should be set. Also DDI_DMA_READ should be set in the flags.
414512f080e7Smrj 	 */
414612f080e7Smrj 	if (((cache_flags == DDI_DMA_SYNC_FORCPU) ||
414712f080e7Smrj 	    (cache_flags == DDI_DMA_SYNC_FORKERNEL)) &&
414812f080e7Smrj 	    (hp->dmai_rflags & DDI_DMA_READ)) {
414912f080e7Smrj 		return (DDI_SUCCESS);
415012f080e7Smrj 	}
415112f080e7Smrj 
415212f080e7Smrj 	return (DDI_FAILURE);
415312f080e7Smrj }
415412f080e7Smrj 
415512f080e7Smrj 
415612f080e7Smrj /*ARGSUSED*/
415712f080e7Smrj static int
4158*20906b23SVikram Hegde rootnex_coredma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
415912f080e7Smrj     uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
416012f080e7Smrj     uint_t *ccountp)
416112f080e7Smrj {
416212f080e7Smrj 	rootnex_window_t *window;
416312f080e7Smrj 	rootnex_trim_t *trim;
416412f080e7Smrj 	ddi_dma_impl_t *hp;
416512f080e7Smrj 	rootnex_dma_t *dma;
416612f080e7Smrj #if !defined(__amd64)
416712f080e7Smrj 	rootnex_sglinfo_t *sinfo;
416812f080e7Smrj 	rootnex_pgmap_t *pmap;
416912f080e7Smrj 	uint_t pidx;
417012f080e7Smrj 	uint_t pcnt;
417112f080e7Smrj 	off_t poff;
417212f080e7Smrj 	int i;
417312f080e7Smrj #endif
417412f080e7Smrj 
417512f080e7Smrj 
417612f080e7Smrj 	hp = (ddi_dma_impl_t *)handle;
417712f080e7Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
417812f080e7Smrj #if !defined(__amd64)
417912f080e7Smrj 	sinfo = &dma->dp_sglinfo;
418012f080e7Smrj #endif
418112f080e7Smrj 
	/* If we try to get a window which doesn't exist, return failure */
418312f080e7Smrj 	if (win >= hp->dmai_nwin) {
418412f080e7Smrj 		ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
418512f080e7Smrj 		return (DDI_FAILURE);
418612f080e7Smrj 	}
418712f080e7Smrj 
	/*
	 * If we don't have any windows and they're asking for the first
	 * window, set the cookie pointer to the first cookie in the bind.
	 * Set up our return values, then increment the cookie since we
	 * return the first cookie on the stack.
	 */
419412f080e7Smrj 	if (dma->dp_window == NULL) {
419512f080e7Smrj 		if (win != 0) {
419612f080e7Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
419712f080e7Smrj 			return (DDI_FAILURE);
419812f080e7Smrj 		}
419912f080e7Smrj 		hp->dmai_cookie = dma->dp_cookies;
420012f080e7Smrj 		*offp = 0;
420112f080e7Smrj 		*lenp = dma->dp_dma.dmao_size;
420212f080e7Smrj 		*ccountp = dma->dp_sglinfo.si_sgl_size;
420312f080e7Smrj 		*cookiep = hp->dmai_cookie[0];
420412f080e7Smrj 		hp->dmai_cookie++;
420512f080e7Smrj 		return (DDI_SUCCESS);
420612f080e7Smrj 	}
420712f080e7Smrj 
420812f080e7Smrj 	/* sync the old window before moving on to the new one */
420912f080e7Smrj 	window = &dma->dp_window[dma->dp_current_win];
421012f080e7Smrj 	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_READ)) {
421112f080e7Smrj 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
421212f080e7Smrj 		    DDI_DMA_SYNC_FORCPU);
421312f080e7Smrj 	}
421412f080e7Smrj 
421512f080e7Smrj #if !defined(__amd64)
421612f080e7Smrj 	/*
421712f080e7Smrj 	 * before we move to the next window, if we need to re-map, unmap all
421812f080e7Smrj 	 * the pages in this window.
421912f080e7Smrj 	 */
422012f080e7Smrj 	if (dma->dp_cb_remaping) {
		/*
		 * If we switch to this window again, we'll need to map it in
		 * on the fly next time.
		 */
422512f080e7Smrj 		window->wd_remap_copybuf = B_TRUE;
422612f080e7Smrj 
422712f080e7Smrj 		/*
422812f080e7Smrj 		 * calculate the page index into the buffer where this window
422912f080e7Smrj 		 * starts, and the number of pages this window takes up.
423012f080e7Smrj 		 */
423112f080e7Smrj 		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
423212f080e7Smrj 		    MMU_PAGESHIFT;
423312f080e7Smrj 		poff = (sinfo->si_buf_offset + window->wd_offset) &
423412f080e7Smrj 		    MMU_PAGEOFFSET;
423512f080e7Smrj 		pcnt = mmu_btopr(window->wd_size + poff);
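		/*
		 * e.g. (hypothetical values, 4K pages): wd_size == 0x2800
		 * and poff == 0x850 span 0x3050 bytes, which mmu_btopr()
		 * rounds up to pcnt == 4 pages.
		 */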
423612f080e7Smrj 		ASSERT((pidx + pcnt) <= sinfo->si_max_pages);
423712f080e7Smrj 
423812f080e7Smrj 		/* unmap pages which are currently mapped in this window */
423912f080e7Smrj 		for (i = 0; i < pcnt; i++) {
424012f080e7Smrj 			if (dma->dp_pgmap[pidx].pm_mapped) {
424112f080e7Smrj 				hat_unload(kas.a_hat,
424212f080e7Smrj 				    dma->dp_pgmap[pidx].pm_kaddr, MMU_PAGESIZE,
424312f080e7Smrj 				    HAT_UNLOAD);
424412f080e7Smrj 				dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
424512f080e7Smrj 			}
424612f080e7Smrj 			pidx++;
424712f080e7Smrj 		}
424812f080e7Smrj 	}
424912f080e7Smrj #endif
425012f080e7Smrj 
425112f080e7Smrj 	/*
425212f080e7Smrj 	 * Move to the new window.
425312f080e7Smrj 	 * NOTE: current_win must be set for sync to work right
425412f080e7Smrj 	 */
425512f080e7Smrj 	dma->dp_current_win = win;
425612f080e7Smrj 	window = &dma->dp_window[win];
425712f080e7Smrj 
425812f080e7Smrj 	/* if needed, adjust the first and/or last cookies for trim */
425912f080e7Smrj 	trim = &window->wd_trim;
426012f080e7Smrj 	if (trim->tr_trim_first) {
4261843e1988Sjohnlev 		window->wd_first_cookie->dmac_laddress = trim->tr_first_paddr;
426212f080e7Smrj 		window->wd_first_cookie->dmac_size = trim->tr_first_size;
426312f080e7Smrj #if !defined(__amd64)
426412f080e7Smrj 		window->wd_first_cookie->dmac_type =
426512f080e7Smrj 		    (window->wd_first_cookie->dmac_type &
426612f080e7Smrj 		    ROOTNEX_USES_COPYBUF) + window->wd_offset;
426712f080e7Smrj #endif
426812f080e7Smrj 		if (trim->tr_first_copybuf_win) {
426912f080e7Smrj 			dma->dp_pgmap[trim->tr_first_pidx].pm_cbaddr =
427012f080e7Smrj 			    trim->tr_first_cbaddr;
427112f080e7Smrj #if !defined(__amd64)
427212f080e7Smrj 			dma->dp_pgmap[trim->tr_first_pidx].pm_kaddr =
427312f080e7Smrj 			    trim->tr_first_kaddr;
427412f080e7Smrj #endif
427512f080e7Smrj 		}
427612f080e7Smrj 	}
427712f080e7Smrj 	if (trim->tr_trim_last) {
4278843e1988Sjohnlev 		trim->tr_last_cookie->dmac_laddress = trim->tr_last_paddr;
427912f080e7Smrj 		trim->tr_last_cookie->dmac_size = trim->tr_last_size;
428012f080e7Smrj 		if (trim->tr_last_copybuf_win) {
428112f080e7Smrj 			dma->dp_pgmap[trim->tr_last_pidx].pm_cbaddr =
428212f080e7Smrj 			    trim->tr_last_cbaddr;
428312f080e7Smrj #if !defined(__amd64)
428412f080e7Smrj 			dma->dp_pgmap[trim->tr_last_pidx].pm_kaddr =
428512f080e7Smrj 			    trim->tr_last_kaddr;
428612f080e7Smrj #endif
428712f080e7Smrj 		}
428812f080e7Smrj 	}
428912f080e7Smrj 
	/*
	 * Set the cookie pointer to the first cookie in the window. Set up
	 * our return values, then increment the cookie since we return the
	 * first cookie on the stack.
	 */
429512f080e7Smrj 	hp->dmai_cookie = window->wd_first_cookie;
429612f080e7Smrj 	*offp = window->wd_offset;
429712f080e7Smrj 	*lenp = window->wd_size;
429812f080e7Smrj 	*ccountp = window->wd_cookie_cnt;
429912f080e7Smrj 	*cookiep = hp->dmai_cookie[0];
430012f080e7Smrj 	hp->dmai_cookie++;
430112f080e7Smrj 
430212f080e7Smrj #if !defined(__amd64)
430312f080e7Smrj 	/* re-map copybuf if required for this window */
430412f080e7Smrj 	if (dma->dp_cb_remaping) {
430512f080e7Smrj 		/*
430612f080e7Smrj 		 * calculate the page index into the buffer where this
430712f080e7Smrj 		 * window starts.
430812f080e7Smrj 		 */
430912f080e7Smrj 		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
431012f080e7Smrj 		    MMU_PAGESHIFT;
431112f080e7Smrj 		ASSERT(pidx < sinfo->si_max_pages);
431212f080e7Smrj 
		/*
		 * The first page can get unmapped if it's shared with the
		 * previous window. Even if the rest of this window is already
		 * mapped in, we still need to check this one.
		 */
431812f080e7Smrj 		pmap = &dma->dp_pgmap[pidx];
431912f080e7Smrj 		if ((pmap->pm_uses_copybuf) && (pmap->pm_mapped == B_FALSE)) {
432012f080e7Smrj 			if (pmap->pm_pp != NULL) {
432112f080e7Smrj 				pmap->pm_mapped = B_TRUE;
432212f080e7Smrj 				i86_pp_map(pmap->pm_pp, pmap->pm_kaddr);
432312f080e7Smrj 			} else if (pmap->pm_vaddr != NULL) {
432412f080e7Smrj 				pmap->pm_mapped = B_TRUE;
432512f080e7Smrj 				i86_va_map(pmap->pm_vaddr, sinfo->si_asp,
432612f080e7Smrj 				    pmap->pm_kaddr);
432712f080e7Smrj 			}
432812f080e7Smrj 		}
432912f080e7Smrj 		pidx++;
433012f080e7Smrj 
433112f080e7Smrj 		/* map in the rest of the pages if required */
433212f080e7Smrj 		if (window->wd_remap_copybuf) {
433312f080e7Smrj 			window->wd_remap_copybuf = B_FALSE;
433412f080e7Smrj 
			/* figure out how many pages this window takes up */
433612f080e7Smrj 			poff = (sinfo->si_buf_offset + window->wd_offset) &
433712f080e7Smrj 			    MMU_PAGEOFFSET;
433812f080e7Smrj 			pcnt = mmu_btopr(window->wd_size + poff);
433912f080e7Smrj 			ASSERT(((pidx - 1) + pcnt) <= sinfo->si_max_pages);
434012f080e7Smrj 
434112f080e7Smrj 			/* map pages which require it */
434212f080e7Smrj 			for (i = 1; i < pcnt; i++) {
434312f080e7Smrj 				pmap = &dma->dp_pgmap[pidx];
434412f080e7Smrj 				if (pmap->pm_uses_copybuf) {
434512f080e7Smrj 					ASSERT(pmap->pm_mapped == B_FALSE);
434612f080e7Smrj 					if (pmap->pm_pp != NULL) {
434712f080e7Smrj 						pmap->pm_mapped = B_TRUE;
434812f080e7Smrj 						i86_pp_map(pmap->pm_pp,
434912f080e7Smrj 						    pmap->pm_kaddr);
435012f080e7Smrj 					} else if (pmap->pm_vaddr != NULL) {
435112f080e7Smrj 						pmap->pm_mapped = B_TRUE;
435212f080e7Smrj 						i86_va_map(pmap->pm_vaddr,
435312f080e7Smrj 						    sinfo->si_asp,
435412f080e7Smrj 						    pmap->pm_kaddr);
435512f080e7Smrj 					}
435612f080e7Smrj 				}
435712f080e7Smrj 				pidx++;
435812f080e7Smrj 			}
435912f080e7Smrj 		}
436012f080e7Smrj 	}
436112f080e7Smrj #endif
436212f080e7Smrj 
436312f080e7Smrj 	/* if the new window uses the copy buffer, sync it for the device */
436412f080e7Smrj 	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_WRITE)) {
436512f080e7Smrj 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
436612f080e7Smrj 		    DDI_DMA_SYNC_FORDEV);
436712f080e7Smrj 	}
436812f080e7Smrj 
436912f080e7Smrj 	return (DDI_SUCCESS);
437012f080e7Smrj }
437112f080e7Smrj 
4372*20906b23SVikram Hegde /*
4373*20906b23SVikram Hegde  * rootnex_dma_win()
4374*20906b23SVikram Hegde  *    called from ddi_dma_getwin()
4375*20906b23SVikram Hegde  */
4376*20906b23SVikram Hegde /*ARGSUSED*/
4377*20906b23SVikram Hegde static int
4378*20906b23SVikram Hegde rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
4379*20906b23SVikram Hegde     uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
4380*20906b23SVikram Hegde     uint_t *ccountp)
4381*20906b23SVikram Hegde {
4382*20906b23SVikram Hegde #if !defined(__xpv)
4383*20906b23SVikram Hegde 	if (IOMMU_USED(rdip)) {
4384*20906b23SVikram Hegde 		return (iommulib_nexdma_win(dip, rdip, handle, win, offp, lenp,
4385*20906b23SVikram Hegde 		    cookiep, ccountp));
4386*20906b23SVikram Hegde 	}
4387*20906b23SVikram Hegde #endif
438812f080e7Smrj 
4389*20906b23SVikram Hegde 	return (rootnex_coredma_win(dip, rdip, handle, win, offp, lenp,
4390*20906b23SVikram Hegde 	    cookiep, ccountp));
4391*20906b23SVikram Hegde }
439212f080e7Smrj 
439312f080e7Smrj /*
439412f080e7Smrj  * ************************
439512f080e7Smrj  *  obsoleted dma routines
439612f080e7Smrj  * ************************
439712f080e7Smrj  */
439812f080e7Smrj 
439912f080e7Smrj /* ARGSUSED */
440012f080e7Smrj static int
4401*20906b23SVikram Hegde rootnex_coredma_map(dev_info_t *dip, dev_info_t *rdip,
4402*20906b23SVikram Hegde     struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep)
440312f080e7Smrj {
440412f080e7Smrj #if defined(__amd64)
	/*
	 * This interface is not supported in the 64-bit x86 kernel. See the
	 * comment in rootnex_dma_mctl().
	 */
440912f080e7Smrj 	return (DDI_DMA_NORESOURCES);
441012f080e7Smrj 
441112f080e7Smrj #else /* 32-bit x86 kernel */
441212f080e7Smrj 	ddi_dma_handle_t *lhandlep;
441312f080e7Smrj 	ddi_dma_handle_t lhandle;
441412f080e7Smrj 	ddi_dma_cookie_t cookie;
441512f080e7Smrj 	ddi_dma_attr_t dma_attr;
441612f080e7Smrj 	ddi_dma_lim_t *dma_lim;
441712f080e7Smrj 	uint_t ccnt;
441812f080e7Smrj 	int e;
441912f080e7Smrj 
442012f080e7Smrj 
442112f080e7Smrj 	/*
442212f080e7Smrj 	 * if the driver is just testing to see if it's possible to do the bind,
442312f080e7Smrj 	 * we'll use local state. Otherwise, use the handle pointer passed in.
442412f080e7Smrj 	 */
442512f080e7Smrj 	if (handlep == NULL) {
442612f080e7Smrj 		lhandlep = &lhandle;
442712f080e7Smrj 	} else {
442812f080e7Smrj 		lhandlep = handlep;
442912f080e7Smrj 	}
443012f080e7Smrj 
443112f080e7Smrj 	/* convert the limit structure to a dma_attr one */
443212f080e7Smrj 	dma_lim = dmareq->dmar_limits;
443312f080e7Smrj 	dma_attr.dma_attr_version = DMA_ATTR_V0;
443412f080e7Smrj 	dma_attr.dma_attr_addr_lo = dma_lim->dlim_addr_lo;
443512f080e7Smrj 	dma_attr.dma_attr_addr_hi = dma_lim->dlim_addr_hi;
443612f080e7Smrj 	dma_attr.dma_attr_minxfer = dma_lim->dlim_minxfer;
443712f080e7Smrj 	dma_attr.dma_attr_seg = dma_lim->dlim_adreg_max;
443812f080e7Smrj 	dma_attr.dma_attr_count_max = dma_lim->dlim_ctreg_max;
443912f080e7Smrj 	dma_attr.dma_attr_granular = dma_lim->dlim_granular;
444012f080e7Smrj 	dma_attr.dma_attr_sgllen = dma_lim->dlim_sgllen;
444112f080e7Smrj 	dma_attr.dma_attr_maxxfer = dma_lim->dlim_reqsize;
444212f080e7Smrj 	dma_attr.dma_attr_burstsizes = dma_lim->dlim_burstsizes;
444312f080e7Smrj 	dma_attr.dma_attr_align = MMU_PAGESIZE;
444412f080e7Smrj 	dma_attr.dma_attr_flags = 0;
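	/*
	 * Note that the old limit structure has no alignment or flags
	 * fields; page alignment and zeroed flags appear to be conservative
	 * defaults for the conversion.
	 */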
444512f080e7Smrj 
444612f080e7Smrj 	e = rootnex_dma_allochdl(dip, rdip, &dma_attr, dmareq->dmar_fp,
444712f080e7Smrj 	    dmareq->dmar_arg, lhandlep);
444812f080e7Smrj 	if (e != DDI_SUCCESS) {
444912f080e7Smrj 		return (e);
445012f080e7Smrj 	}
445112f080e7Smrj 
445212f080e7Smrj 	e = rootnex_dma_bindhdl(dip, rdip, *lhandlep, dmareq, &cookie, &ccnt);
445312f080e7Smrj 	if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) {
445412f080e7Smrj 		(void) rootnex_dma_freehdl(dip, rdip, *lhandlep);
445512f080e7Smrj 		return (e);
445612f080e7Smrj 	}
445712f080e7Smrj 
445812f080e7Smrj 	/*
445912f080e7Smrj 	 * if the driver is just testing to see if it's possible to do the bind,
446012f080e7Smrj 	 * free up the local state and return the result.
446112f080e7Smrj 	 */
446212f080e7Smrj 	if (handlep == NULL) {
446312f080e7Smrj 		(void) rootnex_dma_unbindhdl(dip, rdip, *lhandlep);
446412f080e7Smrj 		(void) rootnex_dma_freehdl(dip, rdip, *lhandlep);
446512f080e7Smrj 		if (e == DDI_DMA_MAPPED) {
446612f080e7Smrj 			return (DDI_DMA_MAPOK);
446712f080e7Smrj 		} else {
446812f080e7Smrj 			return (DDI_DMA_NOMAPPING);
446912f080e7Smrj 		}
447012f080e7Smrj 	}
447112f080e7Smrj 
447212f080e7Smrj 	return (e);
447312f080e7Smrj #endif /* defined(__amd64) */
447412f080e7Smrj }
447512f080e7Smrj 
4476*20906b23SVikram Hegde /*
4477*20906b23SVikram Hegde  * rootnex_dma_map()
4478*20906b23SVikram Hegde  *    called from ddi_dma_setup()
4479*20906b23SVikram Hegde  */
4480*20906b23SVikram Hegde /* ARGSUSED */
4481*20906b23SVikram Hegde static int
4482*20906b23SVikram Hegde rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip,
4483*20906b23SVikram Hegde     struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep)
4484*20906b23SVikram Hegde {
4485*20906b23SVikram Hegde #if !defined(__xpv)
4486*20906b23SVikram Hegde 	if (IOMMU_USED(rdip)) {
4487*20906b23SVikram Hegde 		return (iommulib_nexdma_map(dip, rdip, dmareq, handlep));
4488*20906b23SVikram Hegde 	}
4489*20906b23SVikram Hegde #endif
4490*20906b23SVikram Hegde 	return (rootnex_coredma_map(dip, rdip, dmareq, handlep));
4491*20906b23SVikram Hegde }
449212f080e7Smrj 
449312f080e7Smrj /*
449412f080e7Smrj  * rootnex_dma_mctl()
449512f080e7Smrj  *
449612f080e7Smrj  */
449712f080e7Smrj /* ARGSUSED */
449812f080e7Smrj static int
4499*20906b23SVikram Hegde rootnex_coredma_mctl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
450012f080e7Smrj     enum ddi_dma_ctlops request, off_t *offp, size_t *lenp, caddr_t *objpp,
450112f080e7Smrj     uint_t cache_flags)
450212f080e7Smrj {
450312f080e7Smrj #if defined(__amd64)
	/*
	 * DDI_DMA_SMEM_ALLOC & DDI_DMA_IOPB_ALLOC were changed to have a
	 * common implementation in genunix, so they no longer have x86
	 * specific functionality which called into dma_ctl.
	 *
	 * The rest of the obsoleted interfaces were never supported in the
	 * 64-bit x86 kernel. For s10, the obsoleted DDI_DMA_SEGTOC interface
	 * was not ported to the x86 64-bit kernel due to serious x86 rootnex
	 * implementation issues.
	 *
	 * If you can't use DDI_DMA_SEGTOC, then DDI_DMA_NEXTSEG,
	 * DDI_DMA_FREE, and DDI_DMA_NEXTWIN are useless since you can no
	 * longer get to the cookie, so we reflect that now too...
	 *
	 * Even though we fixed the pointer problem in DDI_DMA_SEGTOC, we are
	 * not going to put this functionality into the 64-bit x86 kernel
	 * now. It wasn't ported to the 64-bit kernel for s10, and there is
	 * no reason to change that in a future release.
	 */
452312f080e7Smrj 	return (DDI_FAILURE);
452412f080e7Smrj 
452512f080e7Smrj #else /* 32-bit x86 kernel */
452612f080e7Smrj 	ddi_dma_cookie_t lcookie;
452712f080e7Smrj 	ddi_dma_cookie_t *cookie;
452812f080e7Smrj 	rootnex_window_t *window;
452912f080e7Smrj 	ddi_dma_impl_t *hp;
453012f080e7Smrj 	rootnex_dma_t *dma;
453112f080e7Smrj 	uint_t nwin;
453212f080e7Smrj 	uint_t ccnt;
453312f080e7Smrj 	size_t len;
453412f080e7Smrj 	off_t off;
453512f080e7Smrj 	int e;
453612f080e7Smrj 
453712f080e7Smrj 
	/*
	 * DDI_DMA_SEGTOC, DDI_DMA_NEXTSEG, and DDI_DMA_NEXTWIN are a little
	 * hacky since we're optimizing for the current interfaces and so we
	 * can clean up the mess in genunix. Hopefully we will remove these
	 * obsoleted routines someday soon.
	 */
454412f080e7Smrj 
454512f080e7Smrj 	switch (request) {
454612f080e7Smrj 
454712f080e7Smrj 	case DDI_DMA_SEGTOC: /* ddi_dma_segtocookie() */
454812f080e7Smrj 		hp = (ddi_dma_impl_t *)handle;
454912f080e7Smrj 		cookie = (ddi_dma_cookie_t *)objpp;
455012f080e7Smrj 
455112f080e7Smrj 		/*
455212f080e7Smrj 		 * convert segment to cookie. We don't distinguish between the
455312f080e7Smrj 		 * two :-)
455412f080e7Smrj 		 */
455512f080e7Smrj 		*cookie = *hp->dmai_cookie;
455612f080e7Smrj 		*lenp = cookie->dmac_size;
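		/*
		 * dmac_type does double duty in this driver: it carries the
		 * cookie's offset into the buffer along with the
		 * ROOTNEX_USES_COPYBUF flag, which is masked back off below
		 * to recover the offset.
		 */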
455712f080e7Smrj 		*offp = cookie->dmac_type & ~ROOTNEX_USES_COPYBUF;
455812f080e7Smrj 		return (DDI_SUCCESS);
455912f080e7Smrj 
456012f080e7Smrj 	case DDI_DMA_NEXTSEG: /* ddi_dma_nextseg() */
456112f080e7Smrj 		hp = (ddi_dma_impl_t *)handle;
456212f080e7Smrj 		dma = (rootnex_dma_t *)hp->dmai_private;
456312f080e7Smrj 
456412f080e7Smrj 		if ((*lenp != NULL) && ((uintptr_t)*lenp != (uintptr_t)hp)) {
456512f080e7Smrj 			return (DDI_DMA_STALE);
456612f080e7Smrj 		}
456712f080e7Smrj 
456812f080e7Smrj 		/* handle the case where we don't have any windows */
456912f080e7Smrj 		if (dma->dp_window == NULL) {
457012f080e7Smrj 			/*
457112f080e7Smrj 			 * if seg == NULL, and we don't have any windows,
457212f080e7Smrj 			 * return the first cookie in the sgl.
457312f080e7Smrj 			 */
457412f080e7Smrj 			if (*lenp == NULL) {
457512f080e7Smrj 				dma->dp_current_cookie = 0;
457612f080e7Smrj 				hp->dmai_cookie = dma->dp_cookies;
457712f080e7Smrj 				*objpp = (caddr_t)handle;
457812f080e7Smrj 				return (DDI_SUCCESS);
457912f080e7Smrj 
458012f080e7Smrj 			/* if we have more cookies, go to the next cookie */
458112f080e7Smrj 			} else {
458212f080e7Smrj 				if ((dma->dp_current_cookie + 1) >=
458312f080e7Smrj 				    dma->dp_sglinfo.si_sgl_size) {
458412f080e7Smrj 					return (DDI_DMA_DONE);
458512f080e7Smrj 				}
458612f080e7Smrj 				dma->dp_current_cookie++;
458712f080e7Smrj 				hp->dmai_cookie++;
458812f080e7Smrj 				return (DDI_SUCCESS);
458912f080e7Smrj 			}
459012f080e7Smrj 		}
459112f080e7Smrj 
459212f080e7Smrj 		/* We have one or more windows */
459312f080e7Smrj 		window = &dma->dp_window[dma->dp_current_win];
459412f080e7Smrj 
459512f080e7Smrj 		/*
459612f080e7Smrj 		 * if seg == NULL, return the first cookie in the current
459712f080e7Smrj 		 * window
459812f080e7Smrj 		 */
459912f080e7Smrj 		if (*lenp == NULL) {
460012f080e7Smrj 			dma->dp_current_cookie = 0;
4601cf4e9a1dSmrj 			hp->dmai_cookie = window->wd_first_cookie;
460212f080e7Smrj 
		/*
		 * Go to the next cookie in the window, then see if we're done
		 * with this window.
		 */
460712f080e7Smrj 		} else {
460812f080e7Smrj 			if ((dma->dp_current_cookie + 1) >=
460912f080e7Smrj 			    window->wd_cookie_cnt) {
461012f080e7Smrj 				return (DDI_DMA_DONE);
461112f080e7Smrj 			}
461212f080e7Smrj 			dma->dp_current_cookie++;
461312f080e7Smrj 			hp->dmai_cookie++;
461412f080e7Smrj 		}
461512f080e7Smrj 		*objpp = (caddr_t)handle;
461612f080e7Smrj 		return (DDI_SUCCESS);
461712f080e7Smrj 
461812f080e7Smrj 	case DDI_DMA_NEXTWIN: /* ddi_dma_nextwin() */
461912f080e7Smrj 		hp = (ddi_dma_impl_t *)handle;
462012f080e7Smrj 		dma = (rootnex_dma_t *)hp->dmai_private;
462112f080e7Smrj 
462212f080e7Smrj 		if ((*offp != NULL) && ((uintptr_t)*offp != (uintptr_t)hp)) {
462312f080e7Smrj 			return (DDI_DMA_STALE);
462412f080e7Smrj 		}
462512f080e7Smrj 
462612f080e7Smrj 		/* if win == NULL, return the first window in the bind */
462712f080e7Smrj 		if (*offp == NULL) {
462812f080e7Smrj 			nwin = 0;
462912f080e7Smrj 
463012f080e7Smrj 		/*
463112f080e7Smrj 		 * else, go to the next window then see if we're done with all
463212f080e7Smrj 		 * the windows.
463312f080e7Smrj 		 */
463412f080e7Smrj 		} else {
463512f080e7Smrj 			nwin = dma->dp_current_win + 1;
463612f080e7Smrj 			if (nwin >= hp->dmai_nwin) {
463712f080e7Smrj 				return (DDI_DMA_DONE);
463812f080e7Smrj 			}
463912f080e7Smrj 		}
464012f080e7Smrj 
464112f080e7Smrj 		/* switch to the next window */
464212f080e7Smrj 		e = rootnex_dma_win(dip, rdip, handle, nwin, &off, &len,
464312f080e7Smrj 		    &lcookie, &ccnt);
464412f080e7Smrj 		ASSERT(e == DDI_SUCCESS);
464512f080e7Smrj 		if (e != DDI_SUCCESS) {
464612f080e7Smrj 			return (DDI_DMA_STALE);
464712f080e7Smrj 		}
464812f080e7Smrj 
464912f080e7Smrj 		/* reset the cookie back to the first cookie in the window */
465012f080e7Smrj 		if (dma->dp_window != NULL) {
465112f080e7Smrj 			window = &dma->dp_window[dma->dp_current_win];
465212f080e7Smrj 			hp->dmai_cookie = window->wd_first_cookie;
465312f080e7Smrj 		} else {
465412f080e7Smrj 			hp->dmai_cookie = dma->dp_cookies;
465512f080e7Smrj 		}
465612f080e7Smrj 
465712f080e7Smrj 		*objpp = (caddr_t)handle;
465812f080e7Smrj 		return (DDI_SUCCESS);
465912f080e7Smrj 
466012f080e7Smrj 	case DDI_DMA_FREE: /* ddi_dma_free() */
466112f080e7Smrj 		(void) rootnex_dma_unbindhdl(dip, rdip, handle);
466212f080e7Smrj 		(void) rootnex_dma_freehdl(dip, rdip, handle);
466312f080e7Smrj 		if (rootnex_state->r_dvma_call_list_id) {
466412f080e7Smrj 			ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
466512f080e7Smrj 		}
466612f080e7Smrj 		return (DDI_SUCCESS);
466712f080e7Smrj 
466812f080e7Smrj 	case DDI_DMA_IOPB_ALLOC:	/* get contiguous DMA-able memory */
466912f080e7Smrj 	case DDI_DMA_SMEM_ALLOC:	/* get contiguous DMA-able memory */
467012f080e7Smrj 		/* should never get here, handled in genunix */
467112f080e7Smrj 		ASSERT(0);
467212f080e7Smrj 		return (DDI_FAILURE);
467312f080e7Smrj 
467412f080e7Smrj 	case DDI_DMA_KVADDR:
467512f080e7Smrj 	case DDI_DMA_GETERR:
467612f080e7Smrj 	case DDI_DMA_COFF:
467712f080e7Smrj 		return (DDI_FAILURE);
467812f080e7Smrj 	}
467912f080e7Smrj 
468012f080e7Smrj 	return (DDI_FAILURE);
468112f080e7Smrj #endif /* defined(__amd64) */
46827c478bd9Sstevel@tonic-gate }
46837aec1d6eScindi 
4684*20906b23SVikram Hegde /*
4685*20906b23SVikram Hegde  * rootnex_dma_mctl()
4686*20906b23SVikram Hegde  *
4687*20906b23SVikram Hegde  */
4688*20906b23SVikram Hegde /* ARGSUSED */
4689*20906b23SVikram Hegde static int
4690*20906b23SVikram Hegde rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
4691*20906b23SVikram Hegde     enum ddi_dma_ctlops request, off_t *offp, size_t *lenp, caddr_t *objpp,
4692*20906b23SVikram Hegde     uint_t cache_flags)
4693*20906b23SVikram Hegde {
4694*20906b23SVikram Hegde #if !defined(__xpv)
4695*20906b23SVikram Hegde 	if (IOMMU_USED(rdip)) {
4696*20906b23SVikram Hegde 		return (iommulib_nexdma_mctl(dip, rdip, handle, request, offp,
4697*20906b23SVikram Hegde 		    lenp, objpp, cache_flags));
4698*20906b23SVikram Hegde 	}
4699*20906b23SVikram Hegde #endif
4700*20906b23SVikram Hegde 
4701*20906b23SVikram Hegde 	return (rootnex_coredma_mctl(dip, rdip, handle, request, offp,
4702*20906b23SVikram Hegde 	    lenp, objpp, cache_flags));
4703*20906b23SVikram Hegde }
470400d0963fSdilpreet 
470500d0963fSdilpreet /*
470600d0963fSdilpreet  * *********
470700d0963fSdilpreet  *  FMA Code
470800d0963fSdilpreet  * *********
470900d0963fSdilpreet  */
471000d0963fSdilpreet 
471100d0963fSdilpreet /*
471200d0963fSdilpreet  * rootnex_fm_init()
471300d0963fSdilpreet  *    FMA init busop
471400d0963fSdilpreet  */
47157aec1d6eScindi /* ARGSUSED */
47167aec1d6eScindi static int
471700d0963fSdilpreet rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap,
471800d0963fSdilpreet     ddi_iblock_cookie_t *ibc)
47197aec1d6eScindi {
472000d0963fSdilpreet 	*ibc = rootnex_state->r_err_ibc;
472100d0963fSdilpreet 
472200d0963fSdilpreet 	return (ddi_system_fmcap);
472300d0963fSdilpreet }
472400d0963fSdilpreet 
/*
 * rootnex_dma_check()
 *    Function called after a dma fault occurs to find out whether the
 *    fault address is associated with a driver that is able to handle
 *    faults and recover from them.
 */
473100d0963fSdilpreet /* ARGSUSED */
473200d0963fSdilpreet static int
473300d0963fSdilpreet rootnex_dma_check(dev_info_t *dip, const void *handle, const void *addr,
473400d0963fSdilpreet     const void *not_used)
473500d0963fSdilpreet {
473600d0963fSdilpreet 	rootnex_window_t *window;
473700d0963fSdilpreet 	uint64_t start_addr;
473800d0963fSdilpreet 	uint64_t fault_addr;
473900d0963fSdilpreet 	ddi_dma_impl_t *hp;
474000d0963fSdilpreet 	rootnex_dma_t *dma;
474100d0963fSdilpreet 	uint64_t end_addr;
474200d0963fSdilpreet 	size_t csize;
474300d0963fSdilpreet 	int i;
474400d0963fSdilpreet 	int j;
474500d0963fSdilpreet 
474600d0963fSdilpreet 
474700d0963fSdilpreet 	/* The driver has to set DDI_DMA_FLAGERR to recover from dma faults */
474800d0963fSdilpreet 	hp = (ddi_dma_impl_t *)handle;
474900d0963fSdilpreet 	ASSERT(hp);
475000d0963fSdilpreet 
475100d0963fSdilpreet 	dma = (rootnex_dma_t *)hp->dmai_private;
475200d0963fSdilpreet 
475300d0963fSdilpreet 	/* Get the address that we need to search for */
475400d0963fSdilpreet 	fault_addr = *(uint64_t *)addr;
475500d0963fSdilpreet 
475600d0963fSdilpreet 	/*
475700d0963fSdilpreet 	 * if we don't have any windows, we can just walk through all the
475800d0963fSdilpreet 	 * cookies.
475900d0963fSdilpreet 	 */
476000d0963fSdilpreet 	if (dma->dp_window == NULL) {
476100d0963fSdilpreet 		/* for each cookie */
476200d0963fSdilpreet 		for (i = 0; i < dma->dp_sglinfo.si_sgl_size; i++) {
476300d0963fSdilpreet 			/*
476400d0963fSdilpreet 			 * if the faulted address is within the physical address
476500d0963fSdilpreet 			 * range of the cookie, return DDI_FM_NONFATAL.
476600d0963fSdilpreet 			 */
476700d0963fSdilpreet 			if ((fault_addr >= dma->dp_cookies[i].dmac_laddress) &&
476800d0963fSdilpreet 			    (fault_addr <= (dma->dp_cookies[i].dmac_laddress +
476900d0963fSdilpreet 			    dma->dp_cookies[i].dmac_size))) {
477000d0963fSdilpreet 				return (DDI_FM_NONFATAL);
477100d0963fSdilpreet 			}
477200d0963fSdilpreet 		}
477300d0963fSdilpreet 
477400d0963fSdilpreet 		/* fault_addr not within this DMA handle */
477500d0963fSdilpreet 		return (DDI_FM_UNKNOWN);
477600d0963fSdilpreet 	}
477700d0963fSdilpreet 
	/* we have multiple windows, walk through each window */
477900d0963fSdilpreet 	for (i = 0; i < hp->dmai_nwin; i++) {
478000d0963fSdilpreet 		window = &dma->dp_window[i];
478100d0963fSdilpreet 
478200d0963fSdilpreet 		/* Go through all the cookies in the window */
478300d0963fSdilpreet 		for (j = 0; j < window->wd_cookie_cnt; j++) {
478400d0963fSdilpreet 
478500d0963fSdilpreet 			start_addr = window->wd_first_cookie[j].dmac_laddress;
478600d0963fSdilpreet 			csize = window->wd_first_cookie[j].dmac_size;
478700d0963fSdilpreet 
478800d0963fSdilpreet 			/*
478900d0963fSdilpreet 			 * if we are trimming the first cookie in the window,
479000d0963fSdilpreet 			 * and this is the first cookie, adjust the start
479100d0963fSdilpreet 			 * address and size of the cookie to account for the
479200d0963fSdilpreet 			 * trim.
479300d0963fSdilpreet 			 */
479400d0963fSdilpreet 			if (window->wd_trim.tr_trim_first && (j == 0)) {
479500d0963fSdilpreet 				start_addr = window->wd_trim.tr_first_paddr;
479600d0963fSdilpreet 				csize = window->wd_trim.tr_first_size;
479700d0963fSdilpreet 			}
479800d0963fSdilpreet 
479900d0963fSdilpreet 			/*
480000d0963fSdilpreet 			 * if we are trimming the last cookie in the window,
480100d0963fSdilpreet 			 * and this is the last cookie, adjust the start
480200d0963fSdilpreet 			 * address and size of the cookie to account for the
480300d0963fSdilpreet 			 * trim.
480400d0963fSdilpreet 			 */
480500d0963fSdilpreet 			if (window->wd_trim.tr_trim_last &&
480600d0963fSdilpreet 			    (j == (window->wd_cookie_cnt - 1))) {
480700d0963fSdilpreet 				start_addr = window->wd_trim.tr_last_paddr;
480800d0963fSdilpreet 				csize = window->wd_trim.tr_last_size;
480900d0963fSdilpreet 			}
481000d0963fSdilpreet 
481100d0963fSdilpreet 			end_addr = start_addr + csize;
481200d0963fSdilpreet 
481300d0963fSdilpreet 			/*
481400d0963fSdilpreet 			 * if the faulted address is within the physical address
481500d0963fSdilpreet 			 * range of the cookie, return DDI_FM_NONFATAL.
481600d0963fSdilpreet 			 */
481700d0963fSdilpreet 			if ((fault_addr >= start_addr) &&
481800d0963fSdilpreet 			    (fault_addr <= end_addr)) {
481900d0963fSdilpreet 				return (DDI_FM_NONFATAL);
482000d0963fSdilpreet 			}
482100d0963fSdilpreet 		}
482200d0963fSdilpreet 	}
482300d0963fSdilpreet 
482400d0963fSdilpreet 	/* fault_addr not within this DMA handle */
482500d0963fSdilpreet 	return (DDI_FM_UNKNOWN);
48267aec1d6eScindi }
4827