xref: /titanic_54/usr/src/uts/i86pc/io/rootnex.c (revision a54f81fbf66e3e4c14a94d571c0fe241e4cf2394)
17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate  * CDDL HEADER START
37c478bd9Sstevel@tonic-gate  *
47c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
500d0963fSdilpreet  * Common Development and Distribution License (the "License").
600d0963fSdilpreet  * You may not use this file except in compliance with the License.
77c478bd9Sstevel@tonic-gate  *
87c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
107c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
117c478bd9Sstevel@tonic-gate  * and limitations under the License.
127c478bd9Sstevel@tonic-gate  *
137c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
147c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
167c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
177c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
187c478bd9Sstevel@tonic-gate  *
197c478bd9Sstevel@tonic-gate  * CDDL HEADER END
207c478bd9Sstevel@tonic-gate  */
217c478bd9Sstevel@tonic-gate /*
228a552b2dScth  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
237c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
247c478bd9Sstevel@tonic-gate  */
257c478bd9Sstevel@tonic-gate 
267c478bd9Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
277c478bd9Sstevel@tonic-gate 
287c478bd9Sstevel@tonic-gate /*
2912f080e7Smrj  * x86 root nexus driver
307c478bd9Sstevel@tonic-gate  */
317c478bd9Sstevel@tonic-gate 
327c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h>
337c478bd9Sstevel@tonic-gate #include <sys/conf.h>
347c478bd9Sstevel@tonic-gate #include <sys/autoconf.h>
357c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h>
367c478bd9Sstevel@tonic-gate #include <sys/debug.h>
377c478bd9Sstevel@tonic-gate #include <sys/psw.h>
387c478bd9Sstevel@tonic-gate #include <sys/ddidmareq.h>
397c478bd9Sstevel@tonic-gate #include <sys/promif.h>
407c478bd9Sstevel@tonic-gate #include <sys/devops.h>
417c478bd9Sstevel@tonic-gate #include <sys/kmem.h>
427c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>
437c478bd9Sstevel@tonic-gate #include <vm/seg.h>
447c478bd9Sstevel@tonic-gate #include <vm/seg_kmem.h>
457c478bd9Sstevel@tonic-gate #include <vm/seg_dev.h>
467c478bd9Sstevel@tonic-gate #include <sys/vmem.h>
477c478bd9Sstevel@tonic-gate #include <sys/mman.h>
487c478bd9Sstevel@tonic-gate #include <vm/hat.h>
497c478bd9Sstevel@tonic-gate #include <vm/as.h>
507c478bd9Sstevel@tonic-gate #include <vm/page.h>
517c478bd9Sstevel@tonic-gate #include <sys/avintr.h>
527c478bd9Sstevel@tonic-gate #include <sys/errno.h>
537c478bd9Sstevel@tonic-gate #include <sys/modctl.h>
547c478bd9Sstevel@tonic-gate #include <sys/ddi_impldefs.h>
557c478bd9Sstevel@tonic-gate #include <sys/sunddi.h>
567c478bd9Sstevel@tonic-gate #include <sys/sunndi.h>
577a364d25Sschwartz #include <sys/mach_intr.h>
587c478bd9Sstevel@tonic-gate #include <sys/psm.h>
597c478bd9Sstevel@tonic-gate #include <sys/ontrap.h>
6012f080e7Smrj #include <sys/atomic.h>
6112f080e7Smrj #include <sys/sdt.h>
6212f080e7Smrj #include <sys/rootnex.h>
6312f080e7Smrj #include <vm/hat_i86.h>
6400d0963fSdilpreet #include <sys/ddifm.h>
657c478bd9Sstevel@tonic-gate 
6612f080e7Smrj /*
6712f080e7Smrj  * enable/disable extra checking of function parameters. Useful for debugging
6812f080e7Smrj  * drivers.
6912f080e7Smrj  */
7012f080e7Smrj #ifdef	DEBUG
7112f080e7Smrj int rootnex_alloc_check_parms = 1;
7212f080e7Smrj int rootnex_bind_check_parms = 1;
7312f080e7Smrj int rootnex_bind_check_inuse = 1;
7412f080e7Smrj int rootnex_unbind_verify_buffer = 0;
7512f080e7Smrj int rootnex_sync_check_parms = 1;
7612f080e7Smrj #else
7712f080e7Smrj int rootnex_alloc_check_parms = 0;
7812f080e7Smrj int rootnex_bind_check_parms = 0;
7912f080e7Smrj int rootnex_bind_check_inuse = 0;
8012f080e7Smrj int rootnex_unbind_verify_buffer = 0;
8112f080e7Smrj int rootnex_sync_check_parms = 0;
8212f080e7Smrj #endif
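/*
 * One illustrative way to flip one of these on a live kernel (variable and
 * value chosen as an example only):
 *	echo 'rootnex_bind_check_parms/W 1' | mdb -kw
 * They can also be set in /etc/system before boot.
 */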
837c478bd9Sstevel@tonic-gate 
847aec1d6eScindi /* Master Abort and Target Abort panic flag */
857aec1d6eScindi int rootnex_fm_ma_ta_panic_flag = 0;
867aec1d6eScindi 
8712f080e7Smrj /* Semi-temporary patchables to phase in bug fixes, test drivers, etc. */
887c478bd9Sstevel@tonic-gate int rootnex_bind_fail = 1;
897c478bd9Sstevel@tonic-gate int rootnex_bind_warn = 1;
907c478bd9Sstevel@tonic-gate uint8_t *rootnex_warn_list;
917c478bd9Sstevel@tonic-gate /* bitmasks for rootnex_warn_list. Up to 8 different warnings with uint8_t */
927c478bd9Sstevel@tonic-gate #define	ROOTNEX_BIND_WARNING	(0x1 << 0)
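/*
 * e.g. a second, hypothetical warning type would claim the next bit:
 *	#define	ROOTNEX_SOMEOTHER_WARNING	(0x1 << 1)
 */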
937c478bd9Sstevel@tonic-gate 
947c478bd9Sstevel@tonic-gate /*
9512f080e7Smrj  * revert to the old broken behavior of always sync'ing the entire copy buffer.
9612f080e7Smrj  * This is useful if we have a buggy driver which doesn't correctly pass in
9712f080e7Smrj  * the offset and size into ddi_dma_sync().
987c478bd9Sstevel@tonic-gate  */
9912f080e7Smrj int rootnex_sync_ignore_params = 0;
1007c478bd9Sstevel@tonic-gate 
1017c478bd9Sstevel@tonic-gate /*
10212f080e7Smrj  * maximum size that we will allow for a copy buffer. Can be patched on the
10312f080e7Smrj  * fly
1047c478bd9Sstevel@tonic-gate  */
10512f080e7Smrj size_t rootnex_max_copybuf_size = 0x100000;
1067c478bd9Sstevel@tonic-gate 
1077c478bd9Sstevel@tonic-gate /*
10812f080e7Smrj  * For the 64-bit kernel, pre-alloc enough cookies for a 256K buffer plus 1
10912f080e7Smrj  * page for alignment. For the 32-bit kernel, pre-alloc enough cookies for a
11012f080e7Smrj  * 64K buffer plus 1 page for alignment (we have less kernel space in a 32-bit
11112f080e7Smrj  * kernel). Allocate enough windows to handle a 256K buffer w/ at least 65
11212f080e7Smrj  * sgllen DMA engine, and enough copybuf buffer state pages to handle 2 pages
11312f080e7Smrj  * (< 8K). We will still need to allocate the copy buffer during bind though
11412f080e7Smrj  * (if we need one). These can only be modified in /etc/system before rootnex
11512f080e7Smrj  * attach.
1167c478bd9Sstevel@tonic-gate  */
11712f080e7Smrj #if defined(__amd64)
11812f080e7Smrj int rootnex_prealloc_cookies = 65;
11912f080e7Smrj int rootnex_prealloc_windows = 4;
12012f080e7Smrj int rootnex_prealloc_copybuf = 2;
12112f080e7Smrj #else
12212f080e7Smrj int rootnex_prealloc_cookies = 33;
12312f080e7Smrj int rootnex_prealloc_windows = 4;
12412f080e7Smrj int rootnex_prealloc_copybuf = 2;
12512f080e7Smrj #endif
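/*
 * Illustrative sizing, assuming 4K pages: a 256K buffer spans 64 pages, so 64
 * cookies plus 1 for page misalignment gives the 65 pre-allocated for amd64
 * above. An /etc/system override would look like (value hypothetical):
 *	set rootnex:rootnex_prealloc_cookies = 129
 */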
1267c478bd9Sstevel@tonic-gate 
12712f080e7Smrj /* driver global state */
12812f080e7Smrj static rootnex_state_t *rootnex_state;
12912f080e7Smrj 
13012f080e7Smrj /* shortcut to rootnex counters */
13112f080e7Smrj static uint64_t *rootnex_cnt;
1327c478bd9Sstevel@tonic-gate 
1337c478bd9Sstevel@tonic-gate /*
13412f080e7Smrj  * XXX - does x86 even need these or are they left over from the SPARC days?
1357c478bd9Sstevel@tonic-gate  */
13612f080e7Smrj /* statically defined integer/boolean properties for the root node */
13712f080e7Smrj static rootnex_intprop_t rootnex_intprp[] = {
13812f080e7Smrj 	{ "PAGESIZE",			PAGESIZE },
13912f080e7Smrj 	{ "MMU_PAGESIZE",		MMU_PAGESIZE },
14012f080e7Smrj 	{ "MMU_PAGEOFFSET",		MMU_PAGEOFFSET },
14112f080e7Smrj 	{ DDI_RELATIVE_ADDRESSING,	1 },
14212f080e7Smrj };
14312f080e7Smrj #define	NROOT_INTPROPS	(sizeof (rootnex_intprp) / sizeof (rootnex_intprop_t))
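/*
 * Illustrative use from a child driver (not part of this file): one of these
 * root-node properties can be looked up with, e.g.,
 *	pgsz = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "PAGESIZE", 0);
 */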
1447c478bd9Sstevel@tonic-gate 
1457c478bd9Sstevel@tonic-gate 
14612f080e7Smrj static struct cb_ops rootnex_cb_ops = {
14712f080e7Smrj 	nodev,		/* open */
14812f080e7Smrj 	nodev,		/* close */
14912f080e7Smrj 	nodev,		/* strategy */
15012f080e7Smrj 	nodev,		/* print */
15112f080e7Smrj 	nodev,		/* dump */
15212f080e7Smrj 	nodev,		/* read */
15312f080e7Smrj 	nodev,		/* write */
15412f080e7Smrj 	nodev,		/* ioctl */
15512f080e7Smrj 	nodev,		/* devmap */
15612f080e7Smrj 	nodev,		/* mmap */
15712f080e7Smrj 	nodev,		/* segmap */
15812f080e7Smrj 	nochpoll,	/* chpoll */
15912f080e7Smrj 	ddi_prop_op,	/* cb_prop_op */
16012f080e7Smrj 	NULL,		/* struct streamtab */
16112f080e7Smrj 	D_NEW | D_MP | D_HOTPLUG, /* compatibility flags */
16212f080e7Smrj 	CB_REV,		/* Rev */
16312f080e7Smrj 	nodev,		/* cb_aread */
16412f080e7Smrj 	nodev		/* cb_awrite */
16512f080e7Smrj };
1667c478bd9Sstevel@tonic-gate 
16712f080e7Smrj static int rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
1687c478bd9Sstevel@tonic-gate     off_t offset, off_t len, caddr_t *vaddrp);
16912f080e7Smrj static int rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip,
1707c478bd9Sstevel@tonic-gate     struct hat *hat, struct seg *seg, caddr_t addr,
1717c478bd9Sstevel@tonic-gate     struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock);
17212f080e7Smrj static int rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip,
1737c478bd9Sstevel@tonic-gate     struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep);
17412f080e7Smrj static int rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
17512f080e7Smrj     ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
17612f080e7Smrj     ddi_dma_handle_t *handlep);
17712f080e7Smrj static int rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
17812f080e7Smrj     ddi_dma_handle_t handle);
17912f080e7Smrj static int rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
18012f080e7Smrj     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
18112f080e7Smrj     ddi_dma_cookie_t *cookiep, uint_t *ccountp);
18212f080e7Smrj static int rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
18312f080e7Smrj     ddi_dma_handle_t handle);
18412f080e7Smrj static int rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip,
18512f080e7Smrj     ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags);
18612f080e7Smrj static int rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip,
18712f080e7Smrj     ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp,
18812f080e7Smrj     ddi_dma_cookie_t *cookiep, uint_t *ccountp);
18912f080e7Smrj static int rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
1907c478bd9Sstevel@tonic-gate     ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
1917c478bd9Sstevel@tonic-gate     off_t *offp, size_t *lenp, caddr_t *objp, uint_t cache_flags);
19212f080e7Smrj static int rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip,
19312f080e7Smrj     ddi_ctl_enum_t ctlop, void *arg, void *result);
19400d0963fSdilpreet static int rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap,
19500d0963fSdilpreet     ddi_iblock_cookie_t *ibc);
19612f080e7Smrj static int rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip,
19712f080e7Smrj     ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp, void *result);
1987c478bd9Sstevel@tonic-gate 
1997c478bd9Sstevel@tonic-gate 
2007c478bd9Sstevel@tonic-gate static struct bus_ops rootnex_bus_ops = {
2017c478bd9Sstevel@tonic-gate 	BUSO_REV,
2027c478bd9Sstevel@tonic-gate 	rootnex_map,
2037c478bd9Sstevel@tonic-gate 	NULL,
2047c478bd9Sstevel@tonic-gate 	NULL,
2057c478bd9Sstevel@tonic-gate 	NULL,
2067c478bd9Sstevel@tonic-gate 	rootnex_map_fault,
2077c478bd9Sstevel@tonic-gate 	rootnex_dma_map,
2087c478bd9Sstevel@tonic-gate 	rootnex_dma_allochdl,
2097c478bd9Sstevel@tonic-gate 	rootnex_dma_freehdl,
2107c478bd9Sstevel@tonic-gate 	rootnex_dma_bindhdl,
2117c478bd9Sstevel@tonic-gate 	rootnex_dma_unbindhdl,
21212f080e7Smrj 	rootnex_dma_sync,
2137c478bd9Sstevel@tonic-gate 	rootnex_dma_win,
2147c478bd9Sstevel@tonic-gate 	rootnex_dma_mctl,
2157c478bd9Sstevel@tonic-gate 	rootnex_ctlops,
2167c478bd9Sstevel@tonic-gate 	ddi_bus_prop_op,
2177c478bd9Sstevel@tonic-gate 	i_ddi_rootnex_get_eventcookie,
2187c478bd9Sstevel@tonic-gate 	i_ddi_rootnex_add_eventcall,
2197c478bd9Sstevel@tonic-gate 	i_ddi_rootnex_remove_eventcall,
2207c478bd9Sstevel@tonic-gate 	i_ddi_rootnex_post_event,
2217c478bd9Sstevel@tonic-gate 	0,			/* bus_intr_ctl */
2227c478bd9Sstevel@tonic-gate 	0,			/* bus_config */
2237c478bd9Sstevel@tonic-gate 	0,			/* bus_unconfig */
22400d0963fSdilpreet 	rootnex_fm_init,	/* bus_fm_init */
2257c478bd9Sstevel@tonic-gate 	NULL,			/* bus_fm_fini */
2267c478bd9Sstevel@tonic-gate 	NULL,			/* bus_fm_access_enter */
2277c478bd9Sstevel@tonic-gate 	NULL,			/* bus_fm_access_exit */
2287c478bd9Sstevel@tonic-gate 	NULL,			/* bus_power */
2297c478bd9Sstevel@tonic-gate 	rootnex_intr_ops	/* bus_intr_op */
2307c478bd9Sstevel@tonic-gate };
2317c478bd9Sstevel@tonic-gate 
23212f080e7Smrj static int rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
23312f080e7Smrj static int rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
2347c478bd9Sstevel@tonic-gate 
2357c478bd9Sstevel@tonic-gate static struct dev_ops rootnex_ops = {
2367c478bd9Sstevel@tonic-gate 	DEVO_REV,
23712f080e7Smrj 	0,
23812f080e7Smrj 	ddi_no_info,
2397c478bd9Sstevel@tonic-gate 	nulldev,
24012f080e7Smrj 	nulldev,
2417c478bd9Sstevel@tonic-gate 	rootnex_attach,
24212f080e7Smrj 	rootnex_detach,
24312f080e7Smrj 	nulldev,
24412f080e7Smrj 	&rootnex_cb_ops,
2457c478bd9Sstevel@tonic-gate 	&rootnex_bus_ops
2467c478bd9Sstevel@tonic-gate };
2477c478bd9Sstevel@tonic-gate 
24812f080e7Smrj static struct modldrv rootnex_modldrv = {
24912f080e7Smrj 	&mod_driverops,
2507c478bd9Sstevel@tonic-gate 	"i86pc root nexus %I%",
25112f080e7Smrj 	&rootnex_ops
2527c478bd9Sstevel@tonic-gate };
2537c478bd9Sstevel@tonic-gate 
25412f080e7Smrj static struct modlinkage rootnex_modlinkage = {
25512f080e7Smrj 	MODREV_1,
25612f080e7Smrj 	(void *)&rootnex_modldrv,
25712f080e7Smrj 	NULL
2587c478bd9Sstevel@tonic-gate };
2597c478bd9Sstevel@tonic-gate 
2607c478bd9Sstevel@tonic-gate 
26112f080e7Smrj /*
26212f080e7Smrj  *  extern hacks
26312f080e7Smrj  */
26412f080e7Smrj extern struct seg_ops segdev_ops;
26512f080e7Smrj extern int ignore_hardware_nodes;	/* force flag from ddi_impl.c */
26612f080e7Smrj #ifdef	DDI_MAP_DEBUG
26712f080e7Smrj extern int ddi_map_debug_flag;
26812f080e7Smrj #define	ddi_map_debug	if (ddi_map_debug_flag) prom_printf
26912f080e7Smrj #endif
27012f080e7Smrj #define	ptob64(x)	(((uint64_t)(x)) << MMU_PAGESHIFT)
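/*
 * ptob64() keeps the shift in 64 bits; e.g. with 4K pages a pfn of 0x100000
 * converts to 0x100000000 (4GB), which would overflow a 32-bit ptob().
 */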
27112f080e7Smrj extern void i86_pp_map(page_t *pp, caddr_t kaddr);
27212f080e7Smrj extern void i86_va_map(caddr_t vaddr, struct as *asp, caddr_t kaddr);
27312f080e7Smrj extern int (*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *,
27412f080e7Smrj     psm_intr_op_t, int *);
27512f080e7Smrj extern int impl_ddi_sunbus_initchild(dev_info_t *dip);
27612f080e7Smrj extern void impl_ddi_sunbus_removechild(dev_info_t *dip);
27712f080e7Smrj /*
27812f080e7Smrj  * Use the device arena for device control register mappings.
27912f080e7Smrj  * Various kernel memory walkers (debugger, dtrace) need to know
28012f080e7Smrj  * to avoid this address range to prevent undesired device activity.
28112f080e7Smrj  */
28212f080e7Smrj extern void *device_arena_alloc(size_t size, int vm_flag);
28312f080e7Smrj extern void device_arena_free(void * vaddr, size_t size);
28412f080e7Smrj 
28512f080e7Smrj 
28612f080e7Smrj /*
28712f080e7Smrj  *  Internal functions
28812f080e7Smrj  */
28912f080e7Smrj static int rootnex_dma_init();
29012f080e7Smrj static void rootnex_add_props(dev_info_t *);
29112f080e7Smrj static int rootnex_ctl_reportdev(dev_info_t *dip);
29212f080e7Smrj static struct intrspec *rootnex_get_ispec(dev_info_t *rdip, int inum);
29312f080e7Smrj static int rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp);
29412f080e7Smrj static int rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp);
29512f080e7Smrj static int rootnex_map_handle(ddi_map_req_t *mp);
29612f080e7Smrj static void rootnex_clean_dmahdl(ddi_dma_impl_t *hp);
29712f080e7Smrj static int rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegsize);
29812f080e7Smrj static int rootnex_valid_bind_parms(ddi_dma_req_t *dmareq,
29912f080e7Smrj     ddi_dma_attr_t *attr);
30012f080e7Smrj static void rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
30112f080e7Smrj     rootnex_sglinfo_t *sglinfo);
30212f080e7Smrj static int rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
30312f080e7Smrj     rootnex_dma_t *dma, ddi_dma_attr_t *attr, int kmflag);
30412f080e7Smrj static int rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
30512f080e7Smrj     rootnex_dma_t *dma, ddi_dma_attr_t *attr);
30612f080e7Smrj static void rootnex_teardown_copybuf(rootnex_dma_t *dma);
30712f080e7Smrj static int rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
30812f080e7Smrj     ddi_dma_attr_t *attr, int kmflag);
30912f080e7Smrj static void rootnex_teardown_windows(rootnex_dma_t *dma);
31012f080e7Smrj static void rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
31112f080e7Smrj     rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset);
31212f080e7Smrj static void rootnex_setup_cookie(ddi_dma_obj_t *dmar_object,
31312f080e7Smrj     rootnex_dma_t *dma, ddi_dma_cookie_t *cookie, off_t cur_offset,
31412f080e7Smrj     size_t *copybuf_used, page_t **cur_pp);
31512f080e7Smrj static int rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp,
31612f080e7Smrj     rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie,
31712f080e7Smrj     ddi_dma_attr_t *attr, off_t cur_offset);
31812f080e7Smrj static int rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp,
31912f080e7Smrj     rootnex_dma_t *dma, rootnex_window_t **windowp,
32012f080e7Smrj     ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used);
32112f080e7Smrj static int rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp,
32212f080e7Smrj     rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie);
32312f080e7Smrj static int rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win,
32412f080e7Smrj     off_t offset, size_t size, uint_t cache_flags);
32512f080e7Smrj static int rootnex_verify_buffer(rootnex_dma_t *dma);
32600d0963fSdilpreet static int rootnex_dma_check(dev_info_t *dip, const void *handle,
32700d0963fSdilpreet     const void *comp_addr, const void *not_used);
32812f080e7Smrj 
32912f080e7Smrj /*
33012f080e7Smrj  * _init()
33112f080e7Smrj  *
33212f080e7Smrj  */
3337c478bd9Sstevel@tonic-gate int
3347c478bd9Sstevel@tonic-gate _init(void)
3357c478bd9Sstevel@tonic-gate {
33612f080e7Smrj 
33712f080e7Smrj 	rootnex_state = NULL;
33812f080e7Smrj 	return (mod_install(&rootnex_modlinkage));
3397c478bd9Sstevel@tonic-gate }
3407c478bd9Sstevel@tonic-gate 
34112f080e7Smrj 
34212f080e7Smrj /*
34312f080e7Smrj  * _info()
34412f080e7Smrj  *
34512f080e7Smrj  */
34612f080e7Smrj int
34712f080e7Smrj _info(struct modinfo *modinfop)
34812f080e7Smrj {
34912f080e7Smrj 	return (mod_info(&rootnex_modlinkage, modinfop));
35012f080e7Smrj }
35112f080e7Smrj 
35212f080e7Smrj 
35312f080e7Smrj /*
35412f080e7Smrj  * _fini()
35512f080e7Smrj  *
35612f080e7Smrj  */
3577c478bd9Sstevel@tonic-gate int
3587c478bd9Sstevel@tonic-gate _fini(void)
3597c478bd9Sstevel@tonic-gate {
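	/*
	 * The root nexus can never be unloaded; returning EBUSY here makes
	 * mod_remove() fail unconditionally.
	 */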
3607c478bd9Sstevel@tonic-gate 	return (EBUSY);
3617c478bd9Sstevel@tonic-gate }
3627c478bd9Sstevel@tonic-gate 
36312f080e7Smrj 
36412f080e7Smrj /*
36512f080e7Smrj  * rootnex_attach()
36612f080e7Smrj  *
36712f080e7Smrj  */
36812f080e7Smrj static int
36912f080e7Smrj rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
3707c478bd9Sstevel@tonic-gate {
3717aec1d6eScindi 	int fmcap;
37212f080e7Smrj 	int e;
37312f080e7Smrj 
37412f080e7Smrj 
37512f080e7Smrj 	switch (cmd) {
37612f080e7Smrj 	case DDI_ATTACH:
37712f080e7Smrj 		break;
37812f080e7Smrj 	case DDI_RESUME:
37912f080e7Smrj 		return (DDI_SUCCESS);
38012f080e7Smrj 	default:
38112f080e7Smrj 		return (DDI_FAILURE);
3827c478bd9Sstevel@tonic-gate 	}
3837c478bd9Sstevel@tonic-gate 
3847c478bd9Sstevel@tonic-gate 	/*
38512f080e7Smrj 	 * We should only have one instance of rootnex. Save it away since we
38612f080e7Smrj 	 * don't have an easy way to get it back later.
3877c478bd9Sstevel@tonic-gate 	 */
38812f080e7Smrj 	ASSERT(rootnex_state == NULL);
38912f080e7Smrj 	rootnex_state = kmem_zalloc(sizeof (rootnex_state_t), KM_SLEEP);
3907c478bd9Sstevel@tonic-gate 
39112f080e7Smrj 	rootnex_state->r_dip = dip;
3927aec1d6eScindi 	rootnex_state->r_err_ibc = (ddi_iblock_cookie_t)ipltospl(15);
39312f080e7Smrj 	rootnex_state->r_reserved_msg_printed = B_FALSE;
39412f080e7Smrj 	rootnex_cnt = &rootnex_state->r_counters[0];
3957c478bd9Sstevel@tonic-gate 
3967aec1d6eScindi 	/*
3977aec1d6eScindi 	 * Set minimum fm capability level for i86pc platforms and then
3987aec1d6eScindi 	 * initialize error handling. Since we're the rootnex, we don't
3997aec1d6eScindi 	 * care what's returned in the fmcap field.
4007aec1d6eScindi 	 */
40100d0963fSdilpreet 	ddi_system_fmcap = DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE |
40200d0963fSdilpreet 	    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE;
4037aec1d6eScindi 	fmcap = ddi_system_fmcap;
4047aec1d6eScindi 	ddi_fm_init(dip, &fmcap, &rootnex_state->r_err_ibc);
4057aec1d6eScindi 
40612f080e7Smrj 	/* initialize DMA related state */
40712f080e7Smrj 	e = rootnex_dma_init();
40812f080e7Smrj 	if (e != DDI_SUCCESS) {
40912f080e7Smrj 		kmem_free(rootnex_state, sizeof (rootnex_state_t));
41012f080e7Smrj 		return (DDI_FAILURE);
41112f080e7Smrj 	}
41212f080e7Smrj 
41312f080e7Smrj 	/* Add static root node properties */
41412f080e7Smrj 	rootnex_add_props(dip);
41512f080e7Smrj 
41612f080e7Smrj 	/* since we can't call ddi_report_dev() */
41712f080e7Smrj 	cmn_err(CE_CONT, "?root nexus = %s\n", ddi_get_name(dip));
41812f080e7Smrj 
41912f080e7Smrj 	/* Initialize rootnex event handle */
42012f080e7Smrj 	i_ddi_rootnex_init_events(dip);
42112f080e7Smrj 
42212f080e7Smrj 	return (DDI_SUCCESS);
42312f080e7Smrj }
42412f080e7Smrj 
42512f080e7Smrj 
42612f080e7Smrj /*
42712f080e7Smrj  * rootnex_detach()
42812f080e7Smrj  *
42912f080e7Smrj  */
4307c478bd9Sstevel@tonic-gate /*ARGSUSED*/
4317c478bd9Sstevel@tonic-gate static int
43212f080e7Smrj rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
4337c478bd9Sstevel@tonic-gate {
43412f080e7Smrj 	switch (cmd) {
43512f080e7Smrj 	case DDI_SUSPEND:
43612f080e7Smrj 		break;
43712f080e7Smrj 	default:
43812f080e7Smrj 		return (DDI_FAILURE);
43912f080e7Smrj 	}
4407c478bd9Sstevel@tonic-gate 
44112f080e7Smrj 	return (DDI_SUCCESS);
44212f080e7Smrj }
4437c478bd9Sstevel@tonic-gate 
4447c478bd9Sstevel@tonic-gate 
44512f080e7Smrj /*
44612f080e7Smrj  * rootnex_dma_init()
44712f080e7Smrj  *
44812f080e7Smrj  */
44912f080e7Smrj /*ARGSUSED*/
45012f080e7Smrj static int
45112f080e7Smrj rootnex_dma_init()
45212f080e7Smrj {
45312f080e7Smrj 	size_t bufsize;
45412f080e7Smrj 
45512f080e7Smrj 
45612f080e7Smrj 	/*
45712f080e7Smrj 	 * size of our cookie/window/copybuf state needed in dma bind that we
45812f080e7Smrj 	 * pre-alloc in dma_alloc_handle
45912f080e7Smrj 	 */
46012f080e7Smrj 	rootnex_state->r_prealloc_cookies = rootnex_prealloc_cookies;
46112f080e7Smrj 	rootnex_state->r_prealloc_size =
46212f080e7Smrj 	    (rootnex_state->r_prealloc_cookies * sizeof (ddi_dma_cookie_t)) +
46312f080e7Smrj 	    (rootnex_prealloc_windows * sizeof (rootnex_window_t)) +
46412f080e7Smrj 	    (rootnex_prealloc_copybuf * sizeof (rootnex_pgmap_t));
46512f080e7Smrj 
46612f080e7Smrj 	/*
46712f080e7Smrj 	 * set up the DDI DMA handle kmem cache; align each handle on 64 bytes and
46812f080e7Smrj 	 * allocate 16 extra bytes for struct pointer alignment
46912f080e7Smrj 	 * (p->dmai_private & dma->dp_prealloc_buffer)
47012f080e7Smrj 	 */
47112f080e7Smrj 	bufsize = sizeof (ddi_dma_impl_t) + sizeof (rootnex_dma_t) +
47212f080e7Smrj 	    rootnex_state->r_prealloc_size + 0x10;
47312f080e7Smrj 	rootnex_state->r_dmahdl_cache = kmem_cache_create("rootnex_dmahdl",
47412f080e7Smrj 	    bufsize, 64, NULL, NULL, NULL, NULL, NULL, 0);
47512f080e7Smrj 	if (rootnex_state->r_dmahdl_cache == NULL) {
47612f080e7Smrj 		return (DDI_FAILURE);
47712f080e7Smrj 	}
4787c478bd9Sstevel@tonic-gate 
4797c478bd9Sstevel@tonic-gate 	/*
4807c478bd9Sstevel@tonic-gate 	 * allocate array to track which major numbers we have printed warnings
4817c478bd9Sstevel@tonic-gate 	 * for.
4827c478bd9Sstevel@tonic-gate 	 */
4837c478bd9Sstevel@tonic-gate 	rootnex_warn_list = kmem_zalloc(devcnt * sizeof (*rootnex_warn_list),
4847c478bd9Sstevel@tonic-gate 	    KM_SLEEP);
4857c478bd9Sstevel@tonic-gate 
4867c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
4877c478bd9Sstevel@tonic-gate }
4887c478bd9Sstevel@tonic-gate 
4897c478bd9Sstevel@tonic-gate 
4907c478bd9Sstevel@tonic-gate /*
49112f080e7Smrj  * rootnex_add_props()
49212f080e7Smrj  *
4937c478bd9Sstevel@tonic-gate  */
4947c478bd9Sstevel@tonic-gate static void
49512f080e7Smrj rootnex_add_props(dev_info_t *dip)
4967c478bd9Sstevel@tonic-gate {
49712f080e7Smrj 	rootnex_intprop_t *rpp;
4987c478bd9Sstevel@tonic-gate 	int i;
4997c478bd9Sstevel@tonic-gate 
50012f080e7Smrj 	/* Add static integer/boolean properties to the root node */
50112f080e7Smrj 	rpp = rootnex_intprp;
50212f080e7Smrj 	for (i = 0; i < NROOT_INTPROPS; i++) {
50312f080e7Smrj 		(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip,
50412f080e7Smrj 		    rpp[i].prop_name, rpp[i].prop_value);
50512f080e7Smrj 	}
5067c478bd9Sstevel@tonic-gate }
5077c478bd9Sstevel@tonic-gate 
50812f080e7Smrj 
50912f080e7Smrj 
5107c478bd9Sstevel@tonic-gate /*
51112f080e7Smrj  * *************************
51212f080e7Smrj  *  ctlops related routines
51312f080e7Smrj  * *************************
51412f080e7Smrj  */
51512f080e7Smrj 
51612f080e7Smrj /*
51712f080e7Smrj  * rootnex_ctlops()
5187c478bd9Sstevel@tonic-gate  *
5197c478bd9Sstevel@tonic-gate  */
520a195726fSgovinda /*ARGSUSED*/
5217c478bd9Sstevel@tonic-gate static int
52212f080e7Smrj rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
52312f080e7Smrj     void *arg, void *result)
5247c478bd9Sstevel@tonic-gate {
52512f080e7Smrj 	int n, *ptr;
52612f080e7Smrj 	struct ddi_parent_private_data *pdp;
5277c478bd9Sstevel@tonic-gate 
52812f080e7Smrj 	switch (ctlop) {
52912f080e7Smrj 	case DDI_CTLOPS_DMAPMAPC:
5307c478bd9Sstevel@tonic-gate 		/*
53112f080e7Smrj 		 * Return 'partial' to indicate that dma mapping
53212f080e7Smrj 		 * has to be done in the main MMU.
5337c478bd9Sstevel@tonic-gate 		 */
53412f080e7Smrj 		return (DDI_DMA_PARTIAL);
5357c478bd9Sstevel@tonic-gate 
53612f080e7Smrj 	case DDI_CTLOPS_BTOP:
5377c478bd9Sstevel@tonic-gate 		/*
53812f080e7Smrj 		 * Convert byte count input to physical page units.
53912f080e7Smrj 		 * (byte counts that are not a page-size multiple
54012f080e7Smrj 		 * are rounded down)
5417c478bd9Sstevel@tonic-gate 		 */
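		/*
		 * For example, with 4K pages btop(0x1fff) yields 1 here,
		 * while DDI_CTLOPS_BTOPR below rounds the same count up to 2.
		 */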
54212f080e7Smrj 		*(ulong_t *)result = btop(*(ulong_t *)arg);
5437c478bd9Sstevel@tonic-gate 		return (DDI_SUCCESS);
5447c478bd9Sstevel@tonic-gate 
54512f080e7Smrj 	case DDI_CTLOPS_PTOB:
5467c478bd9Sstevel@tonic-gate 		/*
54712f080e7Smrj 		 * Convert size in physical pages to bytes
5487c478bd9Sstevel@tonic-gate 		 */
54912f080e7Smrj 		*(ulong_t *)result = ptob(*(ulong_t *)arg);
5507c478bd9Sstevel@tonic-gate 		return (DDI_SUCCESS);
5517c478bd9Sstevel@tonic-gate 
55212f080e7Smrj 	case DDI_CTLOPS_BTOPR:
5537c478bd9Sstevel@tonic-gate 		/*
55412f080e7Smrj 		 * Convert byte count input to physical page units
55512f080e7Smrj 		 * (byte counts that are not a page-size multiple
55612f080e7Smrj 		 * are rounded up)
5577c478bd9Sstevel@tonic-gate 		 */
55812f080e7Smrj 		*(ulong_t *)result = btopr(*(ulong_t *)arg);
55912f080e7Smrj 		return (DDI_SUCCESS);
56012f080e7Smrj 
56112f080e7Smrj 	case DDI_CTLOPS_INITCHILD:
56212f080e7Smrj 		return (impl_ddi_sunbus_initchild(arg));
56312f080e7Smrj 
56412f080e7Smrj 	case DDI_CTLOPS_UNINITCHILD:
56512f080e7Smrj 		impl_ddi_sunbus_removechild(arg);
56612f080e7Smrj 		return (DDI_SUCCESS);
56712f080e7Smrj 
56812f080e7Smrj 	case DDI_CTLOPS_REPORTDEV:
56912f080e7Smrj 		return (rootnex_ctl_reportdev(rdip));
57012f080e7Smrj 
57112f080e7Smrj 	case DDI_CTLOPS_IOMIN:
5727c478bd9Sstevel@tonic-gate 		/*
57312f080e7Smrj 		 * Nothing to do here but reflect back..
5747c478bd9Sstevel@tonic-gate 		 */
5757c478bd9Sstevel@tonic-gate 		return (DDI_SUCCESS);
5767c478bd9Sstevel@tonic-gate 
57712f080e7Smrj 	case DDI_CTLOPS_REGSIZE:
57812f080e7Smrj 	case DDI_CTLOPS_NREGS:
57912f080e7Smrj 		break;
5807c478bd9Sstevel@tonic-gate 
58112f080e7Smrj 	case DDI_CTLOPS_SIDDEV:
58212f080e7Smrj 		if (ndi_dev_is_prom_node(rdip))
5837c478bd9Sstevel@tonic-gate 			return (DDI_SUCCESS);
58412f080e7Smrj 		if (ndi_dev_is_persistent_node(rdip))
58512f080e7Smrj 			return (DDI_SUCCESS);
5867c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
5877c478bd9Sstevel@tonic-gate 
58812f080e7Smrj 	case DDI_CTLOPS_POWER:
58912f080e7Smrj 		return ((*pm_platform_power)((power_req_t *)arg));
59012f080e7Smrj 
591a195726fSgovinda 	case DDI_CTLOPS_RESERVED0: /* Was DDI_CTLOPS_NINTRS, obsolete */
59212f080e7Smrj 	case DDI_CTLOPS_RESERVED1: /* Was DDI_CTLOPS_POKE_INIT, obsolete */
59312f080e7Smrj 	case DDI_CTLOPS_RESERVED2: /* Was DDI_CTLOPS_POKE_FLUSH, obsolete */
59412f080e7Smrj 	case DDI_CTLOPS_RESERVED3: /* Was DDI_CTLOPS_POKE_FINI, obsolete */
595a195726fSgovinda 	case DDI_CTLOPS_RESERVED4: /* Was DDI_CTLOPS_INTR_HILEVEL, obsolete */
596a195726fSgovinda 	case DDI_CTLOPS_RESERVED5: /* Was DDI_CTLOPS_XLATE_INTRS, obsolete */
59712f080e7Smrj 		if (!rootnex_state->r_reserved_msg_printed) {
59812f080e7Smrj 			rootnex_state->r_reserved_msg_printed = B_TRUE;
59912f080e7Smrj 			cmn_err(CE_WARN, "Failing ddi_ctlops call(s) for "
60012f080e7Smrj 			    "1 or more reserved/obsolete operations.");
6017c478bd9Sstevel@tonic-gate 		}
60212f080e7Smrj 		return (DDI_FAILURE);
6037c478bd9Sstevel@tonic-gate 
6047c478bd9Sstevel@tonic-gate 	default:
6057c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
6067c478bd9Sstevel@tonic-gate 	}
60712f080e7Smrj 	/*
60812f080e7Smrj 	 * The rest are for "hardware" properties
60912f080e7Smrj 	 */
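	/*
	 * Illustrative example: for a child whose parent data holds three
	 * "reg" entries, DDI_CTLOPS_NREGS returns 3, and DDI_CTLOPS_REGSIZE
	 * with *(int *)arg == 1 returns the size of the second regspec.
	 */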
61012f080e7Smrj 	if ((pdp = ddi_get_parent_data(rdip)) == NULL)
61112f080e7Smrj 		return (DDI_FAILURE);
6127c478bd9Sstevel@tonic-gate 
61312f080e7Smrj 	if (ctlop == DDI_CTLOPS_NREGS) {
61412f080e7Smrj 		ptr = (int *)result;
61512f080e7Smrj 		*ptr = pdp->par_nreg;
61612f080e7Smrj 	} else {
61712f080e7Smrj 		off_t *size = (off_t *)result;
6187c478bd9Sstevel@tonic-gate 
61912f080e7Smrj 		ptr = (int *)arg;
62012f080e7Smrj 		n = *ptr;
62112f080e7Smrj 		if (n >= pdp->par_nreg) {
62212f080e7Smrj 			return (DDI_FAILURE);
62312f080e7Smrj 		}
62412f080e7Smrj 		*size = (off_t)pdp->par_reg[n].regspec_size;
62512f080e7Smrj 	}
6267c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
6277c478bd9Sstevel@tonic-gate }
6287c478bd9Sstevel@tonic-gate 
62912f080e7Smrj 
63012f080e7Smrj /*
63112f080e7Smrj  * rootnex_ctl_reportdev()
63212f080e7Smrj  *
63312f080e7Smrj  */
6347c478bd9Sstevel@tonic-gate static int
63512f080e7Smrj rootnex_ctl_reportdev(dev_info_t *dev)
63612f080e7Smrj {
63712f080e7Smrj 	int i, n, len, f_len = 0;
63812f080e7Smrj 	char *buf;
63912f080e7Smrj 
64012f080e7Smrj 	buf = kmem_alloc(REPORTDEV_BUFSIZE, KM_SLEEP);
64112f080e7Smrj 	f_len += snprintf(buf, REPORTDEV_BUFSIZE,
64212f080e7Smrj 	    "%s%d at root", ddi_driver_name(dev), ddi_get_instance(dev));
64312f080e7Smrj 	len = strlen(buf);
64412f080e7Smrj 
64512f080e7Smrj 	for (i = 0; i < sparc_pd_getnreg(dev); i++) {
64612f080e7Smrj 
64712f080e7Smrj 		struct regspec *rp = sparc_pd_getreg(dev, i);
64812f080e7Smrj 
64912f080e7Smrj 		if (i == 0)
65012f080e7Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
65112f080e7Smrj 			    ": ");
65212f080e7Smrj 		else
65312f080e7Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
65412f080e7Smrj 			    " and ");
65512f080e7Smrj 		len = strlen(buf);
65612f080e7Smrj 
65712f080e7Smrj 		switch (rp->regspec_bustype) {
65812f080e7Smrj 
65912f080e7Smrj 		case BTEISA:
66012f080e7Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
66112f080e7Smrj 			    "%s 0x%x", DEVI_EISA_NEXNAME, rp->regspec_addr);
66212f080e7Smrj 			break;
66312f080e7Smrj 
66412f080e7Smrj 		case BTISA:
66512f080e7Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
66612f080e7Smrj 			    "%s 0x%x", DEVI_ISA_NEXNAME, rp->regspec_addr);
66712f080e7Smrj 			break;
66812f080e7Smrj 
66912f080e7Smrj 		default:
67012f080e7Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
67112f080e7Smrj 			    "space %x offset %x",
67212f080e7Smrj 			    rp->regspec_bustype, rp->regspec_addr);
67312f080e7Smrj 			break;
67412f080e7Smrj 		}
67512f080e7Smrj 		len = strlen(buf);
67612f080e7Smrj 	}
67712f080e7Smrj 	for (i = 0, n = sparc_pd_getnintr(dev); i < n; i++) {
67812f080e7Smrj 		int pri;
67912f080e7Smrj 
68012f080e7Smrj 		if (i != 0) {
68112f080e7Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
68212f080e7Smrj 			    ",");
68312f080e7Smrj 			len = strlen(buf);
68412f080e7Smrj 		}
68512f080e7Smrj 		pri = INT_IPL(sparc_pd_getintr(dev, i)->intrspec_pri);
68612f080e7Smrj 		f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
68712f080e7Smrj 		    " sparc ipl %d", pri);
68812f080e7Smrj 		len = strlen(buf);
68912f080e7Smrj 	}
69012f080e7Smrj #ifdef DEBUG
69112f080e7Smrj 	if (f_len + 1 >= REPORTDEV_BUFSIZE) {
69212f080e7Smrj 		cmn_err(CE_NOTE, "next message is truncated: "
69312f080e7Smrj 		    "printed length 1024, real length %d", f_len);
69412f080e7Smrj 	}
69512f080e7Smrj #endif /* DEBUG */
69612f080e7Smrj 	cmn_err(CE_CONT, "?%s\n", buf);
69712f080e7Smrj 	kmem_free(buf, REPORTDEV_BUFSIZE);
69812f080e7Smrj 	return (DDI_SUCCESS);
69912f080e7Smrj }
70012f080e7Smrj 
70112f080e7Smrj 
70212f080e7Smrj /*
70312f080e7Smrj  * ******************
70412f080e7Smrj  *  map related code
70512f080e7Smrj  * ******************
70612f080e7Smrj  */
70712f080e7Smrj 
70812f080e7Smrj /*
70912f080e7Smrj  * rootnex_map()
71012f080e7Smrj  *
71112f080e7Smrj  */
71212f080e7Smrj static int
71312f080e7Smrj rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, off_t offset,
71412f080e7Smrj     off_t len, caddr_t *vaddrp)
7157c478bd9Sstevel@tonic-gate {
7167c478bd9Sstevel@tonic-gate 	struct regspec *rp, tmp_reg;
7177c478bd9Sstevel@tonic-gate 	ddi_map_req_t mr = *mp;		/* Get private copy of request */
7187c478bd9Sstevel@tonic-gate 	int error;
7197c478bd9Sstevel@tonic-gate 
7207c478bd9Sstevel@tonic-gate 	mp = &mr;
7217c478bd9Sstevel@tonic-gate 
7227c478bd9Sstevel@tonic-gate 	switch (mp->map_op)  {
7237c478bd9Sstevel@tonic-gate 	case DDI_MO_MAP_LOCKED:
7247c478bd9Sstevel@tonic-gate 	case DDI_MO_UNMAP:
7257c478bd9Sstevel@tonic-gate 	case DDI_MO_MAP_HANDLE:
7267c478bd9Sstevel@tonic-gate 		break;
7277c478bd9Sstevel@tonic-gate 	default:
7287c478bd9Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
7297c478bd9Sstevel@tonic-gate 		cmn_err(CE_WARN, "rootnex_map: unimplemented map op %d.",
7307c478bd9Sstevel@tonic-gate 		    mp->map_op);
7317c478bd9Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
7327c478bd9Sstevel@tonic-gate 		return (DDI_ME_UNIMPLEMENTED);
7337c478bd9Sstevel@tonic-gate 	}
7347c478bd9Sstevel@tonic-gate 
7357c478bd9Sstevel@tonic-gate 	if (mp->map_flags & DDI_MF_USER_MAPPING)  {
7367c478bd9Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
7377c478bd9Sstevel@tonic-gate 		cmn_err(CE_WARN, "rootnex_map: unimplemented map type: user.");
7387c478bd9Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
7397c478bd9Sstevel@tonic-gate 		return (DDI_ME_UNIMPLEMENTED);
7407c478bd9Sstevel@tonic-gate 	}
7417c478bd9Sstevel@tonic-gate 
7427c478bd9Sstevel@tonic-gate 	/*
7437c478bd9Sstevel@tonic-gate 	 * First, if given an rnumber, convert it to a regspec...
7447c478bd9Sstevel@tonic-gate 	 * (Presumably, this is on behalf of a child of the root node?)
7457c478bd9Sstevel@tonic-gate 	 */
7467c478bd9Sstevel@tonic-gate 
7477c478bd9Sstevel@tonic-gate 	if (mp->map_type == DDI_MT_RNUMBER)  {
7487c478bd9Sstevel@tonic-gate 
7497c478bd9Sstevel@tonic-gate 		int rnumber = mp->map_obj.rnumber;
7507c478bd9Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
7517c478bd9Sstevel@tonic-gate 		static char *out_of_range =
7527c478bd9Sstevel@tonic-gate 		    "rootnex_map: Out of range rnumber <%d>, device <%s>";
7537c478bd9Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
7547c478bd9Sstevel@tonic-gate 
7557c478bd9Sstevel@tonic-gate 		rp = i_ddi_rnumber_to_regspec(rdip, rnumber);
7567c478bd9Sstevel@tonic-gate 		if (rp == NULL)  {
7577c478bd9Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
7587c478bd9Sstevel@tonic-gate 			cmn_err(CE_WARN, out_of_range, rnumber,
7597c478bd9Sstevel@tonic-gate 			    ddi_get_name(rdip));
7607c478bd9Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
7617c478bd9Sstevel@tonic-gate 			return (DDI_ME_RNUMBER_RANGE);
7627c478bd9Sstevel@tonic-gate 		}
7637c478bd9Sstevel@tonic-gate 
7647c478bd9Sstevel@tonic-gate 		/*
7657c478bd9Sstevel@tonic-gate 		 * Convert the given ddi_map_req_t from rnumber to regspec...
7667c478bd9Sstevel@tonic-gate 		 */
7677c478bd9Sstevel@tonic-gate 
7687c478bd9Sstevel@tonic-gate 		mp->map_type = DDI_MT_REGSPEC;
7697c478bd9Sstevel@tonic-gate 		mp->map_obj.rp = rp;
7707c478bd9Sstevel@tonic-gate 	}
7717c478bd9Sstevel@tonic-gate 
7727c478bd9Sstevel@tonic-gate 	/*
7737c478bd9Sstevel@tonic-gate 	 * Adjust offset and length corresponding to called values...
7747c478bd9Sstevel@tonic-gate 	 * XXX: A non-zero length means override the one in the regspec
7757c478bd9Sstevel@tonic-gate 	 * XXX: (regardless of what's in the parent's range?)
7767c478bd9Sstevel@tonic-gate 	 */
7777c478bd9Sstevel@tonic-gate 
7787c478bd9Sstevel@tonic-gate 	tmp_reg = *(mp->map_obj.rp);		/* Preserve underlying data */
7797c478bd9Sstevel@tonic-gate 	rp = mp->map_obj.rp = &tmp_reg;		/* Use tmp_reg in request */
7807c478bd9Sstevel@tonic-gate 
7817c478bd9Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
7827c478bd9Sstevel@tonic-gate 	cmn_err(CE_CONT,
7837c478bd9Sstevel@tonic-gate 		"rootnex: <%s,%s> <0x%x, 0x%x, 0x%d>"
7847c478bd9Sstevel@tonic-gate 		" offset %d len %d handle 0x%x\n",
7857c478bd9Sstevel@tonic-gate 		ddi_get_name(dip), ddi_get_name(rdip),
7867c478bd9Sstevel@tonic-gate 		rp->regspec_bustype, rp->regspec_addr, rp->regspec_size,
7877c478bd9Sstevel@tonic-gate 		offset, len, mp->map_handlep);
7887c478bd9Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
7897c478bd9Sstevel@tonic-gate 
7907c478bd9Sstevel@tonic-gate 	/*
7917c478bd9Sstevel@tonic-gate 	 * I/O or memory mapping:
7927c478bd9Sstevel@tonic-gate 	 *
7937c478bd9Sstevel@tonic-gate 	 *	<bustype=0, addr=x, len=x>: memory
7947c478bd9Sstevel@tonic-gate 	 *	<bustype=1, addr=x, len=x>: i/o
7957c478bd9Sstevel@tonic-gate 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
7967c478bd9Sstevel@tonic-gate 	 */
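	/*
	 * For instance (illustrative values only), <bustype=1, addr=0x3f8,
	 * len=8> describes 8 bytes of x86 I/O port space starting at port
	 * 0x3f8, while <bustype=0, addr=0xfebff000, len=0x1000> describes a
	 * page of memory-mapped device registers.
	 */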
7977c478bd9Sstevel@tonic-gate 
7987c478bd9Sstevel@tonic-gate 	if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) {
7997c478bd9Sstevel@tonic-gate 		cmn_err(CE_WARN, "<%s,%s> invalid register spec"
8007c478bd9Sstevel@tonic-gate 		    " <0x%x, 0x%x, 0x%x>", ddi_get_name(dip),
8017c478bd9Sstevel@tonic-gate 		    ddi_get_name(rdip), rp->regspec_bustype,
8027c478bd9Sstevel@tonic-gate 		    rp->regspec_addr, rp->regspec_size);
8037c478bd9Sstevel@tonic-gate 		return (DDI_ME_INVAL);
8047c478bd9Sstevel@tonic-gate 	}
8057c478bd9Sstevel@tonic-gate 
8067c478bd9Sstevel@tonic-gate 	if (rp->regspec_bustype > 1 && rp->regspec_addr == 0) {
8077c478bd9Sstevel@tonic-gate 		/*
8087c478bd9Sstevel@tonic-gate 		 * compatibility i/o mapping
8097c478bd9Sstevel@tonic-gate 		 */
8107c478bd9Sstevel@tonic-gate 		rp->regspec_bustype += (uint_t)offset;
8117c478bd9Sstevel@tonic-gate 	} else {
8127c478bd9Sstevel@tonic-gate 		/*
8137c478bd9Sstevel@tonic-gate 		 * Normal memory or i/o mapping
8147c478bd9Sstevel@tonic-gate 		 */
8157c478bd9Sstevel@tonic-gate 		rp->regspec_addr += (uint_t)offset;
8167c478bd9Sstevel@tonic-gate 	}
8177c478bd9Sstevel@tonic-gate 
8187c478bd9Sstevel@tonic-gate 	if (len != 0)
8197c478bd9Sstevel@tonic-gate 		rp->regspec_size = (uint_t)len;
8207c478bd9Sstevel@tonic-gate 
8217c478bd9Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
8227c478bd9Sstevel@tonic-gate 	cmn_err(CE_CONT,
8237c478bd9Sstevel@tonic-gate 		"             <%s,%s> <0x%x, 0x%x, 0x%d>"
8247c478bd9Sstevel@tonic-gate 		" offset %d len %d handle 0x%x\n",
8257c478bd9Sstevel@tonic-gate 		ddi_get_name(dip), ddi_get_name(rdip),
8267c478bd9Sstevel@tonic-gate 		rp->regspec_bustype, rp->regspec_addr, rp->regspec_size,
8277c478bd9Sstevel@tonic-gate 		offset, len, mp->map_handlep);
8287c478bd9Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
8297c478bd9Sstevel@tonic-gate 
8307c478bd9Sstevel@tonic-gate 	/*
8317c478bd9Sstevel@tonic-gate 	 * Apply any parent ranges at this level, if applicable.
8327c478bd9Sstevel@tonic-gate 	 * (This is where nexus specific regspec translation takes place.
8337c478bd9Sstevel@tonic-gate 	 * Use of this function is implicit agreement that translation is
8347c478bd9Sstevel@tonic-gate 	 * provided via ddi_apply_range.)
8357c478bd9Sstevel@tonic-gate 	 */
8367c478bd9Sstevel@tonic-gate 
8377c478bd9Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
8387c478bd9Sstevel@tonic-gate 	ddi_map_debug("applying range of parent <%s> to child <%s>...\n",
8397c478bd9Sstevel@tonic-gate 	    ddi_get_name(dip), ddi_get_name(rdip));
8407c478bd9Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
8417c478bd9Sstevel@tonic-gate 
8427c478bd9Sstevel@tonic-gate 	if ((error = i_ddi_apply_range(dip, rdip, mp->map_obj.rp)) != 0)
8437c478bd9Sstevel@tonic-gate 		return (error);
8447c478bd9Sstevel@tonic-gate 
8457c478bd9Sstevel@tonic-gate 	switch (mp->map_op)  {
8467c478bd9Sstevel@tonic-gate 	case DDI_MO_MAP_LOCKED:
8477c478bd9Sstevel@tonic-gate 
8487c478bd9Sstevel@tonic-gate 		/*
8497c478bd9Sstevel@tonic-gate 		 * Set up the locked down kernel mapping to the regspec...
8507c478bd9Sstevel@tonic-gate 		 */
8517c478bd9Sstevel@tonic-gate 
8527c478bd9Sstevel@tonic-gate 		return (rootnex_map_regspec(mp, vaddrp));
8537c478bd9Sstevel@tonic-gate 
8547c478bd9Sstevel@tonic-gate 	case DDI_MO_UNMAP:
8557c478bd9Sstevel@tonic-gate 
8567c478bd9Sstevel@tonic-gate 		/*
8577c478bd9Sstevel@tonic-gate 		 * Release mapping...
8587c478bd9Sstevel@tonic-gate 		 */
8597c478bd9Sstevel@tonic-gate 
8607c478bd9Sstevel@tonic-gate 		return (rootnex_unmap_regspec(mp, vaddrp));
8617c478bd9Sstevel@tonic-gate 
8627c478bd9Sstevel@tonic-gate 	case DDI_MO_MAP_HANDLE:
8637c478bd9Sstevel@tonic-gate 
8647c478bd9Sstevel@tonic-gate 		return (rootnex_map_handle(mp));
8657c478bd9Sstevel@tonic-gate 
8667c478bd9Sstevel@tonic-gate 	default:
8677c478bd9Sstevel@tonic-gate 		return (DDI_ME_UNIMPLEMENTED);
8687c478bd9Sstevel@tonic-gate 	}
8697c478bd9Sstevel@tonic-gate }
8707c478bd9Sstevel@tonic-gate 
8717c478bd9Sstevel@tonic-gate 
8727c478bd9Sstevel@tonic-gate /*
87312f080e7Smrj  * rootnex_map_fault()
8747c478bd9Sstevel@tonic-gate  *
8757c478bd9Sstevel@tonic-gate  *	fault in mappings for requestors
8767c478bd9Sstevel@tonic-gate  */
8777c478bd9Sstevel@tonic-gate /*ARGSUSED*/
8787c478bd9Sstevel@tonic-gate static int
87912f080e7Smrj rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip, struct hat *hat,
88012f080e7Smrj     struct seg *seg, caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot,
88112f080e7Smrj     uint_t lock)
8827c478bd9Sstevel@tonic-gate {
8837c478bd9Sstevel@tonic-gate 
8847c478bd9Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
8857c478bd9Sstevel@tonic-gate 	ddi_map_debug("rootnex_map_fault: address <%x> pfn <%x>", addr, pfn);
8867c478bd9Sstevel@tonic-gate 	ddi_map_debug(" Seg <%s>\n",
8877c478bd9Sstevel@tonic-gate 	    seg->s_ops == &segdev_ops ? "segdev" :
8887c478bd9Sstevel@tonic-gate 	    seg == &kvseg ? "segkmem" : "NONE!");
8897c478bd9Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
8907c478bd9Sstevel@tonic-gate 
8917c478bd9Sstevel@tonic-gate 	/*
8927c478bd9Sstevel@tonic-gate 	 * This is all terribly broken, but it is a start
8937c478bd9Sstevel@tonic-gate 	 *
8947c478bd9Sstevel@tonic-gate 	 * XXX	Note that this test means that segdev_ops
8957c478bd9Sstevel@tonic-gate 	 *	must be exported from seg_dev.c.
8967c478bd9Sstevel@tonic-gate 	 * XXX	What about devices with their own segment drivers?
8977c478bd9Sstevel@tonic-gate 	 */
8987c478bd9Sstevel@tonic-gate 	if (seg->s_ops == &segdev_ops) {
8997c478bd9Sstevel@tonic-gate 		struct segdev_data *sdp =
9007c478bd9Sstevel@tonic-gate 			(struct segdev_data *)seg->s_data;
9017c478bd9Sstevel@tonic-gate 
9027c478bd9Sstevel@tonic-gate 		if (hat == NULL) {
9037c478bd9Sstevel@tonic-gate 			/*
9047c478bd9Sstevel@tonic-gate 			 * This is one plausible interpretation of
9057c478bd9Sstevel@tonic-gate 			 * a null hat i.e. use the first hat on the
9067c478bd9Sstevel@tonic-gate 			 * address space hat list which by convention is
9077c478bd9Sstevel@tonic-gate 			 * the hat of the system MMU.  An alternative
9087c478bd9Sstevel@tonic-gate 			 * would be to panic ... this might well be better ...
9097c478bd9Sstevel@tonic-gate 			 */
9107c478bd9Sstevel@tonic-gate 			ASSERT(AS_READ_HELD(seg->s_as, &seg->s_as->a_lock));
9117c478bd9Sstevel@tonic-gate 			hat = seg->s_as->a_hat;
9127c478bd9Sstevel@tonic-gate 			cmn_err(CE_NOTE, "rootnex_map_fault: nil hat");
9137c478bd9Sstevel@tonic-gate 		}
9147c478bd9Sstevel@tonic-gate 		hat_devload(hat, addr, MMU_PAGESIZE, pfn, prot | sdp->hat_attr,
9157c478bd9Sstevel@tonic-gate 		    (lock ? HAT_LOAD_LOCK : HAT_LOAD));
9167c478bd9Sstevel@tonic-gate 	} else if (seg == &kvseg && dp == NULL) {
9177c478bd9Sstevel@tonic-gate 		hat_devload(kas.a_hat, addr, MMU_PAGESIZE, pfn, prot,
9187c478bd9Sstevel@tonic-gate 		    HAT_LOAD_LOCK);
9197c478bd9Sstevel@tonic-gate 	} else
9207c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
9217c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
9227c478bd9Sstevel@tonic-gate }
9237c478bd9Sstevel@tonic-gate 
9247c478bd9Sstevel@tonic-gate 
9257c478bd9Sstevel@tonic-gate /*
92612f080e7Smrj  * rootnex_map_regspec()
92712f080e7Smrj  *     we don't support mapping of I/O cards above 4Gb
9287c478bd9Sstevel@tonic-gate  */
9297c478bd9Sstevel@tonic-gate static int
93012f080e7Smrj rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp)
9317c478bd9Sstevel@tonic-gate {
93212f080e7Smrj 	ulong_t base;
93312f080e7Smrj 	void *cvaddr;
93412f080e7Smrj 	uint_t npages, pgoffset;
93512f080e7Smrj 	struct regspec *rp;
93612f080e7Smrj 	ddi_acc_hdl_t *hp;
93712f080e7Smrj 	ddi_acc_impl_t *ap;
93812f080e7Smrj 	uint_t	hat_acc_flags;
9397c478bd9Sstevel@tonic-gate 
94012f080e7Smrj 	rp = mp->map_obj.rp;
94112f080e7Smrj 	hp = mp->map_handlep;
94212f080e7Smrj 
94312f080e7Smrj #ifdef	DDI_MAP_DEBUG
94412f080e7Smrj 	ddi_map_debug(
94512f080e7Smrj 	    "rootnex_map_regspec: <0x%x 0x%x 0x%x> handle 0x%x\n",
94612f080e7Smrj 	    rp->regspec_bustype, rp->regspec_addr,
94712f080e7Smrj 	    rp->regspec_size, mp->map_handlep);
94812f080e7Smrj #endif	/* DDI_MAP_DEBUG */
9497c478bd9Sstevel@tonic-gate 
9507c478bd9Sstevel@tonic-gate 	/*
95112f080e7Smrj 	 * I/O or memory mapping
95212f080e7Smrj 	 *
95312f080e7Smrj 	 *	<bustype=0, addr=x, len=x>: memory
95412f080e7Smrj 	 *	<bustype=1, addr=x, len=x>: i/o
95512f080e7Smrj 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
9567c478bd9Sstevel@tonic-gate 	 */
95712f080e7Smrj 
95812f080e7Smrj 	if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) {
95912f080e7Smrj 		cmn_err(CE_WARN, "rootnex: invalid register spec"
96012f080e7Smrj 		    " <0x%x, 0x%x, 0x%x>", rp->regspec_bustype,
96112f080e7Smrj 		    rp->regspec_addr, rp->regspec_size);
96212f080e7Smrj 		return (DDI_FAILURE);
9637c478bd9Sstevel@tonic-gate 	}
96412f080e7Smrj 
96512f080e7Smrj 	if (rp->regspec_bustype != 0) {
9667c478bd9Sstevel@tonic-gate 		/*
96712f080e7Smrj 		 * I/O space - needs a handle.
9687c478bd9Sstevel@tonic-gate 		 */
9697c478bd9Sstevel@tonic-gate 		if (hp == NULL) {
97012f080e7Smrj 			return (DDI_FAILURE);
9717c478bd9Sstevel@tonic-gate 		}
97212f080e7Smrj 		ap = (ddi_acc_impl_t *)hp->ah_platform_private;
97312f080e7Smrj 		ap->ahi_acc_attr |= DDI_ACCATTR_IO_SPACE;
97412f080e7Smrj 		impl_acc_hdl_init(hp);
9757c478bd9Sstevel@tonic-gate 
97612f080e7Smrj 		if (mp->map_flags & DDI_MF_DEVICE_MAPPING) {
97712f080e7Smrj #ifdef  DDI_MAP_DEBUG
97812f080e7Smrj 			ddi_map_debug("rootnex_map_regspec: mmap() \
97912f080e7Smrj to I/O space is not supported.\n");
98012f080e7Smrj #endif  /* DDI_MAP_DEBUG */
98112f080e7Smrj 			return (DDI_ME_INVAL);
9827c478bd9Sstevel@tonic-gate 		} else {
9837c478bd9Sstevel@tonic-gate 			/*
98412f080e7Smrj 			 * 1275-compliant vs. compatibility i/o mapping
9857c478bd9Sstevel@tonic-gate 			 */
98612f080e7Smrj 			*vaddrp =
98712f080e7Smrj 			    (rp->regspec_bustype > 1 && rp->regspec_addr == 0) ?
98812f080e7Smrj 				((caddr_t)(uintptr_t)rp->regspec_bustype) :
98912f080e7Smrj 				((caddr_t)(uintptr_t)rp->regspec_addr);
99000d0963fSdilpreet 
99100d0963fSdilpreet 			hp->ah_pfn = mmu_btop((ulong_t)rp->regspec_addr &
99200d0963fSdilpreet 			    (~MMU_PAGEOFFSET));
99300d0963fSdilpreet 			hp->ah_pnum = mmu_btopr(rp->regspec_size +
99400d0963fSdilpreet 			    (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET);
9957c478bd9Sstevel@tonic-gate 		}
9967c478bd9Sstevel@tonic-gate 
99712f080e7Smrj #ifdef	DDI_MAP_DEBUG
99812f080e7Smrj 		ddi_map_debug(
99912f080e7Smrj 	    "rootnex_map_regspec: \"Mapping\" %d bytes I/O space at 0x%x\n",
100012f080e7Smrj 		    rp->regspec_size, *vaddrp);
100112f080e7Smrj #endif	/* DDI_MAP_DEBUG */
100212f080e7Smrj 		return (DDI_SUCCESS);
10037c478bd9Sstevel@tonic-gate 	}
10047c478bd9Sstevel@tonic-gate 
10057c478bd9Sstevel@tonic-gate 	/*
100612f080e7Smrj 	 * Memory space
100712f080e7Smrj 	 */
100812f080e7Smrj 
100912f080e7Smrj 	if (hp != NULL) {
101012f080e7Smrj 		/*
101112f080e7Smrj 		 * hat layer ignores
101212f080e7Smrj 		 * hp->ah_acc.devacc_attr_endian_flags.
101312f080e7Smrj 		 */
101412f080e7Smrj 		switch (hp->ah_acc.devacc_attr_dataorder) {
101512f080e7Smrj 		case DDI_STRICTORDER_ACC:
101612f080e7Smrj 			hat_acc_flags = HAT_STRICTORDER;
101712f080e7Smrj 			break;
101812f080e7Smrj 		case DDI_UNORDERED_OK_ACC:
101912f080e7Smrj 			hat_acc_flags = HAT_UNORDERED_OK;
102012f080e7Smrj 			break;
102112f080e7Smrj 		case DDI_MERGING_OK_ACC:
102212f080e7Smrj 			hat_acc_flags = HAT_MERGING_OK;
102312f080e7Smrj 			break;
102412f080e7Smrj 		case DDI_LOADCACHING_OK_ACC:
102512f080e7Smrj 			hat_acc_flags = HAT_LOADCACHING_OK;
102612f080e7Smrj 			break;
102712f080e7Smrj 		case DDI_STORECACHING_OK_ACC:
102812f080e7Smrj 			hat_acc_flags = HAT_STORECACHING_OK;
102912f080e7Smrj 			break;
103012f080e7Smrj 		}
103112f080e7Smrj 		ap = (ddi_acc_impl_t *)hp->ah_platform_private;
103212f080e7Smrj 		ap->ahi_acc_attr |= DDI_ACCATTR_CPU_VADDR;
103312f080e7Smrj 		impl_acc_hdl_init(hp);
103412f080e7Smrj 		hp->ah_hat_flags = hat_acc_flags;
103512f080e7Smrj 	} else {
103612f080e7Smrj 		hat_acc_flags = HAT_STRICTORDER;
103712f080e7Smrj 	}
103812f080e7Smrj 
103912f080e7Smrj 	base = (ulong_t)rp->regspec_addr & (~MMU_PAGEOFFSET); /* base addr */
104012f080e7Smrj 	pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET; /* offset */
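	/*
	 * Illustrative values: regspec_addr 0xfebff800 with regspec_size
	 * 0x100 gives base 0xfebff000 and pgoffset 0x800; mmu_btopr(0x100 +
	 * 0x800) below maps a single page and *vaddrp ends up 0x800 into it.
	 */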
104112f080e7Smrj 
104212f080e7Smrj 	if (rp->regspec_size == 0) {
104312f080e7Smrj #ifdef  DDI_MAP_DEBUG
104412f080e7Smrj 		ddi_map_debug("rootnex_map_regspec: zero regspec_size\n");
104512f080e7Smrj #endif  /* DDI_MAP_DEBUG */
104612f080e7Smrj 		return (DDI_ME_INVAL);
104712f080e7Smrj 	}
104812f080e7Smrj 
104912f080e7Smrj 	if (mp->map_flags & DDI_MF_DEVICE_MAPPING) {
105012f080e7Smrj 		*vaddrp = (caddr_t)mmu_btop(base);
105112f080e7Smrj 	} else {
105212f080e7Smrj 		npages = mmu_btopr(rp->regspec_size + pgoffset);
105312f080e7Smrj 
105412f080e7Smrj #ifdef	DDI_MAP_DEBUG
105512f080e7Smrj 		ddi_map_debug("rootnex_map_regspec: Mapping %d pages \
105612f080e7Smrj physical %x ",
105712f080e7Smrj 		    npages, base);
105812f080e7Smrj #endif	/* DDI_MAP_DEBUG */
105912f080e7Smrj 
106012f080e7Smrj 		cvaddr = device_arena_alloc(ptob(npages), VM_NOSLEEP);
106112f080e7Smrj 		if (cvaddr == NULL)
106212f080e7Smrj 			return (DDI_ME_NORESOURCES);
106312f080e7Smrj 
106412f080e7Smrj 		/*
106512f080e7Smrj 		 * Now map in the pages we've allocated...
106612f080e7Smrj 		 */
106712f080e7Smrj 		hat_devload(kas.a_hat, cvaddr, mmu_ptob(npages), mmu_btop(base),
106812f080e7Smrj 		    mp->map_prot | hat_acc_flags, HAT_LOAD_LOCK);
106912f080e7Smrj 		*vaddrp = (caddr_t)cvaddr + pgoffset;
107000d0963fSdilpreet 
107100d0963fSdilpreet 		/* save away pfn and npages for FMA */
107200d0963fSdilpreet 		hp = mp->map_handlep;
107300d0963fSdilpreet 		if (hp) {
107400d0963fSdilpreet 			hp->ah_pfn = mmu_btop(base);
107500d0963fSdilpreet 			hp->ah_pnum = npages;
107600d0963fSdilpreet 		}
107712f080e7Smrj 	}
107812f080e7Smrj 
107912f080e7Smrj #ifdef	DDI_MAP_DEBUG
108012f080e7Smrj 	ddi_map_debug("at virtual 0x%x\n", *vaddrp);
108112f080e7Smrj #endif	/* DDI_MAP_DEBUG */
108212f080e7Smrj 	return (DDI_SUCCESS);
108312f080e7Smrj }
108412f080e7Smrj 
108512f080e7Smrj 
108612f080e7Smrj /*
108712f080e7Smrj  * rootnex_unmap_regspec()
10887c478bd9Sstevel@tonic-gate  *
10897c478bd9Sstevel@tonic-gate  */
10907c478bd9Sstevel@tonic-gate static int
109112f080e7Smrj rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp)
10927c478bd9Sstevel@tonic-gate {
109312f080e7Smrj 	caddr_t addr = (caddr_t)*vaddrp;
109412f080e7Smrj 	uint_t npages, pgoffset;
109512f080e7Smrj 	struct regspec *rp;
10967c478bd9Sstevel@tonic-gate 
109712f080e7Smrj 	if (mp->map_flags & DDI_MF_DEVICE_MAPPING)
109812f080e7Smrj 		return (0);
10997c478bd9Sstevel@tonic-gate 
110012f080e7Smrj 	rp = mp->map_obj.rp;
11017c478bd9Sstevel@tonic-gate 
110212f080e7Smrj 	if (rp->regspec_size == 0) {
110312f080e7Smrj #ifdef  DDI_MAP_DEBUG
110412f080e7Smrj 		ddi_map_debug("rootnex_unmap_regspec: zero regspec_size\n");
110512f080e7Smrj #endif  /* DDI_MAP_DEBUG */
110612f080e7Smrj 		return (DDI_ME_INVAL);
11077c478bd9Sstevel@tonic-gate 	}
11087c478bd9Sstevel@tonic-gate 
11097c478bd9Sstevel@tonic-gate 	/*
111012f080e7Smrj 	 * I/O or memory mapping:
11117c478bd9Sstevel@tonic-gate 	 *
111212f080e7Smrj 	 *	<bustype=0, addr=x, len=x>: memory
111312f080e7Smrj 	 *	<bustype=1, addr=x, len=x>: i/o
111412f080e7Smrj 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
11157c478bd9Sstevel@tonic-gate 	 */
111612f080e7Smrj 	if (rp->regspec_bustype != 0) {
11177c478bd9Sstevel@tonic-gate 		/*
111812f080e7Smrj 		 * This is I/O space, which requires no particular
111912f080e7Smrj 		 * processing on unmap since it isn't mapped in the
112012f080e7Smrj 		 * first place.
11217c478bd9Sstevel@tonic-gate 		 */
11227c478bd9Sstevel@tonic-gate 		return (DDI_SUCCESS);
11237c478bd9Sstevel@tonic-gate 	}
11247c478bd9Sstevel@tonic-gate 
11257c478bd9Sstevel@tonic-gate 	/*
112612f080e7Smrj 	 * Memory space
11277c478bd9Sstevel@tonic-gate 	 */
112812f080e7Smrj 	pgoffset = (uintptr_t)addr & MMU_PAGEOFFSET;
112912f080e7Smrj 	npages = mmu_btopr(rp->regspec_size + pgoffset);
113012f080e7Smrj 	hat_unload(kas.a_hat, addr - pgoffset, ptob(npages), HAT_UNLOAD_UNLOCK);
113112f080e7Smrj 	device_arena_free(addr - pgoffset, ptob(npages));
11327c478bd9Sstevel@tonic-gate 
11337c478bd9Sstevel@tonic-gate 	/*
113412f080e7Smrj 	 * Destroy the pointer - the mapping has logically gone
11357c478bd9Sstevel@tonic-gate 	 */
113612f080e7Smrj 	*vaddrp = NULL;
11377c478bd9Sstevel@tonic-gate 
11387c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
11397c478bd9Sstevel@tonic-gate }
11407c478bd9Sstevel@tonic-gate 
114112f080e7Smrj 
114212f080e7Smrj /*
114312f080e7Smrj  * rootnex_map_handle()
114412f080e7Smrj  *
114512f080e7Smrj  */
11467c478bd9Sstevel@tonic-gate static int
114712f080e7Smrj rootnex_map_handle(ddi_map_req_t *mp)
11487c478bd9Sstevel@tonic-gate {
114912f080e7Smrj 	ddi_acc_hdl_t *hp;
115012f080e7Smrj 	ulong_t base;
115112f080e7Smrj 	uint_t pgoffset;
115212f080e7Smrj 	struct regspec *rp;
11537c478bd9Sstevel@tonic-gate 
115412f080e7Smrj 	rp = mp->map_obj.rp;
11557c478bd9Sstevel@tonic-gate 
115612f080e7Smrj #ifdef	DDI_MAP_DEBUG
115712f080e7Smrj 	ddi_map_debug(
115812f080e7Smrj 	    "rootnex_map_handle: <0x%x 0x%x 0x%x> handle 0x%x\n",
115912f080e7Smrj 	    rp->regspec_bustype, rp->regspec_addr,
116012f080e7Smrj 	    rp->regspec_size, mp->map_handlep);
116112f080e7Smrj #endif	/* DDI_MAP_DEBUG */
11627c478bd9Sstevel@tonic-gate 
11637c478bd9Sstevel@tonic-gate 	/*
116412f080e7Smrj 	 * I/O or memory mapping:
116512f080e7Smrj 	 *
116612f080e7Smrj 	 *	<bustype=0, addr=x, len=x>: memory
116712f080e7Smrj 	 *	<bustype=1, addr=x, len=x>: i/o
116812f080e7Smrj 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
11697c478bd9Sstevel@tonic-gate 	 */
117012f080e7Smrj 	if (rp->regspec_bustype != 0) {
117112f080e7Smrj 		/*
117212f080e7Smrj 		 * This refers to I/O space, and we don't support "mapping"
117312f080e7Smrj 		 * I/O space to a user.
117412f080e7Smrj 		 */
11757c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
11767c478bd9Sstevel@tonic-gate 	}
11777c478bd9Sstevel@tonic-gate 
11787c478bd9Sstevel@tonic-gate 	/*
117912f080e7Smrj 	 * Set up the hat_flags for the mapping.
11807c478bd9Sstevel@tonic-gate 	 */
118112f080e7Smrj 	hp = mp->map_handlep;
11827c478bd9Sstevel@tonic-gate 
118312f080e7Smrj 	switch (hp->ah_acc.devacc_attr_endian_flags) {
118412f080e7Smrj 	case DDI_NEVERSWAP_ACC:
118512f080e7Smrj 		hp->ah_hat_flags = HAT_NEVERSWAP | HAT_STRICTORDER;
11867c478bd9Sstevel@tonic-gate 		break;
118712f080e7Smrj 	case DDI_STRUCTURE_LE_ACC:
118812f080e7Smrj 		hp->ah_hat_flags = HAT_STRUCTURE_LE;
11897c478bd9Sstevel@tonic-gate 		break;
119012f080e7Smrj 	case DDI_STRUCTURE_BE_ACC:
11917c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
11927c478bd9Sstevel@tonic-gate 	default:
119312f080e7Smrj 		return (DDI_REGS_ACC_CONFLICT);
11947c478bd9Sstevel@tonic-gate 	}
11957c478bd9Sstevel@tonic-gate 
119612f080e7Smrj 	switch (hp->ah_acc.devacc_attr_dataorder) {
119712f080e7Smrj 	case DDI_STRICTORDER_ACC:
11987c478bd9Sstevel@tonic-gate 		break;
119912f080e7Smrj 	case DDI_UNORDERED_OK_ACC:
120012f080e7Smrj 		hp->ah_hat_flags |= HAT_UNORDERED_OK;
12017c478bd9Sstevel@tonic-gate 		break;
120212f080e7Smrj 	case DDI_MERGING_OK_ACC:
120312f080e7Smrj 		hp->ah_hat_flags |= HAT_MERGING_OK;
12047c478bd9Sstevel@tonic-gate 		break;
120512f080e7Smrj 	case DDI_LOADCACHING_OK_ACC:
120612f080e7Smrj 		hp->ah_hat_flags |= HAT_LOADCACHING_OK;
120712f080e7Smrj 		break;
120812f080e7Smrj 	case DDI_STORECACHING_OK_ACC:
120912f080e7Smrj 		hp->ah_hat_flags |= HAT_STORECACHING_OK;
121012f080e7Smrj 		break;
12117c478bd9Sstevel@tonic-gate 	default:
12127c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
12137c478bd9Sstevel@tonic-gate 	}
12147c478bd9Sstevel@tonic-gate 
121512f080e7Smrj 	base = (ulong_t)rp->regspec_addr & (~MMU_PAGEOFFSET); /* base addr */
121612f080e7Smrj 	pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET; /* offset */
12177c478bd9Sstevel@tonic-gate 
121812f080e7Smrj 	if (rp->regspec_size == 0)
121912f080e7Smrj 		return (DDI_ME_INVAL);
12207c478bd9Sstevel@tonic-gate 
122112f080e7Smrj 	hp->ah_pfn = mmu_btop(base);
122212f080e7Smrj 	hp->ah_pnum = mmu_btopr(rp->regspec_size + pgoffset);
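	/*
	 * For example (illustrative values, not from the original source):
	 * with 4K pages, regspec_addr = 0x12345678 and regspec_size = 0x2000
	 * give base = 0x12345000, pgoffset = 0x678, ah_pfn = mmu_btop(base) =
	 * 0x12345, and ah_pnum = mmu_btopr(0x2000 + 0x678) = 3 pages.
	 */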
12237c478bd9Sstevel@tonic-gate 
12247c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
12257c478bd9Sstevel@tonic-gate }
12267c478bd9Sstevel@tonic-gate 
122712f080e7Smrj 
122812f080e7Smrj 
12297c478bd9Sstevel@tonic-gate /*
123012f080e7Smrj  * ************************
123112f080e7Smrj  *  interrupt related code
123212f080e7Smrj  * ************************
12337c478bd9Sstevel@tonic-gate  */
12347c478bd9Sstevel@tonic-gate 
12357c478bd9Sstevel@tonic-gate /*
123612f080e7Smrj  * rootnex_intr_ops()
12377c478bd9Sstevel@tonic-gate  *	bus_intr_op() function for interrupt support
12387c478bd9Sstevel@tonic-gate  */
12397c478bd9Sstevel@tonic-gate /* ARGSUSED */
12407c478bd9Sstevel@tonic-gate static int
12417c478bd9Sstevel@tonic-gate rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op,
12427c478bd9Sstevel@tonic-gate     ddi_intr_handle_impl_t *hdlp, void *result)
12437c478bd9Sstevel@tonic-gate {
12447c478bd9Sstevel@tonic-gate 	struct intrspec			*ispec;
12457c478bd9Sstevel@tonic-gate 	struct ddi_parent_private_data	*pdp;
12467c478bd9Sstevel@tonic-gate 
12477c478bd9Sstevel@tonic-gate 	DDI_INTR_NEXDBG((CE_CONT,
12487c478bd9Sstevel@tonic-gate 	    "rootnex_intr_ops: pdip = %p, rdip = %p, intr_op = %x, hdlp = %p\n",
12497c478bd9Sstevel@tonic-gate 	    (void *)pdip, (void *)rdip, intr_op, (void *)hdlp));
12507c478bd9Sstevel@tonic-gate 
12517c478bd9Sstevel@tonic-gate 	/* Process the interrupt operation */
12527c478bd9Sstevel@tonic-gate 	switch (intr_op) {
12537c478bd9Sstevel@tonic-gate 	case DDI_INTROP_GETCAP:
12547c478bd9Sstevel@tonic-gate 		/* First check with pcplusmp */
12557c478bd9Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
12567c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
12577c478bd9Sstevel@tonic-gate 
12587c478bd9Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_CAP, result)) {
12597c478bd9Sstevel@tonic-gate 			*(int *)result = 0;
12607c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
12617c478bd9Sstevel@tonic-gate 		}
12627c478bd9Sstevel@tonic-gate 		break;
12637c478bd9Sstevel@tonic-gate 	case DDI_INTROP_SETCAP:
12647c478bd9Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
12657c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
12667c478bd9Sstevel@tonic-gate 
12677c478bd9Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_CAP, result))
12687c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
12697c478bd9Sstevel@tonic-gate 		break;
12707c478bd9Sstevel@tonic-gate 	case DDI_INTROP_ALLOC:
12717c478bd9Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
12727c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
12737c478bd9Sstevel@tonic-gate 		hdlp->ih_pri = ispec->intrspec_pri;
12747c478bd9Sstevel@tonic-gate 		*(int *)result = hdlp->ih_scratch1;
12757c478bd9Sstevel@tonic-gate 		break;
12767c478bd9Sstevel@tonic-gate 	case DDI_INTROP_FREE:
12777c478bd9Sstevel@tonic-gate 		pdp = ddi_get_parent_data(rdip);
12787c478bd9Sstevel@tonic-gate 		/*
12797c478bd9Sstevel@tonic-gate 		 * Special case for the 'pcic' driver only.
12807c478bd9Sstevel@tonic-gate 		 * If an intrspec was created for it, clean it up here.
12817c478bd9Sstevel@tonic-gate 		 * See the detailed comments on this in the function
12827c478bd9Sstevel@tonic-gate 		 * rootnex_get_ispec().
12837c478bd9Sstevel@tonic-gate 		 */
12847c478bd9Sstevel@tonic-gate 		if (pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) {
12857c478bd9Sstevel@tonic-gate 			kmem_free(pdp->par_intr, sizeof (struct intrspec) *
12867c478bd9Sstevel@tonic-gate 			    pdp->par_nintr);
12877c478bd9Sstevel@tonic-gate 			/*
12887c478bd9Sstevel@tonic-gate 			 * Set it to NULL so that the
12897c478bd9Sstevel@tonic-gate 			 * DDI framework doesn't free it again.
12907c478bd9Sstevel@tonic-gate 			 */
12917c478bd9Sstevel@tonic-gate 			pdp->par_intr = NULL;
12927c478bd9Sstevel@tonic-gate 			pdp->par_nintr = 0;
12937c478bd9Sstevel@tonic-gate 		}
12947c478bd9Sstevel@tonic-gate 		break;
12957c478bd9Sstevel@tonic-gate 	case DDI_INTROP_GETPRI:
12967c478bd9Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
12977c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
12987c478bd9Sstevel@tonic-gate 		*(int *)result = ispec->intrspec_pri;
12997c478bd9Sstevel@tonic-gate 		break;
13007c478bd9Sstevel@tonic-gate 	case DDI_INTROP_SETPRI:
13017c478bd9Sstevel@tonic-gate 		/* Validate the interrupt priority passed to us */
13027c478bd9Sstevel@tonic-gate 		if (*(int *)result > LOCK_LEVEL)
13037c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13047c478bd9Sstevel@tonic-gate 
13057c478bd9Sstevel@tonic-gate 		/* Ensure that PSM is all initialized and ispec is ok */
13067c478bd9Sstevel@tonic-gate 		if ((psm_intr_ops == NULL) ||
13077c478bd9Sstevel@tonic-gate 		    ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL))
13087c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13097c478bd9Sstevel@tonic-gate 
13107c478bd9Sstevel@tonic-gate 		/* Change the priority */
13117c478bd9Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_PRI, result) ==
13127c478bd9Sstevel@tonic-gate 		    PSM_FAILURE)
13137c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13147c478bd9Sstevel@tonic-gate 
13157c478bd9Sstevel@tonic-gate 		/* update the ispec with the new priority */
13167c478bd9Sstevel@tonic-gate 		ispec->intrspec_pri =  *(int *)result;
13177c478bd9Sstevel@tonic-gate 		break;
13187c478bd9Sstevel@tonic-gate 	case DDI_INTROP_ADDISR:
13197c478bd9Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
13207c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13217c478bd9Sstevel@tonic-gate 		ispec->intrspec_func = hdlp->ih_cb_func;
13227c478bd9Sstevel@tonic-gate 		break;
13237c478bd9Sstevel@tonic-gate 	case DDI_INTROP_REMISR:
13247c478bd9Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
13257c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13267c478bd9Sstevel@tonic-gate 		ispec->intrspec_func = (uint_t (*)()) 0;
13277c478bd9Sstevel@tonic-gate 		break;
13287c478bd9Sstevel@tonic-gate 	case DDI_INTROP_ENABLE:
13297c478bd9Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
13307c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13317c478bd9Sstevel@tonic-gate 
13327c478bd9Sstevel@tonic-gate 		/* Call psmi to translate irq with the dip */
13337c478bd9Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
13347c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13357c478bd9Sstevel@tonic-gate 
13367a364d25Sschwartz 		((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
13377c478bd9Sstevel@tonic-gate 		(void) (*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_XLATE_VECTOR,
13387c478bd9Sstevel@tonic-gate 		    (int *)&hdlp->ih_vector);
13397c478bd9Sstevel@tonic-gate 
13407c478bd9Sstevel@tonic-gate 		/* Add the interrupt handler */
13417c478bd9Sstevel@tonic-gate 		if (!add_avintr((void *)hdlp, ispec->intrspec_pri,
13427c478bd9Sstevel@tonic-gate 		    hdlp->ih_cb_func, DEVI(rdip)->devi_name, hdlp->ih_vector,
13437a364d25Sschwartz 		    hdlp->ih_cb_arg1, hdlp->ih_cb_arg2, NULL, rdip))
13447c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13457c478bd9Sstevel@tonic-gate 		break;
13467c478bd9Sstevel@tonic-gate 	case DDI_INTROP_DISABLE:
13477c478bd9Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
13487c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13497c478bd9Sstevel@tonic-gate 
13507c478bd9Sstevel@tonic-gate 		/* Call psm_ops() to translate irq with the dip */
13517c478bd9Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
13527c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13537c478bd9Sstevel@tonic-gate 
13547a364d25Sschwartz 		((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
13557c478bd9Sstevel@tonic-gate 		(void) (*psm_intr_ops)(rdip, hdlp,
13567c478bd9Sstevel@tonic-gate 		    PSM_INTR_OP_XLATE_VECTOR, (int *)&hdlp->ih_vector);
13577c478bd9Sstevel@tonic-gate 
13587c478bd9Sstevel@tonic-gate 		/* Remove the interrupt handler */
13597c478bd9Sstevel@tonic-gate 		rem_avintr((void *)hdlp, ispec->intrspec_pri,
13607c478bd9Sstevel@tonic-gate 		    hdlp->ih_cb_func, hdlp->ih_vector);
13617c478bd9Sstevel@tonic-gate 		break;
13627c478bd9Sstevel@tonic-gate 	case DDI_INTROP_SETMASK:
13637c478bd9Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
13647c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13657c478bd9Sstevel@tonic-gate 
13667c478bd9Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_MASK, NULL))
13677c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13687c478bd9Sstevel@tonic-gate 		break;
13697c478bd9Sstevel@tonic-gate 	case DDI_INTROP_CLRMASK:
13707c478bd9Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
13717c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13727c478bd9Sstevel@tonic-gate 
13737c478bd9Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_CLEAR_MASK, NULL))
13747c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13757c478bd9Sstevel@tonic-gate 		break;
13767c478bd9Sstevel@tonic-gate 	case DDI_INTROP_GETPENDING:
13777c478bd9Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
13787c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13797c478bd9Sstevel@tonic-gate 
13807c478bd9Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_PENDING,
13817c478bd9Sstevel@tonic-gate 		    result)) {
13827c478bd9Sstevel@tonic-gate 			*(int *)result = 0;
13837c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
13847c478bd9Sstevel@tonic-gate 		}
13857c478bd9Sstevel@tonic-gate 		break;
1386*a54f81fbSanish 	case DDI_INTROP_NAVAIL:
13877c478bd9Sstevel@tonic-gate 	case DDI_INTROP_NINTRS:
1388*a54f81fbSanish 		*(int *)result = i_ddi_get_intx_nintrs(rdip);
1389*a54f81fbSanish 		if (*(int *)result == 0) {
13907c478bd9Sstevel@tonic-gate 			/*
13917c478bd9Sstevel@tonic-gate 			 * Special case for the 'pcic' driver only. This
13927c478bd9Sstevel@tonic-gate 			 * driver is a child of 'isa' and 'rootnex' drivers.
13937c478bd9Sstevel@tonic-gate 			 *
13947c478bd9Sstevel@tonic-gate 			 * See detailed comments on this in the function
13957c478bd9Sstevel@tonic-gate 			 * rootnex_get_ispec().
13967c478bd9Sstevel@tonic-gate 			 *
13977c478bd9Sstevel@tonic-gate 			 * Children of 'pcic' send the 'NINTRS' request all the
13987c478bd9Sstevel@tonic-gate 			 * way to the rootnex driver. But the 'pdp->par_nintr'
13997c478bd9Sstevel@tonic-gate 			 * field may not be initialized, so we fake it here
14007c478bd9Sstevel@tonic-gate 			 * to return 1 (a la what the PCMCIA nexus does).
14017c478bd9Sstevel@tonic-gate 			 */
14027c478bd9Sstevel@tonic-gate 			if (strcmp(ddi_get_name(rdip), "pcic") == 0)
14037c478bd9Sstevel@tonic-gate 				*(int *)result = 1;
1404*a54f81fbSanish 			else
1405*a54f81fbSanish 				return (DDI_FAILURE);
14067c478bd9Sstevel@tonic-gate 		}
14077c478bd9Sstevel@tonic-gate 		break;
14087c478bd9Sstevel@tonic-gate 	case DDI_INTROP_SUPPORTED_TYPES:
1409*a54f81fbSanish 		*(int *)result = DDI_INTR_TYPE_FIXED;	/* Always ... */
14107c478bd9Sstevel@tonic-gate 		break;
14117c478bd9Sstevel@tonic-gate 	default:
14127c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
14137c478bd9Sstevel@tonic-gate 	}
14147c478bd9Sstevel@tonic-gate 
14157c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
14167c478bd9Sstevel@tonic-gate }
14177c478bd9Sstevel@tonic-gate 
14187c478bd9Sstevel@tonic-gate 
14197c478bd9Sstevel@tonic-gate /*
142012f080e7Smrj  * rootnex_get_ispec()
142112f080e7Smrj  *	convert an interrupt number to an interrupt specification.
142212f080e7Smrj  *	The interrupt number determines which interrupt spec will be
142312f080e7Smrj  *	returned if more than one exists.
142412f080e7Smrj  *
142512f080e7Smrj  *	Look into the parent private data area of the 'rdip' to find out
142612f080e7Smrj  *	the interrupt specification.  First check to make sure there is
142712f080e7Smrj  *	one that matches "inumber" and then return a pointer to it.
142812f080e7Smrj  *
142912f080e7Smrj  *	Return NULL if one could not be found.
143012f080e7Smrj  *
143112f080e7Smrj  *	NOTE: This is needed for rootnex_intr_ops()
14327c478bd9Sstevel@tonic-gate  */
143312f080e7Smrj static struct intrspec *
143412f080e7Smrj rootnex_get_ispec(dev_info_t *rdip, int inum)
14357c478bd9Sstevel@tonic-gate {
143612f080e7Smrj 	struct ddi_parent_private_data *pdp = ddi_get_parent_data(rdip);
14377c478bd9Sstevel@tonic-gate 
14387c478bd9Sstevel@tonic-gate 	/*
143912f080e7Smrj 	 * Special case handling for drivers that provide their own
144012f080e7Smrj 	 * intrspec structures instead of relying on the DDI framework.
144112f080e7Smrj 	 *
144212f080e7Smrj 	 * A broken hardware driver in ON could potentially provide its
144312f080e7Smrj 	 * own intrspec structure, instead of relying on the hardware.
144412f080e7Smrj 	 * If these drivers are children of 'rootnex' then we need to
144512f080e7Smrj 	 * continue to provide backward compatibility to them here.
144612f080e7Smrj 	 *
144712f080e7Smrj 	 * The following check is a special case for the 'pcic' driver, which
144812f080e7Smrj 	 * was found to have broken hardware and so provides its own intrspec.
144912f080e7Smrj 	 *
145012f080e7Smrj 	 * Verbatim comments from this driver are shown here:
145112f080e7Smrj 	 * "Don't use the ddi_add_intr since we don't have a
145212f080e7Smrj 	 * default intrspec in all cases."
145312f080e7Smrj 	 *
145412f080e7Smrj 	 * Since an 'ispec' may not always be created for it,
145512f080e7Smrj 	 * check for that and create one if needed.
145612f080e7Smrj 	 *
145712f080e7Smrj 	 * NOTE: Currently 'pcic' is the only driver found to do this.
14587c478bd9Sstevel@tonic-gate 	 */
145912f080e7Smrj 	if (!pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) {
146012f080e7Smrj 		pdp->par_nintr = 1;
146112f080e7Smrj 		pdp->par_intr = kmem_zalloc(sizeof (struct intrspec) *
146212f080e7Smrj 		    pdp->par_nintr, KM_SLEEP);
146312f080e7Smrj 	}
146412f080e7Smrj 
146512f080e7Smrj 	/* Validate the interrupt number */
146612f080e7Smrj 	if (inum >= pdp->par_nintr)
146712f080e7Smrj 		return (NULL);
146812f080e7Smrj 
146912f080e7Smrj 	/* Get the interrupt structure pointer and return that */
147012f080e7Smrj 	return ((struct intrspec *)&pdp->par_intr[inum]);
147112f080e7Smrj }
147212f080e7Smrj 
147312f080e7Smrj 
147412f080e7Smrj /*
147512f080e7Smrj  * ******************
147612f080e7Smrj  *  dma related code
147712f080e7Smrj  * ******************
147812f080e7Smrj  */
147912f080e7Smrj 
148012f080e7Smrj /*
148112f080e7Smrj  * rootnex_dma_allochdl()
148212f080e7Smrj  *    called from ddi_dma_alloc_handle().
148312f080e7Smrj  */
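/*
 * Illustrative leaf-driver path that reaches this entry point (the attribute
 * structure name "xx_dma_attr" below is hypothetical):
 *
 *	ddi_dma_handle_t hdl;
 *
 *	if (ddi_dma_alloc_handle(dip, &xx_dma_attr, DDI_DMA_SLEEP, NULL,
 *	    &hdl) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *
 * The framework routes ddi_dma_alloc_handle(9F) to the parent nexus'
 * bus_dma_allochdl(9E) entry point, which for children of the root nexus
 * is this function.
 */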
148412f080e7Smrj /*ARGSUSED*/
148512f080e7Smrj static int
148612f080e7Smrj rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
148712f080e7Smrj     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
148812f080e7Smrj {
148912f080e7Smrj 	uint64_t maxsegmentsize_ll;
149012f080e7Smrj 	uint_t maxsegmentsize;
149112f080e7Smrj 	ddi_dma_impl_t *hp;
149212f080e7Smrj 	rootnex_dma_t *dma;
149312f080e7Smrj 	uint64_t count_max;
149412f080e7Smrj 	uint64_t seg;
149512f080e7Smrj 	int kmflag;
149612f080e7Smrj 	int e;
149712f080e7Smrj 
149812f080e7Smrj 
149912f080e7Smrj 	/* convert our sleep flags */
150012f080e7Smrj 	if (waitfp == DDI_DMA_SLEEP) {
150112f080e7Smrj 		kmflag = KM_SLEEP;
150212f080e7Smrj 	} else {
150312f080e7Smrj 		kmflag = KM_NOSLEEP;
150412f080e7Smrj 	}
150512f080e7Smrj 
150612f080e7Smrj 	/*
150712f080e7Smrj 	 * We try to do only one memory allocation here. We'll do a little
150812f080e7Smrj 	 * pointer manipulation later. If the bind ends up taking more than
150912f080e7Smrj 	 * our prealloc's space, we'll have to allocate more memory in the
151012f080e7Smrj 	 * bind operation. Not great, but much better than before and the
151112f080e7Smrj 	 * best we can do with the current bind interfaces.
151212f080e7Smrj 	 */
151312f080e7Smrj 	hp = kmem_cache_alloc(rootnex_state->r_dmahdl_cache, kmflag);
151412f080e7Smrj 	if (hp == NULL) {
151512f080e7Smrj 		if (waitfp != DDI_DMA_DONTWAIT) {
151612f080e7Smrj 			ddi_set_callback(waitfp, arg,
151712f080e7Smrj 			    &rootnex_state->r_dvma_call_list_id);
151812f080e7Smrj 		}
151912f080e7Smrj 		return (DDI_DMA_NORESOURCES);
152012f080e7Smrj 	}
152112f080e7Smrj 
152212f080e7Smrj 	/* Do our pointer manipulation now, align the structures */
152312f080e7Smrj 	hp->dmai_private = (void *)(((uintptr_t)hp +
152412f080e7Smrj 	    (uintptr_t)sizeof (ddi_dma_impl_t) + 0x7) & ~0x7);
152512f080e7Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
152612f080e7Smrj 	dma->dp_prealloc_buffer = (uchar_t *)(((uintptr_t)dma +
152712f080e7Smrj 	    sizeof (rootnex_dma_t) + 0x7) & ~0x7);
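	/*
	 * The "(x + 0x7) & ~0x7" arithmetic above rounds a pointer up to the
	 * next 8-byte boundary. For example (illustrative value), an address
	 * ending in 0x...1005 becomes 0x...1008, while an already aligned
	 * 0x...1008 is left unchanged.
	 */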
152812f080e7Smrj 
152912f080e7Smrj 	/* setup the handle */
153012f080e7Smrj 	rootnex_clean_dmahdl(hp);
153112f080e7Smrj 	dma->dp_dip = rdip;
153212f080e7Smrj 	dma->dp_sglinfo.si_min_addr = attr->dma_attr_addr_lo;
153312f080e7Smrj 	dma->dp_sglinfo.si_max_addr = attr->dma_attr_addr_hi;
153412f080e7Smrj 	hp->dmai_minxfer = attr->dma_attr_minxfer;
153512f080e7Smrj 	hp->dmai_burstsizes = attr->dma_attr_burstsizes;
153612f080e7Smrj 	hp->dmai_rdip = rdip;
153712f080e7Smrj 	hp->dmai_attr = *attr;
153812f080e7Smrj 
153912f080e7Smrj 	/* we don't need to worry about the SPL since we do a tryenter */
154012f080e7Smrj 	mutex_init(&dma->dp_mutex, NULL, MUTEX_DRIVER, NULL);
154112f080e7Smrj 
154212f080e7Smrj 	/*
154312f080e7Smrj 	 * Figure out our maximum segment size. If the segment size is greater
154412f080e7Smrj 	 * than 4G, we will limit it to (4G - 1) since the max size of a dma
154512f080e7Smrj 	 * object (ddi_dma_obj_t.dmao_size) is 32 bits. dma_attr_seg and
154612f080e7Smrj 	 * dma_attr_count_max are size-1 type values.
154712f080e7Smrj 	 *
154812f080e7Smrj 	 * Maximum segment size is the largest physically contiguous chunk of
154912f080e7Smrj 	 * memory that we can return from a bind (i.e. the maximum size of a
155012f080e7Smrj 	 * single cookie).
155112f080e7Smrj 	 */
155212f080e7Smrj 
155312f080e7Smrj 	/* handle the rollover cases */
155412f080e7Smrj 	seg = attr->dma_attr_seg + 1;
155512f080e7Smrj 	if (seg < attr->dma_attr_seg) {
155612f080e7Smrj 		seg = attr->dma_attr_seg;
155712f080e7Smrj 	}
155812f080e7Smrj 	count_max = attr->dma_attr_count_max + 1;
155912f080e7Smrj 	if (count_max < attr->dma_attr_count_max) {
156012f080e7Smrj 		count_max = attr->dma_attr_count_max;
156112f080e7Smrj 	}
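	/*
	 * Example of the rollover checks above (illustrative value): an
	 * "all ones" dma_attr_seg of 0xFFFFFFFFFFFFFFFF wraps to 0 when
	 * incremented, the comparison catches the wrap, and we fall back to
	 * the original size-1 value.
	 */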
156212f080e7Smrj 
156312f080e7Smrj 	/*
156412f080e7Smrj 	 * granularity may or may not be a power of two. If it isn't, we can't
156512f080e7Smrj 	 * use a simple mask.
156612f080e7Smrj 	 */
156712f080e7Smrj 	if (attr->dma_attr_granular & (attr->dma_attr_granular - 1)) {
156812f080e7Smrj 		dma->dp_granularity_power_2 = B_FALSE;
156912f080e7Smrj 	} else {
157012f080e7Smrj 		dma->dp_granularity_power_2 = B_TRUE;
157112f080e7Smrj 	}
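	/*
	 * The "x & (x - 1)" test above is zero exactly when x is a power of
	 * two (or zero). For example, 0x1000 & 0xFFF == 0, while a 0x600
	 * granularity gives 0x600 & 0x5FF == 0x400 and takes the
	 * non-power-of-two path.
	 */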
157212f080e7Smrj 
157312f080e7Smrj 	/*
157412f080e7Smrj 	 * maxxfer should be a whole multiple of granularity. If we're going to
157512f080e7Smrj 	 * break up a window because we're greater than maxxfer, we might as
157612f080e7Smrj 	 * well make sure maxxfer is a whole multiple so we don't have to
157712f080e7Smrj 	 * worry about trimming the window later on for this case.
157812f080e7Smrj 	 */
157912f080e7Smrj 	if (attr->dma_attr_granular > 1) {
158012f080e7Smrj 		if (dma->dp_granularity_power_2) {
158112f080e7Smrj 			dma->dp_maxxfer = attr->dma_attr_maxxfer -
158212f080e7Smrj 			    (attr->dma_attr_maxxfer &
158312f080e7Smrj 			    (attr->dma_attr_granular - 1));
158412f080e7Smrj 		} else {
158512f080e7Smrj 			dma->dp_maxxfer = attr->dma_attr_maxxfer -
158612f080e7Smrj 			    (attr->dma_attr_maxxfer % attr->dma_attr_granular);
158712f080e7Smrj 		}
158812f080e7Smrj 	} else {
158912f080e7Smrj 		dma->dp_maxxfer = attr->dma_attr_maxxfer;
159012f080e7Smrj 	}
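	/*
	 * Worked example of the trim above (illustrative values): with
	 * dma_attr_maxxfer = 0x10000 and a non-power-of-two granularity of
	 * 0x600, 0x10000 % 0x600 == 0x400, so dp_maxxfer becomes 0xFC00, the
	 * largest multiple of 0x600 that does not exceed maxxfer.
	 */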
159112f080e7Smrj 
159212f080e7Smrj 	maxsegmentsize_ll = MIN(seg, dma->dp_maxxfer);
159312f080e7Smrj 	maxsegmentsize_ll = MIN(maxsegmentsize_ll, count_max);
159412f080e7Smrj 	if (maxsegmentsize_ll == 0 || (maxsegmentsize_ll > 0xFFFFFFFF)) {
159512f080e7Smrj 		maxsegmentsize = 0xFFFFFFFF;
159612f080e7Smrj 	} else {
159712f080e7Smrj 		maxsegmentsize = maxsegmentsize_ll;
159812f080e7Smrj 	}
159912f080e7Smrj 	dma->dp_sglinfo.si_max_cookie_size = maxsegmentsize;
160012f080e7Smrj 	dma->dp_sglinfo.si_segmask = attr->dma_attr_seg;
160112f080e7Smrj 
160212f080e7Smrj 	/* check the ddi_dma_attr arg to make sure it makes a little sense */
160312f080e7Smrj 	if (rootnex_alloc_check_parms) {
160412f080e7Smrj 		e = rootnex_valid_alloc_parms(attr, maxsegmentsize);
160512f080e7Smrj 		if (e != DDI_SUCCESS) {
160612f080e7Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ALLOC_FAIL]);
160712f080e7Smrj 			(void) rootnex_dma_freehdl(dip, rdip,
160812f080e7Smrj 			    (ddi_dma_handle_t)hp);
160912f080e7Smrj 			return (e);
161012f080e7Smrj 		}
161112f080e7Smrj 	}
161212f080e7Smrj 
161312f080e7Smrj 	*handlep = (ddi_dma_handle_t)hp;
161412f080e7Smrj 
161512f080e7Smrj 	ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
161612f080e7Smrj 	DTRACE_PROBE1(rootnex__alloc__handle, uint64_t,
161712f080e7Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
161812f080e7Smrj 
161912f080e7Smrj 	return (DDI_SUCCESS);
162012f080e7Smrj }
162112f080e7Smrj 
162212f080e7Smrj 
162312f080e7Smrj /*
162412f080e7Smrj  * rootnex_dma_freehdl()
162512f080e7Smrj  *    called from ddi_dma_free_handle().
162612f080e7Smrj  */
162712f080e7Smrj /*ARGSUSED*/
162812f080e7Smrj static int
162912f080e7Smrj rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
163012f080e7Smrj {
163112f080e7Smrj 	ddi_dma_impl_t *hp;
163212f080e7Smrj 	rootnex_dma_t *dma;
163312f080e7Smrj 
163412f080e7Smrj 
163512f080e7Smrj 	hp = (ddi_dma_impl_t *)handle;
163612f080e7Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
163712f080e7Smrj 
163812f080e7Smrj 	/* unbind should have been called first */
163912f080e7Smrj 	ASSERT(!dma->dp_inuse);
164012f080e7Smrj 
164112f080e7Smrj 	mutex_destroy(&dma->dp_mutex);
164212f080e7Smrj 	kmem_cache_free(rootnex_state->r_dmahdl_cache, hp);
164312f080e7Smrj 
164412f080e7Smrj 	ROOTNEX_PROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
164512f080e7Smrj 	DTRACE_PROBE1(rootnex__free__handle, uint64_t,
164612f080e7Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
164712f080e7Smrj 
164812f080e7Smrj 	if (rootnex_state->r_dvma_call_list_id)
164912f080e7Smrj 		ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
165012f080e7Smrj 
165112f080e7Smrj 	return (DDI_SUCCESS);
165212f080e7Smrj }
165312f080e7Smrj 
165412f080e7Smrj 
165512f080e7Smrj /*
165612f080e7Smrj  * rootnex_dma_bindhdl()
165712f080e7Smrj  *    called from ddi_dma_addr_bind_handle() and ddi_dma_buf_bind_handle().
165812f080e7Smrj  */
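/*
 * Illustrative driver-side call that lands here (the "hdl", "buf" and "len"
 * variables below are hypothetical):
 *
 *	ddi_dma_cookie_t cookie;
 *	uint_t ccount;
 *
 *	if (ddi_dma_addr_bind_handle(hdl, NULL, buf, len,
 *	    DDI_DMA_READ | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
 *	    &cookie, &ccount) != DDI_DMA_MAPPED)
 *		return (DDI_FAILURE);
 */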
165912f080e7Smrj /*ARGSUSED*/
166012f080e7Smrj static int
166112f080e7Smrj rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
166212f080e7Smrj     struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
166312f080e7Smrj {
166412f080e7Smrj 	rootnex_sglinfo_t *sinfo;
166512f080e7Smrj 	ddi_dma_attr_t *attr;
166612f080e7Smrj 	ddi_dma_impl_t *hp;
166712f080e7Smrj 	rootnex_dma_t *dma;
166812f080e7Smrj 	int kmflag;
166912f080e7Smrj 	int e;
167012f080e7Smrj 
167112f080e7Smrj 
167212f080e7Smrj 	hp = (ddi_dma_impl_t *)handle;
167312f080e7Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
167412f080e7Smrj 	sinfo = &dma->dp_sglinfo;
167512f080e7Smrj 	attr = &hp->dmai_attr;
167612f080e7Smrj 
167712f080e7Smrj 	hp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS;
167812f080e7Smrj 
167912f080e7Smrj 	/*
168012f080e7Smrj 	 * This is useful for debugging a driver. Not as useful in a production
168112f080e7Smrj 	 * system. The only time this will fail is if you have a driver bug.
168212f080e7Smrj 	 */
168312f080e7Smrj 	if (rootnex_bind_check_inuse) {
168412f080e7Smrj 		/*
168512f080e7Smrj 		 * No one else should ever have this lock unless someone else
168612f080e7Smrj 		 * is trying to use this handle. So contention on the lock
168712f080e7Smrj 		 * is the same as inuse being set.
168812f080e7Smrj 		 */
168912f080e7Smrj 		e = mutex_tryenter(&dma->dp_mutex);
169012f080e7Smrj 		if (e == 0) {
169112f080e7Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
169212f080e7Smrj 			return (DDI_DMA_INUSE);
169312f080e7Smrj 		}
169412f080e7Smrj 		if (dma->dp_inuse) {
169512f080e7Smrj 			mutex_exit(&dma->dp_mutex);
169612f080e7Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
169712f080e7Smrj 			return (DDI_DMA_INUSE);
169812f080e7Smrj 		}
169912f080e7Smrj 		dma->dp_inuse = B_TRUE;
170012f080e7Smrj 		mutex_exit(&dma->dp_mutex);
170112f080e7Smrj 	}
170212f080e7Smrj 
170312f080e7Smrj 	/* check the ddi_dma_attr arg to make sure it makes a little sense */
170412f080e7Smrj 	if (rootnex_bind_check_parms) {
170512f080e7Smrj 		e = rootnex_valid_bind_parms(dmareq, attr);
170612f080e7Smrj 		if (e != DDI_SUCCESS) {
170712f080e7Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
170812f080e7Smrj 			rootnex_clean_dmahdl(hp);
170912f080e7Smrj 			return (e);
171012f080e7Smrj 		}
171112f080e7Smrj 	}
171212f080e7Smrj 
171312f080e7Smrj 	/* save away the original bind info */
171412f080e7Smrj 	dma->dp_dma = dmareq->dmar_object;
171512f080e7Smrj 
171612f080e7Smrj 	/*
171712f080e7Smrj 	 * Figure out a rough estimate of what maximum number of pages this
171812f080e7Smrj 	 * buffer could use (a high estimate of course).
171912f080e7Smrj 	 */
172012f080e7Smrj 	sinfo->si_max_pages = mmu_btopr(dma->dp_dma.dmao_size) + 1;
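	/*
	 * Example (illustrative values): a 0x3000 byte buffer that starts in
	 * the middle of a 4K page can span mmu_btopr(0x3000) + 1 = 4 pages;
	 * the "+ 1" covers that worst-case page straddle.
	 */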
172112f080e7Smrj 
172212f080e7Smrj 	/*
172312f080e7Smrj 	 * We'll use the pre-allocated cookies for any bind that will *always*
172412f080e7Smrj 	 * fit (more important to be consistent, we don't want to create
172512f080e7Smrj 	 * additional degenerate cases).
172612f080e7Smrj 	 */
172712f080e7Smrj 	if (sinfo->si_max_pages <= rootnex_state->r_prealloc_cookies) {
172812f080e7Smrj 		dma->dp_cookies = (ddi_dma_cookie_t *)dma->dp_prealloc_buffer;
172912f080e7Smrj 		dma->dp_need_to_free_cookie = B_FALSE;
173012f080e7Smrj 		DTRACE_PROBE2(rootnex__bind__prealloc, dev_info_t *, rdip,
173112f080e7Smrj 		    uint_t, sinfo->si_max_pages);
173212f080e7Smrj 
173312f080e7Smrj 	/*
173412f080e7Smrj 	 * For anything larger than that, we'll go ahead and allocate the
173512f080e7Smrj 	 * maximum number of pages we expect to see. Hopefully, we won't be
173612f080e7Smrj 	 * seeing this path in the fast path for high performance devices very
173712f080e7Smrj 	 * frequently.
173812f080e7Smrj 	 *
173912f080e7Smrj 	 * A DDI bind interface that allowed the driver to provide storage to
174012f080e7Smrj 	 * the bind interface would speed this case up.
174112f080e7Smrj 	 */
174212f080e7Smrj 	} else {
174312f080e7Smrj 		/* convert the sleep flags */
174412f080e7Smrj 		if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
174512f080e7Smrj 			kmflag =  KM_SLEEP;
174612f080e7Smrj 		} else {
174712f080e7Smrj 			kmflag =  KM_NOSLEEP;
174812f080e7Smrj 		}
174912f080e7Smrj 
175012f080e7Smrj 		/*
175112f080e7Smrj 		 * Save away how much memory we allocated. If we're doing a
175212f080e7Smrj 		 * nosleep, the alloc could fail...
175312f080e7Smrj 		 */
175412f080e7Smrj 		dma->dp_cookie_size = sinfo->si_max_pages *
175512f080e7Smrj 		    sizeof (ddi_dma_cookie_t);
175612f080e7Smrj 		dma->dp_cookies = kmem_alloc(dma->dp_cookie_size, kmflag);
175712f080e7Smrj 		if (dma->dp_cookies == NULL) {
175812f080e7Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
175912f080e7Smrj 			rootnex_clean_dmahdl(hp);
176012f080e7Smrj 			return (DDI_DMA_NORESOURCES);
176112f080e7Smrj 		}
176212f080e7Smrj 		dma->dp_need_to_free_cookie = B_TRUE;
176312f080e7Smrj 		DTRACE_PROBE2(rootnex__bind__alloc, dev_info_t *, rdip, uint_t,
176412f080e7Smrj 		    sinfo->si_max_pages);
176512f080e7Smrj 	}
176612f080e7Smrj 	hp->dmai_cookie = dma->dp_cookies;
176712f080e7Smrj 
176812f080e7Smrj 	/*
176912f080e7Smrj 	 * Get the real sgl. rootnex_get_sgl will fill in cookie array while
177012f080e7Smrj 	 * looking at the constraints in the dma structure. It will then put some
177112f080e7Smrj 	 * additional state about the sgl in the dma struct (i.e. is the sgl
177212f080e7Smrj 	 * clean, or do we need to do some munging; how many pages need to be
177312f080e7Smrj 	 * copied, etc.)
177412f080e7Smrj 	 */
177512f080e7Smrj 	rootnex_get_sgl(&dmareq->dmar_object, dma->dp_cookies,
177612f080e7Smrj 	    &dma->dp_sglinfo);
177712f080e7Smrj 	ASSERT(sinfo->si_sgl_size <= sinfo->si_max_pages);
177812f080e7Smrj 
177912f080e7Smrj 	/* if we don't need a copy buffer, we don't need to sync */
178012f080e7Smrj 	if (sinfo->si_copybuf_req == 0) {
178112f080e7Smrj 		hp->dmai_rflags |= DMP_NOSYNC;
178212f080e7Smrj 	}
178312f080e7Smrj 
178412f080e7Smrj 	/*
178500d0963fSdilpreet 	 * If the driver supports FMA, insert the handle in the FMA DMA handle
178600d0963fSdilpreet 	 * cache.
178700d0963fSdilpreet 	 */
178800d0963fSdilpreet 	if (attr->dma_attr_flags & DDI_DMA_FLAGERR) {
178900d0963fSdilpreet 		hp->dmai_error.err_cf = rootnex_dma_check;
179000d0963fSdilpreet 		(void) ndi_fmc_insert(rdip, DMA_HANDLE, hp, NULL);
179100d0963fSdilpreet 	}
179200d0963fSdilpreet 
179300d0963fSdilpreet 	/*
179412f080e7Smrj 	 * if we don't need the copybuf and we don't need to do a partial,  we
179512f080e7Smrj 	 * hit the fast path. All the high performance devices should be trying
179612f080e7Smrj 	 * to hit this path. To hit this path, a device should be able to reach
179712f080e7Smrj 	 * all of memory, shouldn't try to bind more than it can transfer, and
179812f080e7Smrj 	 * the buffer shouldn't require more cookies than the driver/device can
179912f080e7Smrj 	 * handle [sgllen].
180012f080e7Smrj 	 */
180112f080e7Smrj 	if ((sinfo->si_copybuf_req == 0) &&
180212f080e7Smrj 	    (sinfo->si_sgl_size <= attr->dma_attr_sgllen) &&
180312f080e7Smrj 	    (dma->dp_dma.dmao_size < dma->dp_maxxfer)) {
180412f080e7Smrj 		/*
180512f080e7Smrj 		 * copy out the first cookie and ccountp, set the cookie
180612f080e7Smrj 		 * pointer to the second cookie. The first cookie is passed
180712f080e7Smrj 		 * back on the stack. Additional cookies are accessed via
180812f080e7Smrj 		 * ddi_dma_nextcookie()
180912f080e7Smrj 		 */
181012f080e7Smrj 		*cookiep = dma->dp_cookies[0];
181112f080e7Smrj 		*ccountp = sinfo->si_sgl_size;
181212f080e7Smrj 		hp->dmai_cookie++;
181312f080e7Smrj 		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
181412f080e7Smrj 		hp->dmai_nwin = 1;
181512f080e7Smrj 		ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
181612f080e7Smrj 		DTRACE_PROBE3(rootnex__bind__fast, dev_info_t *, rdip, uint64_t,
181712f080e7Smrj 		    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t,
181812f080e7Smrj 		    dma->dp_dma.dmao_size);
181912f080e7Smrj 		return (DDI_DMA_MAPPED);
182012f080e7Smrj 	}
182112f080e7Smrj 
182212f080e7Smrj 	/*
182312f080e7Smrj 	 * go to the slow path, we may need to alloc more memory, create
182412f080e7Smrj 	 * multiple windows, and munge up a sgl to make the device happy.
182512f080e7Smrj 	 */
182612f080e7Smrj 	e = rootnex_bind_slowpath(hp, dmareq, dma, attr, kmflag);
182712f080e7Smrj 	if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) {
182812f080e7Smrj 		if (dma->dp_need_to_free_cookie) {
182912f080e7Smrj 			kmem_free(dma->dp_cookies, dma->dp_cookie_size);
183012f080e7Smrj 		}
183112f080e7Smrj 		ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
183212f080e7Smrj 		rootnex_clean_dmahdl(hp); /* must be after free cookie */
183312f080e7Smrj 		return (e);
183412f080e7Smrj 	}
183512f080e7Smrj 
183612f080e7Smrj 	/* if the first window uses the copy buffer, sync it for the device */
183712f080e7Smrj 	if ((dma->dp_window[dma->dp_current_win].wd_dosync) &&
183812f080e7Smrj 	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
183912f080e7Smrj 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
184012f080e7Smrj 		    DDI_DMA_SYNC_FORDEV);
184112f080e7Smrj 	}
184212f080e7Smrj 
184312f080e7Smrj 	/*
184412f080e7Smrj 	 * copy out the first cookie and ccountp, set the cookie pointer to the
184512f080e7Smrj 	 * second cookie. Make sure the partial flag is set/cleared correctly.
184612f080e7Smrj 	 * If we have a partial map (i.e. multiple windows), the number of
184712f080e7Smrj 	 * cookies we return is the number of cookies in the first window.
184812f080e7Smrj 	 */
184912f080e7Smrj 	if (e == DDI_DMA_MAPPED) {
185012f080e7Smrj 		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
185112f080e7Smrj 		*ccountp = sinfo->si_sgl_size;
185212f080e7Smrj 	} else {
185312f080e7Smrj 		hp->dmai_rflags |= DDI_DMA_PARTIAL;
185412f080e7Smrj 		*ccountp = dma->dp_window[dma->dp_current_win].wd_cookie_cnt;
185512f080e7Smrj 		ASSERT(hp->dmai_nwin <= dma->dp_max_win);
185612f080e7Smrj 	}
185712f080e7Smrj 	*cookiep = dma->dp_cookies[0];
185812f080e7Smrj 	hp->dmai_cookie++;
185912f080e7Smrj 
186012f080e7Smrj 	ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
186112f080e7Smrj 	DTRACE_PROBE3(rootnex__bind__slow, dev_info_t *, rdip, uint64_t,
186212f080e7Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t,
186312f080e7Smrj 	    dma->dp_dma.dmao_size);
186412f080e7Smrj 	return (e);
186512f080e7Smrj }
186612f080e7Smrj 
186712f080e7Smrj 
186812f080e7Smrj /*
186912f080e7Smrj  * rootnex_dma_unbindhdl()
187012f080e7Smrj  *    called from ddi_dma_unbind_handle()
187112f080e7Smrj  */
187212f080e7Smrj /*ARGSUSED*/
187312f080e7Smrj static int
187412f080e7Smrj rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
187512f080e7Smrj     ddi_dma_handle_t handle)
187612f080e7Smrj {
187712f080e7Smrj 	ddi_dma_impl_t *hp;
187812f080e7Smrj 	rootnex_dma_t *dma;
187912f080e7Smrj 	int e;
188012f080e7Smrj 
188112f080e7Smrj 
188212f080e7Smrj 	hp = (ddi_dma_impl_t *)handle;
188312f080e7Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
188412f080e7Smrj 
188512f080e7Smrj 	/* make sure the buffer wasn't freed before calling unbind */
188612f080e7Smrj 	if (rootnex_unbind_verify_buffer) {
188712f080e7Smrj 		e = rootnex_verify_buffer(dma);
188812f080e7Smrj 		if (e != DDI_SUCCESS) {
188912f080e7Smrj 			ASSERT(0);
189012f080e7Smrj 			return (DDI_FAILURE);
189112f080e7Smrj 		}
189212f080e7Smrj 	}
189312f080e7Smrj 
189412f080e7Smrj 	/* sync the current window before unbinding the buffer */
189512f080e7Smrj 	if (dma->dp_window && dma->dp_window[dma->dp_current_win].wd_dosync &&
189612f080e7Smrj 	    (hp->dmai_rflags & DDI_DMA_READ)) {
189712f080e7Smrj 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
189812f080e7Smrj 		    DDI_DMA_SYNC_FORCPU);
189912f080e7Smrj 	}
190012f080e7Smrj 
190112f080e7Smrj 	/*
190200d0963fSdilpreet 	 * If the driver supports FMA, remove the handle in the FMA DMA handle
190300d0963fSdilpreet 	 * cache.
190400d0963fSdilpreet 	 */
190500d0963fSdilpreet 	if (hp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) {
190600d0963fSdilpreet 		if ((DEVI(rdip)->devi_fmhdl != NULL) &&
190700d0963fSdilpreet 		    (DDI_FM_DMA_ERR_CAP(DEVI(rdip)->devi_fmhdl->fh_cap))) {
190800d0963fSdilpreet 			(void) ndi_fmc_remove(rdip, DMA_HANDLE, hp);
190900d0963fSdilpreet 		}
191000d0963fSdilpreet 	}
191100d0963fSdilpreet 
191200d0963fSdilpreet 	/*
191312f080e7Smrj 	 * Clean up any copy buffer or window state. If we didn't use the
191412f080e7Smrj 	 * copy buffer or windows, there won't be much to do :-)
191512f080e7Smrj 	 */
191612f080e7Smrj 	rootnex_teardown_copybuf(dma);
191712f080e7Smrj 	rootnex_teardown_windows(dma);
191812f080e7Smrj 
191912f080e7Smrj 	/*
192012f080e7Smrj 	 * If we had to allocate space for the worst case sgl (it didn't
192112f080e7Smrj 	 * fit into our pre-allocated buffer), free that up now
192212f080e7Smrj 	 */
192312f080e7Smrj 	if (dma->dp_need_to_free_cookie) {
192412f080e7Smrj 		kmem_free(dma->dp_cookies, dma->dp_cookie_size);
192512f080e7Smrj 	}
192612f080e7Smrj 
192712f080e7Smrj 	/*
192812f080e7Smrj 	 * clean up the handle so it's ready for the next bind (i.e. if the
192912f080e7Smrj 	 * handle is reused).
193012f080e7Smrj 	 */
193112f080e7Smrj 	rootnex_clean_dmahdl(hp);
193212f080e7Smrj 
193312f080e7Smrj 	if (rootnex_state->r_dvma_call_list_id)
193412f080e7Smrj 		ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
193512f080e7Smrj 
193612f080e7Smrj 	ROOTNEX_PROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
193712f080e7Smrj 	DTRACE_PROBE1(rootnex__unbind, uint64_t,
193812f080e7Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
193912f080e7Smrj 
194012f080e7Smrj 	return (DDI_SUCCESS);
194112f080e7Smrj }
194212f080e7Smrj 
194312f080e7Smrj 
194412f080e7Smrj /*
194512f080e7Smrj  * rootnex_verify_buffer()
194612f080e7Smrj  *   verify the buffer wasn't freed
194712f080e7Smrj  */
194812f080e7Smrj static int
194912f080e7Smrj rootnex_verify_buffer(rootnex_dma_t *dma)
195012f080e7Smrj {
195112f080e7Smrj 	page_t **pplist;
195212f080e7Smrj 	caddr_t vaddr;
195312f080e7Smrj 	uint_t pcnt;
195412f080e7Smrj 	uint_t poff;
195512f080e7Smrj 	page_t *pp;
195600d0963fSdilpreet 	char b;
195712f080e7Smrj 	int i;
195812f080e7Smrj 
195912f080e7Smrj 	/* Figure out how many pages this buffer occupies */
196012f080e7Smrj 	if (dma->dp_dma.dmao_type == DMA_OTYP_PAGES) {
196112f080e7Smrj 		poff = dma->dp_dma.dmao_obj.pp_obj.pp_offset & MMU_PAGEOFFSET;
196212f080e7Smrj 	} else {
196312f080e7Smrj 		vaddr = dma->dp_dma.dmao_obj.virt_obj.v_addr;
196412f080e7Smrj 		poff = (uintptr_t)vaddr & MMU_PAGEOFFSET;
196512f080e7Smrj 	}
196612f080e7Smrj 	pcnt = mmu_btopr(dma->dp_dma.dmao_size + poff);
196712f080e7Smrj 
196812f080e7Smrj 	switch (dma->dp_dma.dmao_type) {
196912f080e7Smrj 	case DMA_OTYP_PAGES:
197012f080e7Smrj 		/*
197112f080e7Smrj 		 * for a linked list of pp's walk through them to make sure
197212f080e7Smrj 		 * they're locked and not free.
197312f080e7Smrj 		 */
197412f080e7Smrj 		pp = dma->dp_dma.dmao_obj.pp_obj.pp_pp;
197512f080e7Smrj 		for (i = 0; i < pcnt; i++) {
197612f080e7Smrj 			if (PP_ISFREE(pp) || !PAGE_LOCKED(pp)) {
197712f080e7Smrj 				return (DDI_FAILURE);
197812f080e7Smrj 			}
19797c478bd9Sstevel@tonic-gate 			pp = pp->p_next;
19807c478bd9Sstevel@tonic-gate 		}
19817c478bd9Sstevel@tonic-gate 		break;
198212f080e7Smrj 
19837c478bd9Sstevel@tonic-gate 	case DMA_OTYP_VADDR:
19847c478bd9Sstevel@tonic-gate 	case DMA_OTYP_BUFVADDR:
198512f080e7Smrj 		pplist = dma->dp_dma.dmao_obj.virt_obj.v_priv;
198612f080e7Smrj 		/*
198712f080e7Smrj 		 * for an array of pp's walk through them to make sure they're
198812f080e7Smrj 		 * not free. It's possible that they may not be locked.
198912f080e7Smrj 		 */
199012f080e7Smrj 		if (pplist) {
199112f080e7Smrj 			for (i = 0; i < pcnt; i++) {
199212f080e7Smrj 				if (PP_ISFREE(pplist[i])) {
199312f080e7Smrj 					return (DDI_FAILURE);
199412f080e7Smrj 				}
199512f080e7Smrj 			}
199612f080e7Smrj 
199712f080e7Smrj 		/* For a virtual address, try to peek at each page */
199812f080e7Smrj 		} else {
199912f080e7Smrj 			if (dma->dp_sglinfo.si_asp == &kas) {
200012f080e7Smrj 				for (i = 0; i < pcnt; i++) {
200100d0963fSdilpreet 					if (ddi_peek8(NULL, vaddr, &b) ==
200200d0963fSdilpreet 					    DDI_FAILURE)
200312f080e7Smrj 						return (DDI_FAILURE);
200400d0963fSdilpreet 					vaddr += MMU_PAGESIZE;
200512f080e7Smrj 				}
200612f080e7Smrj 			}
200712f080e7Smrj 		}
200812f080e7Smrj 		break;
200912f080e7Smrj 
201012f080e7Smrj 	default:
201112f080e7Smrj 		ASSERT(0);
201212f080e7Smrj 		break;
201312f080e7Smrj 	}
201412f080e7Smrj 
201512f080e7Smrj 	return (DDI_SUCCESS);
201612f080e7Smrj }
201712f080e7Smrj 
201812f080e7Smrj 
201912f080e7Smrj /*
202012f080e7Smrj  * rootnex_clean_dmahdl()
202112f080e7Smrj  *    Clean the dma handle. This should be called on handle alloc and on
202212f080e7Smrj  *    handle unbind. Set the handle state to the default settings.
202312f080e7Smrj  */
202412f080e7Smrj static void
202512f080e7Smrj rootnex_clean_dmahdl(ddi_dma_impl_t *hp)
202612f080e7Smrj {
202712f080e7Smrj 	rootnex_dma_t *dma;
202812f080e7Smrj 
202912f080e7Smrj 
203012f080e7Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
203112f080e7Smrj 
203212f080e7Smrj 	hp->dmai_nwin = 0;
203312f080e7Smrj 	dma->dp_current_cookie = 0;
203412f080e7Smrj 	dma->dp_copybuf_size = 0;
203512f080e7Smrj 	dma->dp_window = NULL;
203612f080e7Smrj 	dma->dp_cbaddr = NULL;
203712f080e7Smrj 	dma->dp_inuse = B_FALSE;
203812f080e7Smrj 	dma->dp_need_to_free_cookie = B_FALSE;
203912f080e7Smrj 	dma->dp_need_to_free_window = B_FALSE;
204012f080e7Smrj 	dma->dp_partial_required = B_FALSE;
204112f080e7Smrj 	dma->dp_trim_required = B_FALSE;
204212f080e7Smrj 	dma->dp_sglinfo.si_copybuf_req = 0;
204312f080e7Smrj #if !defined(__amd64)
204412f080e7Smrj 	dma->dp_cb_remaping = B_FALSE;
204512f080e7Smrj 	dma->dp_kva = NULL;
204612f080e7Smrj #endif
204712f080e7Smrj 
204812f080e7Smrj 	/* FMA related initialization */
204912f080e7Smrj 	hp->dmai_fault = 0;
205012f080e7Smrj 	hp->dmai_fault_check = NULL;
205112f080e7Smrj 	hp->dmai_fault_notify = NULL;
205212f080e7Smrj 	hp->dmai_error.err_ena = 0;
205312f080e7Smrj 	hp->dmai_error.err_status = DDI_FM_OK;
205412f080e7Smrj 	hp->dmai_error.err_expected = DDI_FM_ERR_UNEXPECTED;
205512f080e7Smrj 	hp->dmai_error.err_ontrap = NULL;
205612f080e7Smrj 	hp->dmai_error.err_fep = NULL;
205700d0963fSdilpreet 	hp->dmai_error.err_cf = NULL;
205812f080e7Smrj }
205912f080e7Smrj 
206012f080e7Smrj 
206112f080e7Smrj /*
206212f080e7Smrj  * rootnex_valid_alloc_parms()
206312f080e7Smrj  *    Called in ddi_dma_alloc_handle path to validate its parameters.
206412f080e7Smrj  */
206512f080e7Smrj static int
206612f080e7Smrj rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegmentsize)
206712f080e7Smrj {
206812f080e7Smrj 	if ((attr->dma_attr_seg < MMU_PAGEOFFSET) ||
206912f080e7Smrj 	    (attr->dma_attr_count_max < MMU_PAGEOFFSET) ||
207012f080e7Smrj 	    (attr->dma_attr_granular > MMU_PAGESIZE) ||
207112f080e7Smrj 	    (attr->dma_attr_maxxfer < MMU_PAGESIZE)) {
207212f080e7Smrj 		return (DDI_DMA_BADATTR);
207312f080e7Smrj 	}
207412f080e7Smrj 
207512f080e7Smrj 	if (attr->dma_attr_addr_hi <= attr->dma_attr_addr_lo) {
207612f080e7Smrj 		return (DDI_DMA_BADATTR);
207712f080e7Smrj 	}
207812f080e7Smrj 
207912f080e7Smrj 	if ((attr->dma_attr_seg & MMU_PAGEOFFSET) != MMU_PAGEOFFSET ||
208012f080e7Smrj 	    MMU_PAGESIZE & (attr->dma_attr_granular - 1) ||
208112f080e7Smrj 	    attr->dma_attr_sgllen <= 0) {
208212f080e7Smrj 		return (DDI_DMA_BADATTR);
208312f080e7Smrj 	}
208412f080e7Smrj 
208512f080e7Smrj 	/* We should be able to DMA into every byte offset in a page */
208612f080e7Smrj 	if (maxsegmentsize < MMU_PAGESIZE) {
208712f080e7Smrj 		return (DDI_DMA_BADATTR);
208812f080e7Smrj 	}
208912f080e7Smrj 
209012f080e7Smrj 	return (DDI_SUCCESS);
209112f080e7Smrj }
209212f080e7Smrj 
209312f080e7Smrj 
209412f080e7Smrj /*
209512f080e7Smrj  * rootnex_valid_bind_parms()
209612f080e7Smrj  *    Called in ddi_dma_*_bind_handle path to validate its parameters.
209712f080e7Smrj  */
209812f080e7Smrj /* ARGSUSED */
209912f080e7Smrj static int
210012f080e7Smrj rootnex_valid_bind_parms(ddi_dma_req_t *dmareq, ddi_dma_attr_t *attr)
210112f080e7Smrj {
210212f080e7Smrj #if !defined(__amd64)
210312f080e7Smrj 	/*
210412f080e7Smrj 	 * we only support up to a 2G-1 transfer size on 32-bit kernels so
210512f080e7Smrj 	 * we can track the offset for the obsoleted interfaces.
210612f080e7Smrj 	 */
210712f080e7Smrj 	if (dmareq->dmar_object.dmao_size > 0x7FFFFFFF) {
210812f080e7Smrj 		return (DDI_DMA_TOOBIG);
210912f080e7Smrj 	}
211012f080e7Smrj #endif
211112f080e7Smrj 
211212f080e7Smrj 	return (DDI_SUCCESS);
211312f080e7Smrj }
211412f080e7Smrj 
211512f080e7Smrj 
211612f080e7Smrj /*
211712f080e7Smrj  * rootnex_get_sgl()
211812f080e7Smrj  *    Called in bind fastpath to get the sgl. Most of this will be replaced
211912f080e7Smrj  *    with a call to the vm layer when vm2.0 comes around...
212012f080e7Smrj  */
212112f080e7Smrj static void
212212f080e7Smrj rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
212312f080e7Smrj     rootnex_sglinfo_t *sglinfo)
212412f080e7Smrj {
212512f080e7Smrj 	ddi_dma_atyp_t buftype;
212612f080e7Smrj 	uint64_t last_page;
212712f080e7Smrj 	uint64_t offset;
212812f080e7Smrj 	uint64_t addrhi;
212912f080e7Smrj 	uint64_t addrlo;
213012f080e7Smrj 	uint64_t maxseg;
213112f080e7Smrj 	page_t **pplist;
213212f080e7Smrj 	uint64_t paddr;
213312f080e7Smrj 	uint32_t psize;
213412f080e7Smrj 	uint32_t size;
213512f080e7Smrj 	caddr_t vaddr;
213612f080e7Smrj 	uint_t pcnt;
213712f080e7Smrj 	page_t *pp;
213812f080e7Smrj 	uint_t cnt;
213912f080e7Smrj 
214012f080e7Smrj 
214112f080e7Smrj 	/* shortcuts */
214212f080e7Smrj 	pplist = dmar_object->dmao_obj.virt_obj.v_priv;
214312f080e7Smrj 	vaddr = dmar_object->dmao_obj.virt_obj.v_addr;
214412f080e7Smrj 	maxseg = sglinfo->si_max_cookie_size;
214512f080e7Smrj 	buftype = dmar_object->dmao_type;
214612f080e7Smrj 	addrhi = sglinfo->si_max_addr;
214712f080e7Smrj 	addrlo = sglinfo->si_min_addr;
214812f080e7Smrj 	size = dmar_object->dmao_size;
214912f080e7Smrj 
215012f080e7Smrj 	pcnt = 0;
215112f080e7Smrj 	cnt = 0;
215212f080e7Smrj 
215312f080e7Smrj 	/*
215412f080e7Smrj 	 * if we were passed down a linked list of pages, i.e. pointer to
215512f080e7Smrj 	 * page_t, use this to get our physical address and buf offset.
215612f080e7Smrj 	 */
215712f080e7Smrj 	if (buftype == DMA_OTYP_PAGES) {
215812f080e7Smrj 		pp = dmar_object->dmao_obj.pp_obj.pp_pp;
215912f080e7Smrj 		ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
216012f080e7Smrj 		offset =  dmar_object->dmao_obj.pp_obj.pp_offset &
216112f080e7Smrj 		    MMU_PAGEOFFSET;
216212f080e7Smrj 		paddr = ptob64(pp->p_pagenum) + offset;
216312f080e7Smrj 		psize = MIN(size, (MMU_PAGESIZE - offset));
216412f080e7Smrj 		pp = pp->p_next;
216512f080e7Smrj 		sglinfo->si_asp = NULL;
216612f080e7Smrj 
216712f080e7Smrj 	/*
216812f080e7Smrj 	 * We weren't passed down a linked list of pages, but if we were passed
216912f080e7Smrj 	 * down an array of pages, use this to get our physical address and buf
217012f080e7Smrj 	 * offset.
217112f080e7Smrj 	 */
217212f080e7Smrj 	} else if (pplist != NULL) {
217312f080e7Smrj 		ASSERT((buftype == DMA_OTYP_VADDR) ||
217412f080e7Smrj 		    (buftype == DMA_OTYP_BUFVADDR));
217512f080e7Smrj 
217612f080e7Smrj 		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
217712f080e7Smrj 		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
217812f080e7Smrj 		if (sglinfo->si_asp == NULL) {
217912f080e7Smrj 			sglinfo->si_asp = &kas;
218012f080e7Smrj 		}
218112f080e7Smrj 
218212f080e7Smrj 		ASSERT(!PP_ISFREE(pplist[pcnt]));
218312f080e7Smrj 		paddr = ptob64(pplist[pcnt]->p_pagenum);
218412f080e7Smrj 		paddr += offset;
218512f080e7Smrj 		psize = MIN(size, (MMU_PAGESIZE - offset));
218612f080e7Smrj 		pcnt++;
218712f080e7Smrj 
218812f080e7Smrj 	/*
218912f080e7Smrj 	 * All we have is a virtual address, we'll need to call into the VM
219012f080e7Smrj 	 * to get the physical address.
219112f080e7Smrj 	 */
219212f080e7Smrj 	} else {
219312f080e7Smrj 		ASSERT((buftype == DMA_OTYP_VADDR) ||
219412f080e7Smrj 		    (buftype == DMA_OTYP_BUFVADDR));
219512f080e7Smrj 
219612f080e7Smrj 		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
219712f080e7Smrj 		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
219812f080e7Smrj 		if (sglinfo->si_asp == NULL) {
219912f080e7Smrj 			sglinfo->si_asp = &kas;
220012f080e7Smrj 		}
220112f080e7Smrj 
220212f080e7Smrj 		paddr = ptob64(hat_getpfnum(sglinfo->si_asp->a_hat, vaddr));
220312f080e7Smrj 		paddr += offset;
220412f080e7Smrj 		psize = MIN(size, (MMU_PAGESIZE - offset));
220512f080e7Smrj 		vaddr += psize;
220612f080e7Smrj 	}
220712f080e7Smrj 
220812f080e7Smrj 	/*
220912f080e7Smrj 	 * Set up the first cookie with the physical address of the page and
221012f080e7Smrj 	 * the size of the page (which takes into account the initial offset
221112f080e7Smrj 	 * into the page).
221212f080e7Smrj 	 */
221312f080e7Smrj 	sgl[cnt].dmac_laddress = paddr;
221412f080e7Smrj 	sgl[cnt].dmac_size = psize;
221512f080e7Smrj 	sgl[cnt].dmac_type = 0;
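	/*
	 * Example of the first-cookie math above (illustrative values): a
	 * buffer starting at page offset 0x800 with a total size of 0x3000
	 * gets a first cookie of psize = MIN(0x3000, 0x1000 - 0x800) = 0x800
	 * bytes; the remaining 0x2800 bytes are carved up by the loop below.
	 */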
221612f080e7Smrj 
221712f080e7Smrj 	/*
221812f080e7Smrj 	 * Save away the buffer offset into the page. We'll need this later in
221912f080e7Smrj 	 * the copy buffer code to help figure out the page index within the
222012f080e7Smrj 	 * buffer and the offset into the current page.
222112f080e7Smrj 	 */
222212f080e7Smrj 	sglinfo->si_buf_offset = offset;
222312f080e7Smrj 
222412f080e7Smrj 	/*
222512f080e7Smrj 	 * If the DMA engine can't reach the physical address, increase how
222612f080e7Smrj 	 * much copy buffer we need. We always increase by pagesize so we don't
222712f080e7Smrj 	 * have to worry about converting offsets. Set a flag in the cookie's
222812f080e7Smrj 	 * dmac_type to indicate that it uses the copy buffer. If this isn't
222912f080e7Smrj 	 * the last cookie, go to the next cookie (since we separate each page
223012f080e7Smrj 	 * which uses the copy buffer, in case the copy buffer is not
223112f080e7Smrj 	 * physically contiguous).
223212f080e7Smrj 	 */
223312f080e7Smrj 	if ((paddr < addrlo) || ((paddr + psize) > addrhi)) {
223412f080e7Smrj 		sglinfo->si_copybuf_req += MMU_PAGESIZE;
223512f080e7Smrj 		sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
223612f080e7Smrj 		if ((cnt + 1) < sglinfo->si_max_pages) {
223712f080e7Smrj 			cnt++;
223812f080e7Smrj 			sgl[cnt].dmac_laddress = 0;
223912f080e7Smrj 			sgl[cnt].dmac_size = 0;
224012f080e7Smrj 			sgl[cnt].dmac_type = 0;
224112f080e7Smrj 		}
224212f080e7Smrj 	}
224312f080e7Smrj 
224412f080e7Smrj 	/*
224512f080e7Smrj 	 * save this page's physical address so we can figure out if the next
224612f080e7Smrj 	 * page is physically contiguous. Keep decrementing size until we are
224712f080e7Smrj 	 * done with the buffer.
224812f080e7Smrj 	 */
224912f080e7Smrj 	last_page = paddr & MMU_PAGEMASK;
225012f080e7Smrj 	size -= psize;
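	/*
	 * For example (illustrative values), if last_page is 0x1234000 and
	 * the next paddr comes back as 0x1235000, the two pages are
	 * physically contiguous and the loop below can grow the current
	 * cookie rather than starting a new one.
	 */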
225112f080e7Smrj 
225212f080e7Smrj 	while (size > 0) {
225312f080e7Smrj 		/* Get the size for this page (i.e. partial or full page) */
225412f080e7Smrj 		psize = MIN(size, MMU_PAGESIZE);
225512f080e7Smrj 
225612f080e7Smrj 		if (buftype == DMA_OTYP_PAGES) {
225712f080e7Smrj 			/* get the paddr from the page_t */
225812f080e7Smrj 			ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
225912f080e7Smrj 			paddr = ptob64(pp->p_pagenum);
226012f080e7Smrj 			pp = pp->p_next;
226112f080e7Smrj 		} else if (pplist != NULL) {
226212f080e7Smrj 			/* index into the array of page_t's to get the paddr */
226312f080e7Smrj 			ASSERT(!PP_ISFREE(pplist[pcnt]));
226412f080e7Smrj 			paddr = ptob64(pplist[pcnt]->p_pagenum);
226512f080e7Smrj 			pcnt++;
226612f080e7Smrj 		} else {
226712f080e7Smrj 			/* call into the VM to get the paddr */
226812f080e7Smrj 			paddr =  ptob64(hat_getpfnum(sglinfo->si_asp->a_hat,
226912f080e7Smrj 			    vaddr));
227012f080e7Smrj 			vaddr += psize;
227112f080e7Smrj 		}
227212f080e7Smrj 
227312f080e7Smrj 		/* check to see if this page needs the copy buffer */
227412f080e7Smrj 		if ((paddr < addrlo) || ((paddr + psize) > addrhi)) {
227512f080e7Smrj 			sglinfo->si_copybuf_req += MMU_PAGESIZE;
227612f080e7Smrj 
227712f080e7Smrj 			/*
227812f080e7Smrj 			 * if there is something in the current cookie, go to
227912f080e7Smrj 			 * the next one. We only want one page in a cookie which
228012f080e7Smrj 			 * uses the copybuf since the copybuf doesn't have to
228112f080e7Smrj 			 * be physically contiguous.
228212f080e7Smrj 			 */
228312f080e7Smrj 			if (sgl[cnt].dmac_size != 0) {
228412f080e7Smrj 				cnt++;
228512f080e7Smrj 			}
228612f080e7Smrj 			sgl[cnt].dmac_laddress = paddr;
228712f080e7Smrj 			sgl[cnt].dmac_size = psize;
228812f080e7Smrj #if defined(__amd64)
228912f080e7Smrj 			sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
229012f080e7Smrj #else
229112f080e7Smrj 			/*
229212f080e7Smrj 			 * save the buf offset for 32-bit kernel. used in the
229312f080e7Smrj 			 * obsoleted interfaces.
229412f080e7Smrj 			 */
229512f080e7Smrj 			sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF |
229612f080e7Smrj 			    (dmar_object->dmao_size - size);
229712f080e7Smrj #endif
229812f080e7Smrj 			/* if this isn't the last cookie, go to the next one */
229912f080e7Smrj 			if ((cnt + 1) < sglinfo->si_max_pages) {
230012f080e7Smrj 				cnt++;
230112f080e7Smrj 				sgl[cnt].dmac_laddress = 0;
230212f080e7Smrj 				sgl[cnt].dmac_size = 0;
230312f080e7Smrj 				sgl[cnt].dmac_type = 0;
230412f080e7Smrj 			}
230512f080e7Smrj 
230612f080e7Smrj 		/*
230712f080e7Smrj 		 * this page didn't need the copy buffer, but either it's not
230812f080e7Smrj 		 * physically contiguous, or it would put us over a segment
230912f080e7Smrj 		 * boundary, or it puts us over the max cookie size, or the
231012f080e7Smrj 		 * current sgl doesn't have anything in it.
231112f080e7Smrj 		 */
231212f080e7Smrj 		} else if (((last_page + MMU_PAGESIZE) != paddr) ||
231312f080e7Smrj 		    !(paddr & sglinfo->si_segmask) ||
231412f080e7Smrj 		    ((sgl[cnt].dmac_size + psize) > maxseg) ||
231512f080e7Smrj 		    (sgl[cnt].dmac_size == 0)) {
231612f080e7Smrj 			/*
231712f080e7Smrj 			 * if we're not already in a new cookie, go to the next
231812f080e7Smrj 			 * cookie.
231912f080e7Smrj 			 */
232012f080e7Smrj 			if (sgl[cnt].dmac_size != 0) {
232112f080e7Smrj 				cnt++;
232212f080e7Smrj 			}
232312f080e7Smrj 
232412f080e7Smrj 			/* save the cookie information */
232512f080e7Smrj 			sgl[cnt].dmac_laddress = paddr;
232612f080e7Smrj 			sgl[cnt].dmac_size = psize;
232712f080e7Smrj #if defined(__amd64)
232812f080e7Smrj 			sgl[cnt].dmac_type = 0;
232912f080e7Smrj #else
233012f080e7Smrj 			/*
233112f080e7Smrj 			 * save the buf offset for 32-bit kernel. used in the
233212f080e7Smrj 			 * obsoleted interfaces.
233312f080e7Smrj 			 */
233412f080e7Smrj 			sgl[cnt].dmac_type = dmar_object->dmao_size - size;
233512f080e7Smrj #endif
233612f080e7Smrj 
233712f080e7Smrj 		/*
233812f080e7Smrj 		 * this page didn't need the copy buffer, it is physically
233912f080e7Smrj 		 * contiguous with the last page, and it's <= the max cookie
234012f080e7Smrj 		 * size.
234112f080e7Smrj 		 */
234212f080e7Smrj 		} else {
234312f080e7Smrj 			sgl[cnt].dmac_size += psize;
234412f080e7Smrj 
234512f080e7Smrj 			/*
234612f080e7Smrj 			 * if this exactly ==  the maximum cookie size, and
234712f080e7Smrj 			 * it isn't the last cookie, go to the next cookie.
234812f080e7Smrj 			 */
234912f080e7Smrj 			if (((sgl[cnt].dmac_size + psize) == maxseg) &&
235012f080e7Smrj 			    ((cnt + 1) < sglinfo->si_max_pages)) {
235112f080e7Smrj 				cnt++;
235212f080e7Smrj 				sgl[cnt].dmac_laddress = 0;
235312f080e7Smrj 				sgl[cnt].dmac_size = 0;
235412f080e7Smrj 				sgl[cnt].dmac_type = 0;
235512f080e7Smrj 			}
235612f080e7Smrj 		}
235712f080e7Smrj 
235812f080e7Smrj 		/*
235912f080e7Smrj 		 * save this page's physical address so we can figure out if the
236012f080e7Smrj 		 * next page is physically contiguous. Keep decrementing size
236112f080e7Smrj 		 * until we are done with the buffer.
236212f080e7Smrj 		 */
236312f080e7Smrj 		last_page = paddr;
236412f080e7Smrj 		size -= psize;
236512f080e7Smrj 	}
236612f080e7Smrj 
236712f080e7Smrj 	/* we're done, save away how many cookies the sgl has */
236812f080e7Smrj 	if (sgl[cnt].dmac_size == 0) {
236912f080e7Smrj 		ASSERT(cnt < sglinfo->si_max_pages);
237012f080e7Smrj 		sglinfo->si_sgl_size = cnt;
237112f080e7Smrj 	} else {
237212f080e7Smrj 		sglinfo->si_sgl_size = cnt + 1;
237312f080e7Smrj 	}
237412f080e7Smrj }
237512f080e7Smrj 
237612f080e7Smrj 
237712f080e7Smrj /*
237812f080e7Smrj  * rootnex_bind_slowpath()
237912f080e7Smrj  *    Call in the bind path if the calling driver can't use the sgl without
238012f080e7Smrj  *    modifying it. We either need to use the copy buffer and/or we will end up
238112f080e7Smrj  *    with a partial bind.
238212f080e7Smrj  */
238312f080e7Smrj static int
238412f080e7Smrj rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
238512f080e7Smrj     rootnex_dma_t *dma, ddi_dma_attr_t *attr, int kmflag)
238612f080e7Smrj {
238712f080e7Smrj 	rootnex_sglinfo_t *sinfo;
238812f080e7Smrj 	rootnex_window_t *window;
238912f080e7Smrj 	ddi_dma_cookie_t *cookie;
239012f080e7Smrj 	size_t copybuf_used;
239112f080e7Smrj 	size_t dmac_size;
239212f080e7Smrj 	boolean_t partial;
239312f080e7Smrj 	off_t cur_offset;
239412f080e7Smrj 	page_t *cur_pp;
239512f080e7Smrj 	major_t mnum;
239612f080e7Smrj 	int e;
239712f080e7Smrj 	int i;
239812f080e7Smrj 
239912f080e7Smrj 
240012f080e7Smrj 	sinfo = &dma->dp_sglinfo;
240112f080e7Smrj 	copybuf_used = 0;
240212f080e7Smrj 	partial = B_FALSE;
240312f080e7Smrj 
240412f080e7Smrj 	/*
240512f080e7Smrj 	 * If we're using the copybuf, set the copybuf state in dma struct.
240612f080e7Smrj 	 * Needs to be first since it sets the copy buffer size.
240712f080e7Smrj 	 */
240812f080e7Smrj 	if (sinfo->si_copybuf_req != 0) {
240912f080e7Smrj 		e = rootnex_setup_copybuf(hp, dmareq, dma, attr);
241012f080e7Smrj 		if (e != DDI_SUCCESS) {
241112f080e7Smrj 			return (e);
241212f080e7Smrj 		}
241312f080e7Smrj 	} else {
241412f080e7Smrj 		dma->dp_copybuf_size = 0;
241512f080e7Smrj 	}
241612f080e7Smrj 
241712f080e7Smrj 	/*
241812f080e7Smrj 	 * Figure out if we need to do a partial mapping. If so, figure out
241912f080e7Smrj 	 * if we need to trim the buffers when we munge the sgl.
242012f080e7Smrj 	 */
242112f080e7Smrj 	if ((dma->dp_copybuf_size < sinfo->si_copybuf_req) ||
242212f080e7Smrj 	    (dma->dp_dma.dmao_size > dma->dp_maxxfer) ||
242312f080e7Smrj 	    (attr->dma_attr_sgllen < sinfo->si_sgl_size)) {
242412f080e7Smrj 		dma->dp_partial_required = B_TRUE;
242512f080e7Smrj 		if (attr->dma_attr_granular != 1) {
242612f080e7Smrj 			dma->dp_trim_required = B_TRUE;
242712f080e7Smrj 		}
242812f080e7Smrj 	} else {
242912f080e7Smrj 		dma->dp_partial_required = B_FALSE;
243012f080e7Smrj 		dma->dp_trim_required = B_FALSE;
243112f080e7Smrj 	}
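
	/*
	 * Illustrative example of the test above (hypothetical attr values):
	 * with dma_attr_sgllen == 17 and si_sgl_size == 30 cookies, one pass
	 * can't cover the buffer, so dp_partial_required is set. If
	 * dma_attr_granular were 512 rather than 1, we would also have to
	 * trim so that each window ends on a 512 byte multiple.
	 */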
243212f080e7Smrj 
243312f080e7Smrj 	/* If we need to do a partial bind, make sure the driver supports it */
243412f080e7Smrj 	if (dma->dp_partial_required &&
243512f080e7Smrj 	    !(dmareq->dmar_flags & DDI_DMA_PARTIAL)) {
243612f080e7Smrj 
243712f080e7Smrj 		mnum = ddi_driver_major(dma->dp_dip);
243812f080e7Smrj 		/*
243912f080e7Smrj 		 * rootnex_bind_warn is patchable, which allows us to print one
244012f080e7Smrj 		 * warning per major number.
244112f080e7Smrj 		 */
244212f080e7Smrj 		if ((rootnex_bind_warn) &&
244312f080e7Smrj 		    ((rootnex_warn_list[mnum] & ROOTNEX_BIND_WARNING) == 0)) {
244412f080e7Smrj 			rootnex_warn_list[mnum] |= ROOTNEX_BIND_WARNING;
244512f080e7Smrj 			cmn_err(CE_WARN, "!%s: coding error detected, the "
244612f080e7Smrj 			    "driver is using ddi_dma_attr(9S) incorrectly. "
244712f080e7Smrj 			    "There is a small risk of data corruption in "
244812f080e7Smrj 			    "particular with large I/Os. The driver should be "
244912f080e7Smrj 			    "replaced with a corrected version for proper "
245012f080e7Smrj 			    "system operation. To disable this warning, add "
245112f080e7Smrj 			    "'set rootnex:rootnex_bind_warn=0' to "
245212f080e7Smrj 			    "/etc/system(4).", ddi_driver_name(dma->dp_dip));
245312f080e7Smrj 		}
245412f080e7Smrj 		return (DDI_DMA_TOOBIG);
245512f080e7Smrj 	}
245612f080e7Smrj 
245712f080e7Smrj 	/*
245812f080e7Smrj 	 * we might need multiple windows, setup state to handle them. In this
245912f080e7Smrj 	 * code path, we will have at least one window.
246012f080e7Smrj 	 */
246112f080e7Smrj 	e = rootnex_setup_windows(hp, dma, attr, kmflag);
246212f080e7Smrj 	if (e != DDI_SUCCESS) {
246312f080e7Smrj 		rootnex_teardown_copybuf(dma);
246412f080e7Smrj 		return (e);
246512f080e7Smrj 	}
246612f080e7Smrj 
246712f080e7Smrj 	window = &dma->dp_window[0];
246812f080e7Smrj 	cookie = &dma->dp_cookies[0];
246912f080e7Smrj 	cur_offset = 0;
247012f080e7Smrj 	rootnex_init_win(hp, dma, window, cookie, cur_offset);
247112f080e7Smrj 	if (dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) {
247212f080e7Smrj 		cur_pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp;
247312f080e7Smrj 	}
247412f080e7Smrj 
247512f080e7Smrj 	/* loop through all the cookies we got back from get_sgl() */
247612f080e7Smrj 	for (i = 0; i < sinfo->si_sgl_size; i++) {
247712f080e7Smrj 		/*
247812f080e7Smrj 		 * If we're using the copy buffer, check this cookie and setup
247912f080e7Smrj 		 * its associated copy buffer state. If this cookie uses the
248012f080e7Smrj 		 * copy buffer, make sure we sync this window during dma_sync.
248112f080e7Smrj 		 */
248212f080e7Smrj 		if (dma->dp_copybuf_size > 0) {
248312f080e7Smrj 			rootnex_setup_cookie(&dmareq->dmar_object, dma, cookie,
248412f080e7Smrj 			    cur_offset, &copybuf_used, &cur_pp);
248512f080e7Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
248612f080e7Smrj 				window->wd_dosync = B_TRUE;
248712f080e7Smrj 			}
248812f080e7Smrj 		}
248912f080e7Smrj 
249012f080e7Smrj 		/*
249112f080e7Smrj 		 * save away the cookie size, since it could be modified in
249212f080e7Smrj 		 * the windowing code.
249312f080e7Smrj 		 */
249412f080e7Smrj 		dmac_size = cookie->dmac_size;
249512f080e7Smrj 
249612f080e7Smrj 		/* if we went over max copybuf size */
249712f080e7Smrj 		if (dma->dp_copybuf_size &&
249812f080e7Smrj 		    (copybuf_used > dma->dp_copybuf_size)) {
249912f080e7Smrj 			partial = B_TRUE;
250012f080e7Smrj 			e = rootnex_copybuf_window_boundary(hp, dma, &window,
250112f080e7Smrj 			    cookie, cur_offset, &copybuf_used);
250212f080e7Smrj 			if (e != DDI_SUCCESS) {
250312f080e7Smrj 				rootnex_teardown_copybuf(dma);
250412f080e7Smrj 				rootnex_teardown_windows(dma);
250512f080e7Smrj 				return (e);
250612f080e7Smrj 			}
250712f080e7Smrj 
250812f080e7Smrj 			/*
250912f080e7Smrj 			 * if the cookie uses the copy buffer, make sure the
251012f080e7Smrj 			 * new window we just moved to is set to sync.
251112f080e7Smrj 			 */
251212f080e7Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
251312f080e7Smrj 				window->wd_dosync = B_TRUE;
251412f080e7Smrj 			}
251512f080e7Smrj 			DTRACE_PROBE1(rootnex__copybuf__window, dev_info_t *,
251612f080e7Smrj 			    dma->dp_dip);
251712f080e7Smrj 
251812f080e7Smrj 		/* if the cookie cnt == max sgllen, move to the next window */
251912f080e7Smrj 		} else if (window->wd_cookie_cnt >= attr->dma_attr_sgllen) {
252012f080e7Smrj 			partial = B_TRUE;
252112f080e7Smrj 			ASSERT(window->wd_cookie_cnt == attr->dma_attr_sgllen);
252212f080e7Smrj 			e = rootnex_sgllen_window_boundary(hp, dma, &window,
252312f080e7Smrj 			    cookie, attr, cur_offset);
252412f080e7Smrj 			if (e != DDI_SUCCESS) {
252512f080e7Smrj 				rootnex_teardown_copybuf(dma);
252612f080e7Smrj 				rootnex_teardown_windows(dma);
252712f080e7Smrj 				return (e);
252812f080e7Smrj 			}
252912f080e7Smrj 
253012f080e7Smrj 			/*
253112f080e7Smrj 			 * if the cookie uses the copy buffer, make sure the
253212f080e7Smrj 			 * new window we just moved to is set to sync.
253312f080e7Smrj 			 */
253412f080e7Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
253512f080e7Smrj 				window->wd_dosync = B_TRUE;
253612f080e7Smrj 			}
253712f080e7Smrj 			DTRACE_PROBE1(rootnex__sgllen__window, dev_info_t *,
253812f080e7Smrj 			    dma->dp_dip);
253912f080e7Smrj 
254012f080e7Smrj 		/* else if we will be over maxxfer */
254112f080e7Smrj 		} else if ((window->wd_size + dmac_size) >
254212f080e7Smrj 		    dma->dp_maxxfer) {
254312f080e7Smrj 			partial = B_TRUE;
254412f080e7Smrj 			e = rootnex_maxxfer_window_boundary(hp, dma, &window,
254512f080e7Smrj 			    cookie);
254612f080e7Smrj 			if (e != DDI_SUCCESS) {
254712f080e7Smrj 				rootnex_teardown_copybuf(dma);
254812f080e7Smrj 				rootnex_teardown_windows(dma);
254912f080e7Smrj 				return (e);
255012f080e7Smrj 			}
255112f080e7Smrj 
255212f080e7Smrj 			/*
255312f080e7Smrj 			 * if the cookie uses the copy buffer, make sure the
255412f080e7Smrj 			 * new window we just moved to is set to sync.
255512f080e7Smrj 			 */
255612f080e7Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
255712f080e7Smrj 				window->wd_dosync = B_TRUE;
255812f080e7Smrj 			}
255912f080e7Smrj 			DTRACE_PROBE1(rootnex__maxxfer__window, dev_info_t *,
256012f080e7Smrj 			    dma->dp_dip);
256112f080e7Smrj 
256212f080e7Smrj 		/* else this cookie fits in the current window */
256312f080e7Smrj 		} else {
256412f080e7Smrj 			window->wd_cookie_cnt++;
256512f080e7Smrj 			window->wd_size += dmac_size;
256612f080e7Smrj 		}
256712f080e7Smrj 
256812f080e7Smrj 		/* track our offset into the buffer, go to the next cookie */
256912f080e7Smrj 		ASSERT(dmac_size <= dma->dp_dma.dmao_size);
257012f080e7Smrj 		ASSERT(cookie->dmac_size <= dmac_size);
257112f080e7Smrj 		cur_offset += dmac_size;
257212f080e7Smrj 		cookie++;
257312f080e7Smrj 	}
257412f080e7Smrj 
257512f080e7Smrj 	/* if we ended up with a zero sized window in the end, clean it up */
257612f080e7Smrj 	if (window->wd_size == 0) {
257712f080e7Smrj 		hp->dmai_nwin--;
257812f080e7Smrj 		window--;
257912f080e7Smrj 	}
258012f080e7Smrj 
258112f080e7Smrj 	ASSERT(window->wd_trim.tr_trim_last == B_FALSE);
258212f080e7Smrj 
258312f080e7Smrj 	if (!partial) {
258412f080e7Smrj 		return (DDI_DMA_MAPPED);
258512f080e7Smrj 	}
258612f080e7Smrj 
258712f080e7Smrj 	ASSERT(dma->dp_partial_required);
258812f080e7Smrj 	return (DDI_DMA_PARTIAL_MAP);
258912f080e7Smrj }
259012f080e7Smrj 
259112f080e7Smrj 
259212f080e7Smrj /*
259312f080e7Smrj  * rootnex_setup_copybuf()
259412f080e7Smrj  *    Called in bind slowpath. Figures out if we're going to use the copy
259512f080e7Smrj  *    buffer, and if we do, sets up the basic state to handle it.
259612f080e7Smrj  */
259712f080e7Smrj static int
259812f080e7Smrj rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
259912f080e7Smrj     rootnex_dma_t *dma, ddi_dma_attr_t *attr)
260012f080e7Smrj {
260112f080e7Smrj 	rootnex_sglinfo_t *sinfo;
260212f080e7Smrj 	ddi_dma_attr_t lattr;
260312f080e7Smrj 	size_t max_copybuf;
260412f080e7Smrj 	int cansleep;
260512f080e7Smrj 	int e;
260612f080e7Smrj #if !defined(__amd64)
260712f080e7Smrj 	int vmflag;
260812f080e7Smrj #endif
260912f080e7Smrj 
261012f080e7Smrj 
261112f080e7Smrj 	sinfo = &dma->dp_sglinfo;
261212f080e7Smrj 
261312f080e7Smrj 	/*
261412f080e7Smrj 	 * read this first so it's consistent throughout the routine, since it
261512f080e7Smrj 	 * can be patched on the fly.
261612f080e7Smrj 	 */
261712f080e7Smrj 	max_copybuf = rootnex_max_copybuf_size & MMU_PAGEMASK;
261812f080e7Smrj 
261912f080e7Smrj 	/* We need to call into the rootnex on ddi_dma_sync() */
262012f080e7Smrj 	hp->dmai_rflags &= ~DMP_NOSYNC;
262112f080e7Smrj 
262212f080e7Smrj 	/* make sure the copybuf size <= the max size */
262312f080e7Smrj 	dma->dp_copybuf_size = MIN(sinfo->si_copybuf_req, max_copybuf);
262412f080e7Smrj 	ASSERT((dma->dp_copybuf_size & MMU_PAGEOFFSET) == 0);
262512f080e7Smrj 
262612f080e7Smrj #if !defined(__amd64)
262712f080e7Smrj 	/*
262812f080e7Smrj 	 * if we don't have kva space to copy to/from, allocate the KVA space
262912f080e7Smrj 	 * now. We only do this for the 32-bit kernel. We use seg kpm space for
263012f080e7Smrj 	 * the 64-bit kernel.
263112f080e7Smrj 	 */
263212f080e7Smrj 	if ((dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) ||
263312f080e7Smrj 	    (dmareq->dmar_object.dmao_obj.virt_obj.v_as != NULL)) {
263412f080e7Smrj 
263512f080e7Smrj 		/* convert the sleep flags */
263612f080e7Smrj 		if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
263712f080e7Smrj 			vmflag = VM_SLEEP;
263812f080e7Smrj 		} else {
263912f080e7Smrj 			vmflag = VM_NOSLEEP;
264012f080e7Smrj 		}
264112f080e7Smrj 
264212f080e7Smrj 		/* allocate Kernel VA space that we can bcopy to/from */
264312f080e7Smrj 		dma->dp_kva = vmem_alloc(heap_arena, dma->dp_copybuf_size,
264412f080e7Smrj 		    vmflag);
264512f080e7Smrj 		if (dma->dp_kva == NULL) {
264612f080e7Smrj 			return (DDI_DMA_NORESOURCES);
264712f080e7Smrj 		}
264812f080e7Smrj 	}
264912f080e7Smrj #endif
265012f080e7Smrj 
265112f080e7Smrj 	/* convert the sleep flags */
265212f080e7Smrj 	if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
265312f080e7Smrj 		cansleep = 1;
265412f080e7Smrj 	} else {
265512f080e7Smrj 		cansleep = 0;
265612f080e7Smrj 	}
265712f080e7Smrj 
265812f080e7Smrj 	/*
265912f080e7Smrj 	 * Allocate the actual copy buffer. This needs to fit within the DMA
266012f080e7Smrj 	 * engine's limits, so we can't use kmem_alloc...
266112f080e7Smrj 	 */
266212f080e7Smrj 	lattr = *attr;
266312f080e7Smrj 	lattr.dma_attr_align = MMU_PAGESIZE;
266412f080e7Smrj 	e = i_ddi_mem_alloc(dma->dp_dip, &lattr, dma->dp_copybuf_size, cansleep,
266512f080e7Smrj 	    0, NULL, &dma->dp_cbaddr, &dma->dp_cbsize, NULL);
266612f080e7Smrj 	if (e != DDI_SUCCESS) {
266712f080e7Smrj #if !defined(__amd64)
266812f080e7Smrj 		if (dma->dp_kva != NULL) {
266912f080e7Smrj 			vmem_free(heap_arena, dma->dp_kva,
267012f080e7Smrj 			    dma->dp_copybuf_size);
267112f080e7Smrj 		}
267212f080e7Smrj #endif
267312f080e7Smrj 		return (DDI_DMA_NORESOURCES);
267412f080e7Smrj 	}
267512f080e7Smrj 
267612f080e7Smrj 	DTRACE_PROBE2(rootnex__alloc__copybuf, dev_info_t *, dma->dp_dip,
267712f080e7Smrj 	    size_t, dma->dp_copybuf_size);
267812f080e7Smrj 
267912f080e7Smrj 	return (DDI_SUCCESS);
268012f080e7Smrj }
268112f080e7Smrj 
268212f080e7Smrj 
268312f080e7Smrj /*
268412f080e7Smrj  * rootnex_setup_windows()
268512f080e7Smrj  *    Called in bind slowpath to setup the window state. We always have windows
268612f080e7Smrj  *    in the slowpath, even if the window count = 1.
268712f080e7Smrj  */
268812f080e7Smrj static int
268912f080e7Smrj rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
269012f080e7Smrj     ddi_dma_attr_t *attr, int kmflag)
269112f080e7Smrj {
269212f080e7Smrj 	rootnex_window_t *windowp;
269312f080e7Smrj 	rootnex_sglinfo_t *sinfo;
269412f080e7Smrj 	size_t copy_state_size;
269512f080e7Smrj 	size_t win_state_size;
269612f080e7Smrj 	size_t state_available;
269712f080e7Smrj 	size_t space_needed;
269812f080e7Smrj 	uint_t copybuf_win;
269912f080e7Smrj 	uint_t maxxfer_win;
270012f080e7Smrj 	size_t space_used;
270112f080e7Smrj 	uint_t sglwin;
270212f080e7Smrj 
270312f080e7Smrj 
270412f080e7Smrj 	sinfo = &dma->dp_sglinfo;
270512f080e7Smrj 
270612f080e7Smrj 	dma->dp_current_win = 0;
270712f080e7Smrj 	hp->dmai_nwin = 0;
270812f080e7Smrj 
270912f080e7Smrj 	/* If we don't need to do a partial, we only have one window */
271012f080e7Smrj 	if (!dma->dp_partial_required) {
271112f080e7Smrj 		dma->dp_max_win = 1;
271212f080e7Smrj 
271312f080e7Smrj 	/*
271412f080e7Smrj 	 * we need multiple windows, so we need to figure out the worst case
271512f080e7Smrj 	 * number of windows.
271612f080e7Smrj 	 */
27177c478bd9Sstevel@tonic-gate 	} else {
27187c478bd9Sstevel@tonic-gate 		/*
271912f080e7Smrj 		 * if we need windows because we need more copy buffer than
272012f080e7Smrj 		 * we allow, the worst case number of windows we could need
272112f080e7Smrj 		 * here would be (copybuf space required / copybuf space that
272212f080e7Smrj 		 * we have) plus one for remainder, and plus 2 to handle the
272312f080e7Smrj 		 * extra pages on the trim for the first and last pages of the
272412f080e7Smrj 		 * buffer (a page is the minimum window size, so under the
272512f080e7Smrj 		 * right attr settings, you could have a window for each page).
272612f080e7Smrj 		 * The last page will only be hit here if the size is not a
272712f080e7Smrj 		 * multiple of the granularity (which theoretically shouldn't
272812f080e7Smrj 		 * be the case but never has been enforced, so we could have
272912f080e7Smrj 		 * broken things without it).
27307c478bd9Sstevel@tonic-gate 		 */
273112f080e7Smrj 		if (sinfo->si_copybuf_req > dma->dp_copybuf_size) {
273212f080e7Smrj 			ASSERT(dma->dp_copybuf_size > 0);
273312f080e7Smrj 			copybuf_win = (sinfo->si_copybuf_req /
273412f080e7Smrj 			    dma->dp_copybuf_size) + 1 + 2;
27357c478bd9Sstevel@tonic-gate 		} else {
273612f080e7Smrj 			copybuf_win = 0;
27377c478bd9Sstevel@tonic-gate 		}
273812f080e7Smrj 
273912f080e7Smrj 		/*
274012f080e7Smrj 		 * if we need windows because we have more cookies than the H/W
274112f080e7Smrj 		 * can handle, the number of windows we would need here would
274212f080e7Smrj 		 * be (cookie count / cookie count the H/W supports) plus one
274312f080e7Smrj 		 * remainder, and plus 2 to handle the extra pages on the trim
274412f080e7Smrj 		 * (see above comment about trim)
274512f080e7Smrj 		 */
274612f080e7Smrj 		if (attr->dma_attr_sgllen < sinfo->si_sgl_size) {
274712f080e7Smrj 			sglwin = ((sinfo->si_sgl_size / attr->dma_attr_sgllen)
274812f080e7Smrj 			    + 1) + 2;
27497c478bd9Sstevel@tonic-gate 		} else {
275012f080e7Smrj 			sglwin = 0;
27517c478bd9Sstevel@tonic-gate 		}
275212f080e7Smrj 
275312f080e7Smrj 		/*
275412f080e7Smrj 		 * if we need windows because we're binding more memory than the
275512f080e7Smrj 		 * H/W can transfer at once, the number of windows we would need
275612f080e7Smrj 		 * here would be (xfer count / max xfer H/W supports) plus one
275712f080e7Smrj 		 * for remainder, and plus 2 to handle the extra pages on the
275812f080e7Smrj 		 * trim (see above comment about trim)
275912f080e7Smrj 		 */
276012f080e7Smrj 		if (dma->dp_dma.dmao_size > dma->dp_maxxfer) {
276112f080e7Smrj 			maxxfer_win = (dma->dp_dma.dmao_size /
276212f080e7Smrj 			    dma->dp_maxxfer) + 1 + 2;
276312f080e7Smrj 		} else {
276412f080e7Smrj 			maxxfer_win = 0;
27657c478bd9Sstevel@tonic-gate 		}
276612f080e7Smrj 		dma->dp_max_win =  copybuf_win + sglwin + maxxfer_win;
276712f080e7Smrj 		ASSERT(dma->dp_max_win > 0);
276812f080e7Smrj 	}
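
	/*
	 * Worked example of the worst case math above (hypothetical values,
	 * not taken from any real device or bind):
	 *     si_copybuf_req = 0x28000, dp_copybuf_size = 0x10000
	 *         copybuf_win = (0x28000 / 0x10000) + 1 + 2 = 5
	 *     si_sgl_size = 100, dma_attr_sgllen = 32
	 *         sglwin = (100 / 32) + 1 + 2 = 6
	 *     dmao_size = 0x180000, dp_maxxfer = 0x100000
	 *         maxxfer_win = (0x180000 / 0x100000) + 1 + 2 = 4
	 *     dp_max_win = 5 + 6 + 4 = 15
	 */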
276912f080e7Smrj 	win_state_size = dma->dp_max_win * sizeof (rootnex_window_t);
277012f080e7Smrj 
277112f080e7Smrj 	/*
277212f080e7Smrj 	 * Get space for window and potential copy buffer state. Before we
277312f080e7Smrj 	 * go and allocate memory, see if we can get away with using what's
277412f080e7Smrj 	 * left in the pre-allocated state or the dynamically allocated sgl.
277512f080e7Smrj 	 */
277612f080e7Smrj 	space_used = (uintptr_t)(sinfo->si_sgl_size *
277712f080e7Smrj 	    sizeof (ddi_dma_cookie_t));
277812f080e7Smrj 
277912f080e7Smrj 	/* if we dynamically allocated space for the cookies */
278012f080e7Smrj 	if (dma->dp_need_to_free_cookie) {
278112f080e7Smrj 		/* if we have more space in the pre-allocated buffer, use it */
278212f080e7Smrj 		ASSERT(space_used <= dma->dp_cookie_size);
278312f080e7Smrj 		if ((dma->dp_cookie_size - space_used) <=
278412f080e7Smrj 		    rootnex_state->r_prealloc_size) {
278512f080e7Smrj 			state_available = rootnex_state->r_prealloc_size;
278612f080e7Smrj 			windowp = (rootnex_window_t *)dma->dp_prealloc_buffer;
278712f080e7Smrj 
278812f080e7Smrj 		/*
278912f080e7Smrj 		 * else, we have more free space in the dynamically allocated
279012f080e7Smrj 		 * buffer, i.e. the buffer wasn't worst case fragmented so we
279112f080e7Smrj 		 * didn't need a lot of cookies.
279212f080e7Smrj 		 */
279312f080e7Smrj 		} else {
279412f080e7Smrj 			state_available = dma->dp_cookie_size - space_used;
279512f080e7Smrj 			windowp = (rootnex_window_t *)
279612f080e7Smrj 			    &dma->dp_cookies[sinfo->si_sgl_size];
279712f080e7Smrj 		}
279812f080e7Smrj 
279912f080e7Smrj 	/* we used the pre-allocated buffer */
280012f080e7Smrj 	} else {
280112f080e7Smrj 		ASSERT(space_used <= rootnex_state->r_prealloc_size);
280212f080e7Smrj 		state_available = rootnex_state->r_prealloc_size - space_used;
280312f080e7Smrj 		windowp = (rootnex_window_t *)
280412f080e7Smrj 		    &dma->dp_cookies[sinfo->si_sgl_size];
280512f080e7Smrj 	}
280612f080e7Smrj 
280712f080e7Smrj 	/*
280812f080e7Smrj 	 * figure out how much state we need to track the copy buffer. Add an
280912f080e7Smrj 	 * additional 8 bytes for pointer alignment later.
281012f080e7Smrj 	 */
281112f080e7Smrj 	if (dma->dp_copybuf_size > 0) {
281212f080e7Smrj 		copy_state_size = sinfo->si_max_pages *
281312f080e7Smrj 		    sizeof (rootnex_pgmap_t);
281412f080e7Smrj 	} else {
281512f080e7Smrj 		copy_state_size = 0;
281612f080e7Smrj 	}
281712f080e7Smrj 	/* add an additional 8 bytes for pointer alignment */
281812f080e7Smrj 	space_needed = win_state_size + copy_state_size + 0x8;
281912f080e7Smrj 
282012f080e7Smrj 	/* if we have enough space already, use it */
282112f080e7Smrj 	if (state_available >= space_needed) {
282212f080e7Smrj 		dma->dp_window = windowp;
282312f080e7Smrj 		dma->dp_need_to_free_window = B_FALSE;
282412f080e7Smrj 
282512f080e7Smrj 	/* not enough space, need to allocate more. */
282612f080e7Smrj 	} else {
282712f080e7Smrj 		dma->dp_window = kmem_alloc(space_needed, kmflag);
282812f080e7Smrj 		if (dma->dp_window == NULL) {
282912f080e7Smrj 			return (DDI_DMA_NORESOURCES);
283012f080e7Smrj 		}
283112f080e7Smrj 		dma->dp_need_to_free_window = B_TRUE;
283212f080e7Smrj 		dma->dp_window_size = space_needed;
283312f080e7Smrj 		DTRACE_PROBE2(rootnex__bind__sp__alloc, dev_info_t *,
283412f080e7Smrj 		    dma->dp_dip, size_t, space_needed);
283512f080e7Smrj 	}
283612f080e7Smrj 
283712f080e7Smrj 	/*
283812f080e7Smrj 	 * we allocate copy buffer state and window state at the same time.
283912f080e7Smrj 	 * setup our copy buffer state pointers. Make sure it's aligned.
284012f080e7Smrj 	 */
284112f080e7Smrj 	if (dma->dp_copybuf_size > 0) {
284212f080e7Smrj 		dma->dp_pgmap = (rootnex_pgmap_t *)(((uintptr_t)
284312f080e7Smrj 		    &dma->dp_window[dma->dp_max_win] + 0x7) & ~0x7);
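
		/*
		 * The expression above rounds the pointer up to the next
		 * 8 byte boundary. For example, a (hypothetical) address of
		 * 0xd6003a4c becomes 0xd6003a50, while an already aligned
		 * 0xd6003a48 is left unchanged.
		 */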
284412f080e7Smrj 
284512f080e7Smrj #if !defined(__amd64)
284612f080e7Smrj 		/*
284712f080e7Smrj 		 * make sure all pm_mapped, pm_vaddr, and pm_pp are set to
284812f080e7Smrj 		 * false/NULL. Should be quicker to bzero vs loop and set.
284912f080e7Smrj 		 */
285012f080e7Smrj 		bzero(dma->dp_pgmap, copy_state_size);
285112f080e7Smrj #endif
285212f080e7Smrj 	} else {
285312f080e7Smrj 		dma->dp_pgmap = NULL;
285412f080e7Smrj 	}
285512f080e7Smrj 
285612f080e7Smrj 	return (DDI_SUCCESS);
285712f080e7Smrj }
285812f080e7Smrj 
285912f080e7Smrj 
286012f080e7Smrj /*
286112f080e7Smrj  * rootnex_teardown_copybuf()
286212f080e7Smrj  *    cleans up after rootnex_setup_copybuf()
286312f080e7Smrj  */
286412f080e7Smrj static void
286512f080e7Smrj rootnex_teardown_copybuf(rootnex_dma_t *dma)
286612f080e7Smrj {
286712f080e7Smrj #if !defined(__amd64)
286812f080e7Smrj 	int i;
286912f080e7Smrj 
287012f080e7Smrj 	/*
287112f080e7Smrj 	 * if we allocated kernel heap VMEM space, go through all the pages and
287212f080e7Smrj 	 * unmap any of the ones that were mapped into the kernel heap VMEM
287312f080e7Smrj 	 * arena. Then free the VMEM space.
287412f080e7Smrj 	 */
287512f080e7Smrj 	if (dma->dp_kva != NULL) {
287612f080e7Smrj 		for (i = 0; i < dma->dp_sglinfo.si_max_pages; i++) {
287712f080e7Smrj 			if (dma->dp_pgmap[i].pm_mapped) {
287812f080e7Smrj 				hat_unload(kas.a_hat, dma->dp_pgmap[i].pm_kaddr,
287912f080e7Smrj 				    MMU_PAGESIZE, HAT_UNLOAD);
288012f080e7Smrj 				dma->dp_pgmap[i].pm_mapped = B_FALSE;
288112f080e7Smrj 			}
288212f080e7Smrj 		}
288312f080e7Smrj 
288412f080e7Smrj 		vmem_free(heap_arena, dma->dp_kva, dma->dp_copybuf_size);
288512f080e7Smrj 	}
288612f080e7Smrj 
288712f080e7Smrj #endif
288812f080e7Smrj 
288912f080e7Smrj 	/* if we allocated a copy buffer, free it */
289012f080e7Smrj 	if (dma->dp_cbaddr != NULL) {
28917b93957cSeota 		i_ddi_mem_free(dma->dp_cbaddr, NULL);
289212f080e7Smrj 	}
289312f080e7Smrj }
289412f080e7Smrj 
289512f080e7Smrj 
289612f080e7Smrj /*
289712f080e7Smrj  * rootnex_teardown_windows()
289812f080e7Smrj  *    cleans up after rootnex_setup_windows()
289912f080e7Smrj  */
290012f080e7Smrj static void
290112f080e7Smrj rootnex_teardown_windows(rootnex_dma_t *dma)
290212f080e7Smrj {
290312f080e7Smrj 	/*
290412f080e7Smrj 	 * if we had to allocate window state on the last bind (because we
290512f080e7Smrj 	 * didn't have enough pre-allocated space in the handle), free it.
290612f080e7Smrj 	 */
290712f080e7Smrj 	if (dma->dp_need_to_free_window) {
290812f080e7Smrj 		kmem_free(dma->dp_window, dma->dp_window_size);
290912f080e7Smrj 	}
291012f080e7Smrj }
291112f080e7Smrj 
291212f080e7Smrj 
291312f080e7Smrj /*
291412f080e7Smrj  * rootnex_init_win()
291512f080e7Smrj  *    Called in bind slow path during creation of a new window. Initializes
291612f080e7Smrj  *    window state to default values.
291712f080e7Smrj  */
291812f080e7Smrj /*ARGSUSED*/
291912f080e7Smrj static void
292012f080e7Smrj rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
292112f080e7Smrj     rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset)
292212f080e7Smrj {
292312f080e7Smrj 	hp->dmai_nwin++;
292412f080e7Smrj 	window->wd_dosync = B_FALSE;
292512f080e7Smrj 	window->wd_offset = cur_offset;
292612f080e7Smrj 	window->wd_size = 0;
292712f080e7Smrj 	window->wd_first_cookie = cookie;
292812f080e7Smrj 	window->wd_cookie_cnt = 0;
292912f080e7Smrj 	window->wd_trim.tr_trim_first = B_FALSE;
293012f080e7Smrj 	window->wd_trim.tr_trim_last = B_FALSE;
293112f080e7Smrj 	window->wd_trim.tr_first_copybuf_win = B_FALSE;
293212f080e7Smrj 	window->wd_trim.tr_last_copybuf_win = B_FALSE;
293312f080e7Smrj #if !defined(__amd64)
293412f080e7Smrj 	window->wd_remap_copybuf = dma->dp_cb_remaping;
293512f080e7Smrj #endif
293612f080e7Smrj }
293712f080e7Smrj 
293812f080e7Smrj 
293912f080e7Smrj /*
294012f080e7Smrj  * rootnex_setup_cookie()
294112f080e7Smrj  *    Called in the bind slow path when the sgl uses the copy buffer. If any of
294212f080e7Smrj  *    the sgl uses the copy buffer, we need to go through each cookie, figure
294312f080e7Smrj  *    out if it uses the copy buffer, and if it does, save away everything we'll
294412f080e7Smrj  *    need during sync.
294512f080e7Smrj  */
294612f080e7Smrj static void
294712f080e7Smrj rootnex_setup_cookie(ddi_dma_obj_t *dmar_object, rootnex_dma_t *dma,
294812f080e7Smrj     ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used,
294912f080e7Smrj     page_t **cur_pp)
295012f080e7Smrj {
295112f080e7Smrj 	boolean_t copybuf_sz_power_2;
295212f080e7Smrj 	rootnex_sglinfo_t *sinfo;
295312f080e7Smrj 	uint_t pidx;
295412f080e7Smrj 	uint_t pcnt;
295512f080e7Smrj 	off_t poff;
295612f080e7Smrj #if defined(__amd64)
295712f080e7Smrj 	pfn_t pfn;
295812f080e7Smrj #else
295912f080e7Smrj 	page_t **pplist;
296012f080e7Smrj #endif
296112f080e7Smrj 
296212f080e7Smrj 	sinfo = &dma->dp_sglinfo;
296312f080e7Smrj 
296412f080e7Smrj 	/*
296512f080e7Smrj 	 * Calculate the page index relative to the start of the buffer. The
296612f080e7Smrj 	 * index to the current page for our buffer is the offset into the
296712f080e7Smrj 	 * first page of the buffer plus our current offset into the buffer
296812f080e7Smrj 	 * itself, shifted of course...
296912f080e7Smrj 	 */
297012f080e7Smrj 	pidx = (sinfo->si_buf_offset + cur_offset) >> MMU_PAGESHIFT;
297112f080e7Smrj 	ASSERT(pidx < sinfo->si_max_pages);
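
	/*
	 * For example (hypothetical offsets): with si_buf_offset == 0x800 and
	 * cur_offset == 0x1800, pidx == (0x800 + 0x1800) >> MMU_PAGESHIFT ==
	 * 0x2000 >> 12 == 2, i.e. the third page of the buffer.
	 */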
297212f080e7Smrj 
297312f080e7Smrj 	/* if this cookie uses the copy buffer */
297412f080e7Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
297512f080e7Smrj 		/*
297612f080e7Smrj 		 * NOTE: we know that since this cookie uses the copy buffer, it
297712f080e7Smrj 		 * is <= MMU_PAGESIZE.
297812f080e7Smrj 		 */
297912f080e7Smrj 
298012f080e7Smrj 		/*
298112f080e7Smrj 		 * get the offset into the page. For the 64-bit kernel, get the
298212f080e7Smrj 		 * pfn which we'll use with seg kpm.
298312f080e7Smrj 		 */
298412f080e7Smrj 		poff = cookie->_dmu._dmac_ll & MMU_PAGEOFFSET;
298512f080e7Smrj #if defined(__amd64)
298612f080e7Smrj 		pfn = cookie->_dmu._dmac_ll >> MMU_PAGESHIFT;
298712f080e7Smrj #endif
298812f080e7Smrj 
298912f080e7Smrj 		/* figure out if the copybuf size is a power of 2 */
299012f080e7Smrj 		if (dma->dp_copybuf_size & (dma->dp_copybuf_size - 1)) {
299112f080e7Smrj 			copybuf_sz_power_2 = B_FALSE;
299212f080e7Smrj 		} else {
299312f080e7Smrj 			copybuf_sz_power_2 = B_TRUE;
299412f080e7Smrj 		}
299512f080e7Smrj 
299612f080e7Smrj 		/* This page uses the copy buffer */
299712f080e7Smrj 		dma->dp_pgmap[pidx].pm_uses_copybuf = B_TRUE;
299812f080e7Smrj 
299912f080e7Smrj 		/*
300012f080e7Smrj 		 * save the copy buffer KVA that we'll use with this page.
300112f080e7Smrj 		 * if we still fit within the copybuf, it's a simple add.
300212f080e7Smrj 		 * otherwise, we need to wrap over using & or % accordingly.
300312f080e7Smrj 		 */
300412f080e7Smrj 		if ((*copybuf_used + MMU_PAGESIZE) <= dma->dp_copybuf_size) {
300512f080e7Smrj 			dma->dp_pgmap[pidx].pm_cbaddr = dma->dp_cbaddr +
300612f080e7Smrj 			    *copybuf_used;
300712f080e7Smrj 		} else {
300812f080e7Smrj 			if (copybuf_sz_power_2) {
300912f080e7Smrj 				dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
301012f080e7Smrj 				    (uintptr_t)dma->dp_cbaddr +
301112f080e7Smrj 				    (*copybuf_used &
301212f080e7Smrj 				    (dma->dp_copybuf_size - 1)));
301312f080e7Smrj 			} else {
301412f080e7Smrj 				dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
301512f080e7Smrj 				    (uintptr_t)dma->dp_cbaddr +
301612f080e7Smrj 				    (*copybuf_used % dma->dp_copybuf_size));
301712f080e7Smrj 			}
301812f080e7Smrj 		}
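
		/*
		 * Wrap example (hypothetical sizes): with a 0x4000 byte copy
		 * buffer and *copybuf_used == 0x5000, the power of 2 path
		 * computes 0x5000 & 0x3fff == 0x1000, the same result the
		 * modulo path (0x5000 % 0x4000) would give, just without the
		 * divide.
		 */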
301912f080e7Smrj 
302012f080e7Smrj 		/*
302112f080e7Smrj 		 * overwrite the cookie physical address with the physical
302212f080e7Smrj 		 * address of the copy buffer page that we will use for this
302312f080e7Smrj 		 * part of the transfer.
302412f080e7Smrj 		 */
302512f080e7Smrj 		cookie->_dmu._dmac_ll = ptob64(hat_getpfnum(kas.a_hat,
302612f080e7Smrj 		    dma->dp_pgmap[pidx].pm_cbaddr)) + poff;
302712f080e7Smrj 
302812f080e7Smrj 		/* if we have a kernel VA, it's easy, just save that address */
302912f080e7Smrj 		if ((dmar_object->dmao_type != DMA_OTYP_PAGES) &&
303012f080e7Smrj 		    (sinfo->si_asp == &kas)) {
303112f080e7Smrj 			/*
303212f080e7Smrj 			 * save away the page aligned virtual address of the
303312f080e7Smrj 			 * driver buffer. Offsets are handled in the sync code.
303412f080e7Smrj 			 */
303512f080e7Smrj 			dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)(((uintptr_t)
303612f080e7Smrj 			    dmar_object->dmao_obj.virt_obj.v_addr + cur_offset)
303712f080e7Smrj 			    & MMU_PAGEMASK);
303812f080e7Smrj #if !defined(__amd64)
303912f080e7Smrj 			/*
304012f080e7Smrj 			 * we didn't need to, and will never need to map this
304112f080e7Smrj 			 * page.
304212f080e7Smrj 			 */
304312f080e7Smrj 			dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
304412f080e7Smrj #endif
304512f080e7Smrj 
304612f080e7Smrj 		/* we don't have a kernel VA. We need one for the bcopy. */
304712f080e7Smrj 		} else {
304812f080e7Smrj #if defined(__amd64)
304912f080e7Smrj 			/*
305012f080e7Smrj 			 * for the 64-bit kernel, it's easy. We use seg kpm to
305112f080e7Smrj 			 * get a Kernel VA for the corresponding pfn.
305212f080e7Smrj 			 */
305312f080e7Smrj 			dma->dp_pgmap[pidx].pm_kaddr = hat_kpm_pfn2va(pfn);
305412f080e7Smrj #else
305512f080e7Smrj 			/*
305612f080e7Smrj 			 * for the 32-bit kernel, this is a pain. First we'll
305712f080e7Smrj 			 * save away the page_t or user VA for this page. This
305812f080e7Smrj 			 * is needed in rootnex_dma_win() when we switch to a
305912f080e7Smrj 			 * new window which requires us to re-map the copy
306012f080e7Smrj 			 * buffer.
306112f080e7Smrj 			 */
306212f080e7Smrj 			pplist = dmar_object->dmao_obj.virt_obj.v_priv;
306312f080e7Smrj 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
306412f080e7Smrj 				dma->dp_pgmap[pidx].pm_pp = *cur_pp;
306512f080e7Smrj 				dma->dp_pgmap[pidx].pm_vaddr = NULL;
306612f080e7Smrj 			} else if (pplist != NULL) {
306712f080e7Smrj 				dma->dp_pgmap[pidx].pm_pp = pplist[pidx];
306812f080e7Smrj 				dma->dp_pgmap[pidx].pm_vaddr = NULL;
306912f080e7Smrj 			} else {
307012f080e7Smrj 				dma->dp_pgmap[pidx].pm_pp = NULL;
307112f080e7Smrj 				dma->dp_pgmap[pidx].pm_vaddr = (caddr_t)
307212f080e7Smrj 				    (((uintptr_t)
307312f080e7Smrj 				    dmar_object->dmao_obj.virt_obj.v_addr +
307412f080e7Smrj 				    cur_offset) & MMU_PAGEMASK);
307512f080e7Smrj 			}
307612f080e7Smrj 
307712f080e7Smrj 			/*
307812f080e7Smrj 			 * save away the page aligned virtual address which was
307912f080e7Smrj 			 * allocated from the kernel heap arena (taking into
308012f080e7Smrj 			 * account if we need more copy buffer than we allocated
308112f080e7Smrj 			 * and use multiple windows to handle this, i.e. &,%).
308212f080e7Smrj 			 * NOTE: there isn't any physical memory backing up this
308312f080e7Smrj 			 * virtual address space currently.
308412f080e7Smrj 			 */
308512f080e7Smrj 			if ((*copybuf_used + MMU_PAGESIZE) <=
308612f080e7Smrj 			    dma->dp_copybuf_size) {
308712f080e7Smrj 				dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
308812f080e7Smrj 				    (((uintptr_t)dma->dp_kva + *copybuf_used) &
308912f080e7Smrj 				    MMU_PAGEMASK);
309012f080e7Smrj 			} else {
309112f080e7Smrj 				if (copybuf_sz_power_2) {
309212f080e7Smrj 					dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
309312f080e7Smrj 					    (((uintptr_t)dma->dp_kva +
309412f080e7Smrj 					    (*copybuf_used &
309512f080e7Smrj 					    (dma->dp_copybuf_size - 1))) &
309612f080e7Smrj 					    MMU_PAGEMASK);
309712f080e7Smrj 				} else {
309812f080e7Smrj 					dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
309912f080e7Smrj 					    (((uintptr_t)dma->dp_kva +
310012f080e7Smrj 					    (*copybuf_used %
310112f080e7Smrj 					    dma->dp_copybuf_size)) &
310212f080e7Smrj 					    MMU_PAGEMASK);
310312f080e7Smrj 				}
310412f080e7Smrj 			}
310512f080e7Smrj 
310612f080e7Smrj 			/*
310712f080e7Smrj 			 * if we haven't used up the available copy buffer yet,
310812f080e7Smrj 			 * map the kva to the physical page.
310912f080e7Smrj 			 */
311012f080e7Smrj 			if (!dma->dp_cb_remaping && ((*copybuf_used +
311112f080e7Smrj 			    MMU_PAGESIZE) <= dma->dp_copybuf_size)) {
311212f080e7Smrj 				dma->dp_pgmap[pidx].pm_mapped = B_TRUE;
311312f080e7Smrj 				if (dma->dp_pgmap[pidx].pm_pp != NULL) {
311412f080e7Smrj 					i86_pp_map(dma->dp_pgmap[pidx].pm_pp,
311512f080e7Smrj 					    dma->dp_pgmap[pidx].pm_kaddr);
311612f080e7Smrj 				} else {
311712f080e7Smrj 					i86_va_map(dma->dp_pgmap[pidx].pm_vaddr,
311812f080e7Smrj 					    sinfo->si_asp,
311912f080e7Smrj 					    dma->dp_pgmap[pidx].pm_kaddr);
312012f080e7Smrj 				}
312112f080e7Smrj 
312212f080e7Smrj 			/*
312312f080e7Smrj 			 * we've used up the available copy buffer, this page
312412f080e7Smrj 			 * will have to be mapped during rootnex_dma_win() when
312512f080e7Smrj 			 * we switch to a new window, which requires a re-map
312612f080e7Smrj 			 * of the copy buffer. (32-bit kernel only)
312712f080e7Smrj 			 */
312812f080e7Smrj 			} else {
312912f080e7Smrj 				dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
313012f080e7Smrj 			}
313112f080e7Smrj #endif
313212f080e7Smrj 			/* go to the next page_t */
313312f080e7Smrj 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
313412f080e7Smrj 				*cur_pp = (*cur_pp)->p_next;
313512f080e7Smrj 			}
313612f080e7Smrj 		}
313712f080e7Smrj 
313812f080e7Smrj 		/* add to the copy buffer count */
313912f080e7Smrj 		*copybuf_used += MMU_PAGESIZE;
314012f080e7Smrj 
314112f080e7Smrj 	/*
314212f080e7Smrj 	 * This cookie doesn't use the copy buffer. Walk through the pages this
314312f080e7Smrj 	 * cookie occupies to reflect this.
314412f080e7Smrj 	 */
314512f080e7Smrj 	} else {
314612f080e7Smrj 		/*
314712f080e7Smrj 		 * figure out how many pages the cookie occupies. We need to
314812f080e7Smrj 		 * use the original page offset of the buffer and the cookie's
314912f080e7Smrj 		 * offset in the buffer to do this.
315012f080e7Smrj 		 */
315112f080e7Smrj 		poff = (sinfo->si_buf_offset + cur_offset) & MMU_PAGEOFFSET;
315212f080e7Smrj 		pcnt = mmu_btopr(cookie->dmac_size + poff);
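
		/*
		 * e.g. (hypothetical values) a 0x1400 byte cookie starting at
		 * page offset 0xe00 spans mmu_btopr(0x1400 + 0xe00) ==
		 * mmu_btopr(0x2200) == 3 pages.
		 */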
315312f080e7Smrj 
315412f080e7Smrj 		while (pcnt > 0) {
315512f080e7Smrj #if !defined(__amd64)
315612f080e7Smrj 			/*
315712f080e7Smrj 			 * the 32-bit kernel doesn't have seg kpm, so we need
315812f080e7Smrj 			 * to map in the driver buffer (if it didn't come down
315912f080e7Smrj 			 * with a kernel VA) on the fly. Since this page doesn't
316012f080e7Smrj 			 * use the copy buffer, it's not, nor will it ever, have
316112f080e7Smrj 			 * to be mapped in.
316212f080e7Smrj 			 */
316312f080e7Smrj 			dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
316412f080e7Smrj #endif
316512f080e7Smrj 			dma->dp_pgmap[pidx].pm_uses_copybuf = B_FALSE;
316612f080e7Smrj 
316712f080e7Smrj 			/*
316812f080e7Smrj 			 * we need to update pidx and cur_pp or we'll lose
316912f080e7Smrj 			 * track of where we are.
317012f080e7Smrj 			 */
317112f080e7Smrj 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
317212f080e7Smrj 				*cur_pp = (*cur_pp)->p_next;
317312f080e7Smrj 			}
317412f080e7Smrj 			pidx++;
317512f080e7Smrj 			pcnt--;
317612f080e7Smrj 		}
317712f080e7Smrj 	}
317812f080e7Smrj }
317912f080e7Smrj 
318012f080e7Smrj 
318112f080e7Smrj /*
318212f080e7Smrj  * rootnex_sgllen_window_boundary()
318312f080e7Smrj  *    Called in the bind slow path when the next cookie causes us to exceed (in
318412f080e7Smrj  *    this case == since we start at 0 and sgllen starts at 1) the maximum sgl
318512f080e7Smrj  *    length supported by the DMA H/W.
318612f080e7Smrj  */
318712f080e7Smrj static int
318812f080e7Smrj rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
318912f080e7Smrj     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, ddi_dma_attr_t *attr,
319012f080e7Smrj     off_t cur_offset)
319112f080e7Smrj {
319212f080e7Smrj 	off_t new_offset;
319312f080e7Smrj 	size_t trim_sz;
319412f080e7Smrj 	off_t coffset;
319512f080e7Smrj 
319612f080e7Smrj 
319712f080e7Smrj 	/*
319812f080e7Smrj 	 * if we know we'll never have to trim, it's pretty easy. Just move to
319912f080e7Smrj 	 * the next window and init it. We're done.
320012f080e7Smrj 	 */
320112f080e7Smrj 	if (!dma->dp_trim_required) {
320212f080e7Smrj 		(*windowp)++;
320312f080e7Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
320412f080e7Smrj 		(*windowp)->wd_cookie_cnt++;
320512f080e7Smrj 		(*windowp)->wd_size = cookie->dmac_size;
320612f080e7Smrj 		return (DDI_SUCCESS);
320712f080e7Smrj 	}
320812f080e7Smrj 
320912f080e7Smrj 	/* figure out how much we need to trim from the window */
321012f080e7Smrj 	ASSERT(attr->dma_attr_granular != 0);
321112f080e7Smrj 	if (dma->dp_granularity_power_2) {
321212f080e7Smrj 		trim_sz = (*windowp)->wd_size & (attr->dma_attr_granular - 1);
321312f080e7Smrj 	} else {
321412f080e7Smrj 		trim_sz = (*windowp)->wd_size % attr->dma_attr_granular;
321512f080e7Smrj 	}
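
	/*
	 * For example (hypothetical values): with dma_attr_granular == 512
	 * and wd_size == 0x1300, trim_sz == 0x1300 & 0x1ff == 0x100, so
	 * 0x100 bytes have to be pushed into the next window to leave this
	 * window a whole multiple of the granularity.
	 */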
321612f080e7Smrj 
321712f080e7Smrj 	/* The window's a whole multiple of granularity. We're done */
321812f080e7Smrj 	if (trim_sz == 0) {
321912f080e7Smrj 		(*windowp)++;
322012f080e7Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
322112f080e7Smrj 		(*windowp)->wd_cookie_cnt++;
322212f080e7Smrj 		(*windowp)->wd_size = cookie->dmac_size;
322312f080e7Smrj 		return (DDI_SUCCESS);
322412f080e7Smrj 	}
322512f080e7Smrj 
322612f080e7Smrj 	/*
322712f080e7Smrj 	 * The window's not a whole multiple of granularity. Since we know this
322812f080e7Smrj 	 * is due to the sgllen, we need to go back to the last cookie and trim
322912f080e7Smrj 	 * that one, add the left over part of the old cookie into the new
323012f080e7Smrj 	 * window, and then add in the new cookie into the new window.
323112f080e7Smrj 	 */
323212f080e7Smrj 
323312f080e7Smrj 	/*
323412f080e7Smrj 	 * make sure the driver isn't making us do something bad... Trimming and
323512f080e7Smrj 	 * sgllen == 1 don't go together.
323612f080e7Smrj 	 */
323712f080e7Smrj 	if (attr->dma_attr_sgllen == 1) {
323812f080e7Smrj 		return (DDI_DMA_NOMAPPING);
323912f080e7Smrj 	}
324012f080e7Smrj 
324112f080e7Smrj 	/*
324212f080e7Smrj 	 * first, setup the current window to account for the trim. Need to go
324312f080e7Smrj 	 * back to the last cookie for this.
324412f080e7Smrj 	 */
324512f080e7Smrj 	cookie--;
324612f080e7Smrj 	(*windowp)->wd_trim.tr_trim_last = B_TRUE;
324712f080e7Smrj 	(*windowp)->wd_trim.tr_last_cookie = cookie;
324812f080e7Smrj 	(*windowp)->wd_trim.tr_last_paddr = cookie->_dmu._dmac_ll;
324912f080e7Smrj 	ASSERT(cookie->dmac_size > trim_sz);
325012f080e7Smrj 	(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
325112f080e7Smrj 	(*windowp)->wd_size -= trim_sz;
325212f080e7Smrj 
325312f080e7Smrj 	/* save the buffer offsets for the next window */
325412f080e7Smrj 	coffset = cookie->dmac_size - trim_sz;
325512f080e7Smrj 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
325612f080e7Smrj 
325712f080e7Smrj 	/*
325812f080e7Smrj 	 * set this now in case this is the first window. all other cases are
325912f080e7Smrj 	 * set in dma_win()
326012f080e7Smrj 	 */
326112f080e7Smrj 	cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
326212f080e7Smrj 
326312f080e7Smrj 	/*
326412f080e7Smrj 	 * initialize the next window using what's left over in the previous
326512f080e7Smrj 	 * cookie.
326612f080e7Smrj 	 */
326712f080e7Smrj 	(*windowp)++;
326812f080e7Smrj 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
326912f080e7Smrj 	(*windowp)->wd_cookie_cnt++;
327012f080e7Smrj 	(*windowp)->wd_trim.tr_trim_first = B_TRUE;
327112f080e7Smrj 	(*windowp)->wd_trim.tr_first_paddr = cookie->_dmu._dmac_ll + coffset;
327212f080e7Smrj 	(*windowp)->wd_trim.tr_first_size = trim_sz;
327312f080e7Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
327412f080e7Smrj 		(*windowp)->wd_dosync = B_TRUE;
327512f080e7Smrj 	}
327612f080e7Smrj 
327712f080e7Smrj 	/*
327812f080e7Smrj 	 * now go back to the current cookie and add it to the new window. set
327912f080e7Smrj 	 * the new window size to what was left over from the previous
328012f080e7Smrj 	 * cookie and what's in the current cookie.
328112f080e7Smrj 	 */
328212f080e7Smrj 	cookie++;
328312f080e7Smrj 	(*windowp)->wd_cookie_cnt++;
328412f080e7Smrj 	(*windowp)->wd_size = trim_sz + cookie->dmac_size;
328512f080e7Smrj 
328612f080e7Smrj 	/*
328712f080e7Smrj 	 * trim plus the next cookie could put us over maxxfer (a cookie can be
328812f080e7Smrj 	 * at most maxxfer in size). Handle that case.
328912f080e7Smrj 	 */
329012f080e7Smrj 	if ((*windowp)->wd_size > dma->dp_maxxfer) {
329112f080e7Smrj 		/*
329212f080e7Smrj 		 * maxxfer is already a whole multiple of granularity, and this
329312f080e7Smrj 		 * trim will be <= the previous trim (since a cookie can't be
329412f080e7Smrj 		 * larger than maxxfer). Make things simple here.
329512f080e7Smrj 		 */
329612f080e7Smrj 		trim_sz = (*windowp)->wd_size - dma->dp_maxxfer;
329712f080e7Smrj 		(*windowp)->wd_trim.tr_trim_last = B_TRUE;
329812f080e7Smrj 		(*windowp)->wd_trim.tr_last_cookie = cookie;
329912f080e7Smrj 		(*windowp)->wd_trim.tr_last_paddr = cookie->_dmu._dmac_ll;
330012f080e7Smrj 		(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
330112f080e7Smrj 		(*windowp)->wd_size -= trim_sz;
330212f080e7Smrj 		ASSERT((*windowp)->wd_size == dma->dp_maxxfer);
330312f080e7Smrj 
330412f080e7Smrj 		/* save the buffer offsets for the next window */
330512f080e7Smrj 		coffset = cookie->dmac_size - trim_sz;
330612f080e7Smrj 		new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
330712f080e7Smrj 
330812f080e7Smrj 		/* setup the next window */
330912f080e7Smrj 		(*windowp)++;
331012f080e7Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
331112f080e7Smrj 		(*windowp)->wd_cookie_cnt++;
331212f080e7Smrj 		(*windowp)->wd_trim.tr_trim_first = B_TRUE;
331312f080e7Smrj 		(*windowp)->wd_trim.tr_first_paddr = cookie->_dmu._dmac_ll +
331412f080e7Smrj 		    coffset;
331512f080e7Smrj 		(*windowp)->wd_trim.tr_first_size = trim_sz;
331612f080e7Smrj 	}
331712f080e7Smrj 
331812f080e7Smrj 	return (DDI_SUCCESS);
331912f080e7Smrj }
332012f080e7Smrj 
332112f080e7Smrj 
332212f080e7Smrj /*
332312f080e7Smrj  * rootnex_copybuf_window_boundary()
332412f080e7Smrj  *    Called in bind slowpath when we get to a window boundary because we used
332512f080e7Smrj  *    up all the copy buffer that we have.
332612f080e7Smrj  */
332712f080e7Smrj static int
332812f080e7Smrj rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
332912f080e7Smrj     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, off_t cur_offset,
333012f080e7Smrj     size_t *copybuf_used)
333112f080e7Smrj {
333212f080e7Smrj 	rootnex_sglinfo_t *sinfo;
333312f080e7Smrj 	off_t new_offset;
333412f080e7Smrj 	size_t trim_sz;
333512f080e7Smrj 	off_t coffset;
333612f080e7Smrj 	uint_t pidx;
333712f080e7Smrj 	off_t poff;
333812f080e7Smrj 
333912f080e7Smrj 
334012f080e7Smrj 	sinfo = &dma->dp_sglinfo;
334112f080e7Smrj 
334212f080e7Smrj 	/*
334312f080e7Smrj 	 * the copy buffer should be a whole multiple of page size. We know that
334412f080e7Smrj 	 * this cookie is <= MMU_PAGESIZE.
334512f080e7Smrj 	 */
334612f080e7Smrj 	ASSERT(cookie->dmac_size <= MMU_PAGESIZE);
334712f080e7Smrj 
334812f080e7Smrj 	/*
334912f080e7Smrj 	 * from now on, all new windows in this bind need to be re-mapped during
335012f080e7Smrj 	 * ddi_dma_getwin() (32-bit kernel only). i.e. we ran out of copybuf
335112f080e7Smrj 	 * space...
335212f080e7Smrj 	 */
335312f080e7Smrj #if !defined(__amd64)
335412f080e7Smrj 	dma->dp_cb_remaping = B_TRUE;
335512f080e7Smrj #endif
335612f080e7Smrj 
335712f080e7Smrj 	/* reset copybuf used */
335812f080e7Smrj 	*copybuf_used = 0;
335912f080e7Smrj 
336012f080e7Smrj 	/*
336112f080e7Smrj 	 * if we don't have to trim (since granularity is set to 1), go to the
336212f080e7Smrj 	 * next window and add the current cookie to it. We know the current
336312f080e7Smrj 	 * cookie uses the copy buffer since we're in this code path.
336412f080e7Smrj 	 */
336512f080e7Smrj 	if (!dma->dp_trim_required) {
336612f080e7Smrj 		(*windowp)++;
336712f080e7Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
336812f080e7Smrj 
336912f080e7Smrj 		/* Add this cookie to the new window */
337012f080e7Smrj 		(*windowp)->wd_cookie_cnt++;
337112f080e7Smrj 		(*windowp)->wd_size += cookie->dmac_size;
337212f080e7Smrj 		*copybuf_used += MMU_PAGESIZE;
337312f080e7Smrj 		return (DDI_SUCCESS);
337412f080e7Smrj 	}
337512f080e7Smrj 
337612f080e7Smrj 	/*
337712f080e7Smrj 	 * *** may need to trim, figure it out.
337812f080e7Smrj 	 */
337912f080e7Smrj 
338012f080e7Smrj 	/* figure out how much we need to trim from the window */
338112f080e7Smrj 	if (dma->dp_granularity_power_2) {
338212f080e7Smrj 		trim_sz = (*windowp)->wd_size &
338312f080e7Smrj 		    (hp->dmai_attr.dma_attr_granular - 1);
338412f080e7Smrj 	} else {
338512f080e7Smrj 		trim_sz = (*windowp)->wd_size % hp->dmai_attr.dma_attr_granular;
338612f080e7Smrj 	}
338712f080e7Smrj 
338812f080e7Smrj 	/*
338912f080e7Smrj 	 * if the window's a whole multiple of granularity, go to the next
339012f080e7Smrj 	 * window, init it, then add in the current cookie. We know the current
339112f080e7Smrj 	 * cookie uses the copy buffer since we're in this code path.
339212f080e7Smrj 	 */
339312f080e7Smrj 	if (trim_sz == 0) {
339412f080e7Smrj 		(*windowp)++;
339512f080e7Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
339612f080e7Smrj 
339712f080e7Smrj 		/* Add this cookie to the new window */
339812f080e7Smrj 		(*windowp)->wd_cookie_cnt++;
339912f080e7Smrj 		(*windowp)->wd_size += cookie->dmac_size;
340012f080e7Smrj 		*copybuf_used += MMU_PAGESIZE;
340112f080e7Smrj 		return (DDI_SUCCESS);
340212f080e7Smrj 	}
340312f080e7Smrj 
340412f080e7Smrj 	/*
340512f080e7Smrj 	 * *** We figured it out, we definitely need to trim
340612f080e7Smrj 	 */
340712f080e7Smrj 
340812f080e7Smrj 	/*
340912f080e7Smrj 	 * make sure the driver isn't making us do something bad...
341012f080e7Smrj 	 * Trimming and sgllen == 1 don't go together.
341112f080e7Smrj 	 */
341212f080e7Smrj 	if (hp->dmai_attr.dma_attr_sgllen == 1) {
341312f080e7Smrj 		return (DDI_DMA_NOMAPPING);
341412f080e7Smrj 	}
341512f080e7Smrj 
341612f080e7Smrj 	/*
341712f080e7Smrj 	 * first, setup the current window to account for the trim. Need to go
341812f080e7Smrj 	 * back to the last cookie for this. Some of the last cookie will be in
341912f080e7Smrj 	 * the current window, and some of the last cookie will be in the new
342012f080e7Smrj 	 * window. All of the current cookie will be in the new window.
342112f080e7Smrj 	 */
342212f080e7Smrj 	cookie--;
342312f080e7Smrj 	(*windowp)->wd_trim.tr_trim_last = B_TRUE;
342412f080e7Smrj 	(*windowp)->wd_trim.tr_last_cookie = cookie;
342512f080e7Smrj 	(*windowp)->wd_trim.tr_last_paddr = cookie->_dmu._dmac_ll;
342612f080e7Smrj 	ASSERT(cookie->dmac_size > trim_sz);
342712f080e7Smrj 	(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
342812f080e7Smrj 	(*windowp)->wd_size -= trim_sz;
342912f080e7Smrj 
343012f080e7Smrj 	/*
343112f080e7Smrj 	 * we're trimming the last cookie (not the current cookie). So that
343212f080e7Smrj 	 * last cookie may or may not have been using the copy buffer (we
343312f080e7Smrj 	 * know the cookie passed in uses the copy buffer since we're in
343412f080e7Smrj 	 * this code path).
343512f080e7Smrj 	 *
343612f080e7Smrj 	 * If the last cookie doesn't use the copy buffer, nothing special to
343712f080e7Smrj 	 * do. However, if it does use the copy buffer, it will be both the
343812f080e7Smrj 	 * last page in the current window and the first page in the next
343912f080e7Smrj 	 * window. Since we are reusing the copy buffer (and KVA space on the
344012f080e7Smrj 	 * 32-bit kernel), this page will use the end of the copy buffer in the
344112f080e7Smrj 	 * current window, and the start of the copy buffer in the next window.
344212f080e7Smrj 	 * Track that info... The cookie physical address was already set to
344312f080e7Smrj 	 * the copy buffer physical address in setup_cookie..
344412f080e7Smrj 	 */
344512f080e7Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
344612f080e7Smrj 		pidx = (sinfo->si_buf_offset + (*windowp)->wd_offset +
344712f080e7Smrj 		    (*windowp)->wd_size) >> MMU_PAGESHIFT;
344812f080e7Smrj 		(*windowp)->wd_trim.tr_last_copybuf_win = B_TRUE;
344912f080e7Smrj 		(*windowp)->wd_trim.tr_last_pidx = pidx;
345012f080e7Smrj 		(*windowp)->wd_trim.tr_last_cbaddr =
345112f080e7Smrj 		    dma->dp_pgmap[pidx].pm_cbaddr;
345212f080e7Smrj #if !defined(__amd64)
345312f080e7Smrj 		(*windowp)->wd_trim.tr_last_kaddr =
345412f080e7Smrj 		    dma->dp_pgmap[pidx].pm_kaddr;
345512f080e7Smrj #endif
345612f080e7Smrj 	}
345712f080e7Smrj 
345812f080e7Smrj 	/* save the buffer offsets for the next window */
345912f080e7Smrj 	coffset = cookie->dmac_size - trim_sz;
346012f080e7Smrj 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
346112f080e7Smrj 
346212f080e7Smrj 	/*
346312f080e7Smrj 	 * set this now in case this is the first window. all other cases are
346412f080e7Smrj 	 * set in dma_win()
346512f080e7Smrj 	 */
346612f080e7Smrj 	cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
346712f080e7Smrj 
346812f080e7Smrj 	/*
346912f080e7Smrj 	 * initialize the next window using what's left over in the previous
347012f080e7Smrj 	 * cookie.
347112f080e7Smrj 	 */
347212f080e7Smrj 	(*windowp)++;
347312f080e7Smrj 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
347412f080e7Smrj 	(*windowp)->wd_cookie_cnt++;
347512f080e7Smrj 	(*windowp)->wd_trim.tr_trim_first = B_TRUE;
347612f080e7Smrj 	(*windowp)->wd_trim.tr_first_paddr = cookie->_dmu._dmac_ll + coffset;
347712f080e7Smrj 	(*windowp)->wd_trim.tr_first_size = trim_sz;
347812f080e7Smrj 
347912f080e7Smrj 	/*
348012f080e7Smrj 	 * again, we're tracking if the last cookie uses the copy buffer.
348112f080e7Smrj 	 * read the comment above for more info on why we need to track
348212f080e7Smrj 	 * additional state.
348312f080e7Smrj 	 *
348412f080e7Smrj 	 * For the first cookie in the new window, we need to reset the physical
348512f080e7Smrj 	 * address to DMA into to the start of the copy buffer plus any
348612f080e7Smrj 	 * initial page offset which may be present.
348712f080e7Smrj 	 */
348812f080e7Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
348912f080e7Smrj 		(*windowp)->wd_dosync = B_TRUE;
349012f080e7Smrj 		(*windowp)->wd_trim.tr_first_copybuf_win = B_TRUE;
349112f080e7Smrj 		(*windowp)->wd_trim.tr_first_pidx = pidx;
349212f080e7Smrj 		(*windowp)->wd_trim.tr_first_cbaddr = dma->dp_cbaddr;
349312f080e7Smrj 		poff = (*windowp)->wd_trim.tr_first_paddr & MMU_PAGEOFFSET;
349412f080e7Smrj 		(*windowp)->wd_trim.tr_first_paddr = ptob64(hat_getpfnum(
349512f080e7Smrj 		    kas.a_hat, dma->dp_cbaddr)) + poff;
349612f080e7Smrj #if !defined(__amd64)
349712f080e7Smrj 		(*windowp)->wd_trim.tr_first_kaddr = dma->dp_kva;
349812f080e7Smrj #endif
349912f080e7Smrj 		/* account for the cookie copybuf usage in the new window */
350012f080e7Smrj 		*copybuf_used += MMU_PAGESIZE;
350112f080e7Smrj 
350212f080e7Smrj 		/*
350312f080e7Smrj 		 * every piece of code has to have a hack, and here is this
350412f080e7Smrj 		 * one's :-)
350512f080e7Smrj 		 *
350612f080e7Smrj 		 * There is a complex interaction between setup_cookie and the
350712f080e7Smrj 		 * copybuf window boundary. The complexity had to be in either
350812f080e7Smrj 		 * the maxxfer window, or the copybuf window, and I chose the
350912f080e7Smrj 		 * copybuf code.
351012f080e7Smrj 		 *
351112f080e7Smrj 		 * So in this code path, we have taken the last cookie,
351212f080e7Smrj 		 * virtually broken it in half due to the trim, and it happens
351312f080e7Smrj 		 * to use the copybuf which further complicates life. At the
351412f080e7Smrj 		 * same time, we have already setup the current cookie, which
351512f080e7Smrj 		 * is now wrong. More background info: the current cookie uses
351612f080e7Smrj 		 * the copybuf, so it is only a page long max. So we need to
351712f080e7Smrj 		 * fix the current cookie's copy buffer address, physical
351812f080e7Smrj 		 * address, and kva for the 32-bit kernel. We do this by
351912f080e7Smrj 		 * bumping them by page size (of course, we can't do this with
352012f080e7Smrj 		 * the physical address since the copy buffer may not be
352112f080e7Smrj 		 * physically contiguous).
352212f080e7Smrj 		 */
352312f080e7Smrj 		cookie++;
352412f080e7Smrj 		dma->dp_pgmap[pidx + 1].pm_cbaddr += MMU_PAGESIZE;
352512f080e7Smrj 		poff = cookie->_dmu._dmac_ll & MMU_PAGEOFFSET;
352612f080e7Smrj 		cookie->_dmu._dmac_ll = ptob64(hat_getpfnum(kas.a_hat,
352712f080e7Smrj 		    dma->dp_pgmap[pidx + 1].pm_cbaddr)) + poff;
352812f080e7Smrj #if !defined(__amd64)
352912f080e7Smrj 		ASSERT(dma->dp_pgmap[pidx + 1].pm_mapped == B_FALSE);
353012f080e7Smrj 		dma->dp_pgmap[pidx + 1].pm_kaddr += MMU_PAGESIZE;
353112f080e7Smrj #endif
353212f080e7Smrj 	} else {
353312f080e7Smrj 		/* go back to the current cookie */
353412f080e7Smrj 		cookie++;
353512f080e7Smrj 	}
353612f080e7Smrj 
353712f080e7Smrj 	/*
353812f080e7Smrj 	 * add the current cookie to the new window. set the new window size to
353912f080e7Smrj 	 * what was left over from the previous cookie and what's in the
354012f080e7Smrj 	 * current cookie.
354112f080e7Smrj 	 */
354212f080e7Smrj 	(*windowp)->wd_cookie_cnt++;
354312f080e7Smrj 	(*windowp)->wd_size = trim_sz + cookie->dmac_size;
354412f080e7Smrj 	ASSERT((*windowp)->wd_size < dma->dp_maxxfer);
354512f080e7Smrj 
354612f080e7Smrj 	/*
354712f080e7Smrj 	 * we know that the cookie passed in always uses the copy buffer. We
354812f080e7Smrj 	 * wouldn't be here if it didn't.
354912f080e7Smrj 	 */
355012f080e7Smrj 	*copybuf_used += MMU_PAGESIZE;
355112f080e7Smrj 
355212f080e7Smrj 	return (DDI_SUCCESS);
355312f080e7Smrj }
355412f080e7Smrj 
355512f080e7Smrj 
355612f080e7Smrj /*
355712f080e7Smrj  * rootnex_maxxfer_window_boundary()
355812f080e7Smrj  *    Called in bind slowpath when we get to a window boundary because we will
355912f080e7Smrj  *    go over maxxfer.
356012f080e7Smrj  */
356112f080e7Smrj static int
356212f080e7Smrj rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
356312f080e7Smrj     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie)
356412f080e7Smrj {
356512f080e7Smrj 	size_t dmac_size;
356612f080e7Smrj 	off_t new_offset;
356712f080e7Smrj 	size_t trim_sz;
356812f080e7Smrj 	off_t coffset;
356912f080e7Smrj 
357012f080e7Smrj 
357112f080e7Smrj 	/*
357212f080e7Smrj 	 * calculate how much we have to trim off of the current cookie to equal
357312f080e7Smrj 	 * maxxfer. We don't have to account for granularity here since our
357412f080e7Smrj 	 * maxxfer already takes that into account.
357512f080e7Smrj 	 */
357612f080e7Smrj 	trim_sz = ((*windowp)->wd_size + cookie->dmac_size) - dma->dp_maxxfer;
357712f080e7Smrj 	ASSERT(trim_sz <= cookie->dmac_size);
357812f080e7Smrj 	ASSERT(trim_sz <= dma->dp_maxxfer);
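
	/*
	 * For example (hypothetical values): with wd_size == 0xf000,
	 * dmac_size == 0x2000 and dp_maxxfer == 0x10000, trim_sz ==
	 * (0xf000 + 0x2000) - 0x10000 == 0x1000, so the last 0x1000 bytes of
	 * the current cookie spill over into the next window.
	 */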
357912f080e7Smrj 
358012f080e7Smrj 	/* save cookie size since we need it later and we might change it */
358112f080e7Smrj 	dmac_size = cookie->dmac_size;
358212f080e7Smrj 
358312f080e7Smrj 	/*
358412f080e7Smrj 	 * if we're not trimming the entire cookie, setup the current window to
358512f080e7Smrj 	 * account for the trim.
358612f080e7Smrj 	 */
358712f080e7Smrj 	if (trim_sz < cookie->dmac_size) {
358812f080e7Smrj 		(*windowp)->wd_cookie_cnt++;
358912f080e7Smrj 		(*windowp)->wd_trim.tr_trim_last = B_TRUE;
359012f080e7Smrj 		(*windowp)->wd_trim.tr_last_cookie = cookie;
359112f080e7Smrj 		(*windowp)->wd_trim.tr_last_paddr = cookie->_dmu._dmac_ll;
359212f080e7Smrj 		(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
359312f080e7Smrj 		(*windowp)->wd_size = dma->dp_maxxfer;
359412f080e7Smrj 
359512f080e7Smrj 		/*
359612f080e7Smrj 		 * set the adjusted cookie size now in case this is the first
359712f080e7Smrj 		 * window. All other windows are taken care of in getwin
359812f080e7Smrj 		 */
359912f080e7Smrj 		cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
360012f080e7Smrj 	}
360112f080e7Smrj 
360212f080e7Smrj 	/*
360312f080e7Smrj 	 * coffset is the current offset within the cookie, new_offset is the
360412f080e7Smrj 	 * current offset within the entire buffer.
360512f080e7Smrj 	 */
360612f080e7Smrj 	coffset = dmac_size - trim_sz;
360712f080e7Smrj 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
360812f080e7Smrj 
360912f080e7Smrj 	/* initialize the next window */
361012f080e7Smrj 	(*windowp)++;
361112f080e7Smrj 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
361212f080e7Smrj 	(*windowp)->wd_cookie_cnt++;
361312f080e7Smrj 	(*windowp)->wd_size = trim_sz;
361412f080e7Smrj 	if (trim_sz < dmac_size) {
361512f080e7Smrj 		(*windowp)->wd_trim.tr_trim_first = B_TRUE;
361612f080e7Smrj 		(*windowp)->wd_trim.tr_first_paddr = cookie->_dmu._dmac_ll +
361712f080e7Smrj 		    coffset;
361812f080e7Smrj 		(*windowp)->wd_trim.tr_first_size = trim_sz;
361912f080e7Smrj 	}
362012f080e7Smrj 
362112f080e7Smrj 	return (DDI_SUCCESS);
362212f080e7Smrj }
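
/*
 * A worked example of the trim arithmetic above (the numbers are
 * hypothetical): with dp_maxxfer = 0x100000, wd_size = 0xff000, and a
 * 0x2000 cookie, trim_sz = (0xff000 + 0x2000) - 0x100000 = 0x1000. The
 * current window keeps tr_last_size = 0x2000 - 0x1000 = 0x1000 bytes of
 * the cookie and grows to exactly maxxfer; the new window starts
 * coffset = 0x1000 bytes into the cookie and carries the remaining
 * trim_sz = 0x1000 bytes as its first (trimmed) cookie.
 */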
362312f080e7Smrj 
362412f080e7Smrj 
362512f080e7Smrj /*
362612f080e7Smrj  * rootnex_dma_sync()
362712f080e7Smrj  *    called from ddi_dma_sync() if DMP_NOSYNC is not set in hp->dmai_rflags.
362812f080e7Smrj  *    We set DMP_NOSYNC if we're not using the copy buffer. If DMP_NOSYNC
362912f080e7Smrj  *    is set, ddi_dma_sync() returns immediately passing back success.
363012f080e7Smrj  */
363112f080e7Smrj /*ARGSUSED*/
363212f080e7Smrj static int
363312f080e7Smrj rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
363412f080e7Smrj     off_t off, size_t len, uint_t cache_flags)
363512f080e7Smrj {
363612f080e7Smrj 	rootnex_sglinfo_t *sinfo;
363712f080e7Smrj 	rootnex_pgmap_t *cbpage;
363812f080e7Smrj 	rootnex_window_t *win;
363912f080e7Smrj 	ddi_dma_impl_t *hp;
364012f080e7Smrj 	rootnex_dma_t *dma;
364112f080e7Smrj 	caddr_t fromaddr;
364212f080e7Smrj 	caddr_t toaddr;
364312f080e7Smrj 	uint_t psize;
364412f080e7Smrj 	off_t offset;
364512f080e7Smrj 	uint_t pidx;
364612f080e7Smrj 	size_t size;
364712f080e7Smrj 	off_t poff;
364812f080e7Smrj 	int e;
364912f080e7Smrj 
365012f080e7Smrj 
365112f080e7Smrj 	hp = (ddi_dma_impl_t *)handle;
365212f080e7Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
365312f080e7Smrj 	sinfo = &dma->dp_sglinfo;
365412f080e7Smrj 
365512f080e7Smrj 	/*
365612f080e7Smrj 	 * if we don't have any windows, we don't need to sync. A copybuf
365712f080e7Smrj 	 * will cause us to have at least one window.
365812f080e7Smrj 	 */
365912f080e7Smrj 	if (dma->dp_window == NULL) {
366012f080e7Smrj 		return (DDI_SUCCESS);
366112f080e7Smrj 	}
366212f080e7Smrj 
366312f080e7Smrj 	/* This window may not need to be sync'd */
366412f080e7Smrj 	win = &dma->dp_window[dma->dp_current_win];
366512f080e7Smrj 	if (!win->wd_dosync) {
366612f080e7Smrj 		return (DDI_SUCCESS);
366712f080e7Smrj 	}
366812f080e7Smrj 
366912f080e7Smrj 	/* handle off and len special cases */
367012f080e7Smrj 	if ((off == 0) || (rootnex_sync_ignore_params)) {
367112f080e7Smrj 		offset = win->wd_offset;
367212f080e7Smrj 	} else {
367312f080e7Smrj 		offset = off;
367412f080e7Smrj 	}
367512f080e7Smrj 	if ((len == 0) || (rootnex_sync_ignore_params)) {
367612f080e7Smrj 		size = win->wd_size;
367712f080e7Smrj 	} else {
367812f080e7Smrj 		size = len;
367912f080e7Smrj 	}
368012f080e7Smrj 
368112f080e7Smrj 	/* check the sync args to make sure they make a little sense */
368212f080e7Smrj 	if (rootnex_sync_check_parms) {
368312f080e7Smrj 		e = rootnex_valid_sync_parms(hp, win, offset, size,
368412f080e7Smrj 		    cache_flags);
368512f080e7Smrj 		if (e != DDI_SUCCESS) {
368612f080e7Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_SYNC_FAIL]);
368712f080e7Smrj 			return (DDI_FAILURE);
368812f080e7Smrj 		}
368912f080e7Smrj 	}
369012f080e7Smrj 
369112f080e7Smrj 	/*
369212f080e7Smrj 	 * special case the first page to handle the offset into the page. The
369312f080e7Smrj 	 * offset to the current page for our buffer is the offset into the
369412f080e7Smrj 	 * first page of the buffer plus our current offset into the buffer
369512f080e7Smrj 	 * itself, masked of course.
369612f080e7Smrj 	 */
369712f080e7Smrj 	poff = (sinfo->si_buf_offset + offset) & MMU_PAGEOFFSET;
369812f080e7Smrj 	psize = MIN((MMU_PAGESIZE - poff), size);
369912f080e7Smrj 
370012f080e7Smrj 	/* go through all the pages that we want to sync */
370112f080e7Smrj 	while (size > 0) {
370212f080e7Smrj 		/*
370312f080e7Smrj 		 * Calculate the page index relative to the start of the buffer.
370412f080e7Smrj 		 * The index to the current page for our buffer is the offset
370512f080e7Smrj 		 * into the first page of the buffer plus our current offset
370612f080e7Smrj 		 * into the buffer itself, shifted of course...
370712f080e7Smrj 		 */
370812f080e7Smrj 		pidx = (sinfo->si_buf_offset + offset) >> MMU_PAGESHIFT;
370912f080e7Smrj 		ASSERT(pidx < sinfo->si_max_pages);
371012f080e7Smrj 
371112f080e7Smrj 		/*
371212f080e7Smrj 		 * if this page uses the copy buffer, we need to sync it,
371312f080e7Smrj 		 * otherwise, go on to the next page.
371412f080e7Smrj 		 */
371512f080e7Smrj 		cbpage = &dma->dp_pgmap[pidx];
371612f080e7Smrj 		ASSERT((cbpage->pm_uses_copybuf == B_TRUE) ||
371712f080e7Smrj 		    (cbpage->pm_uses_copybuf == B_FALSE));
371812f080e7Smrj 		if (cbpage->pm_uses_copybuf) {
371912f080e7Smrj 			/* cbaddr and kaddr should be page aligned */
372012f080e7Smrj 			ASSERT(((uintptr_t)cbpage->pm_cbaddr &
372112f080e7Smrj 			    MMU_PAGEOFFSET) == 0);
372212f080e7Smrj 			ASSERT(((uintptr_t)cbpage->pm_kaddr &
372312f080e7Smrj 			    MMU_PAGEOFFSET) == 0);
372412f080e7Smrj 
372512f080e7Smrj 			/*
372612f080e7Smrj 			 * if we're copying for the device, we are going to
372712f080e7Smrj 			 * copy from the driver's buffer to the rootnex
372812f080e7Smrj 			 * allocated copy buffer.
372912f080e7Smrj 			 */
373012f080e7Smrj 			if (cache_flags == DDI_DMA_SYNC_FORDEV) {
373112f080e7Smrj 				fromaddr = cbpage->pm_kaddr + poff;
373212f080e7Smrj 				toaddr = cbpage->pm_cbaddr + poff;
373312f080e7Smrj 				DTRACE_PROBE2(rootnex__sync__dev,
373412f080e7Smrj 				    dev_info_t *, dma->dp_dip, size_t, psize);
373512f080e7Smrj 
373612f080e7Smrj 			/*
373712f080e7Smrj 			 * if we're copying for the cpu/kernel, we are going to
373812f080e7Smrj 			 * copy from the rootnex allocated copy buffer to the
373912f080e7Smrj 			 * driver's buffer.
374012f080e7Smrj 			 */
374112f080e7Smrj 			} else {
374212f080e7Smrj 				fromaddr = cbpage->pm_cbaddr + poff;
374312f080e7Smrj 				toaddr = cbpage->pm_kaddr + poff;
374412f080e7Smrj 				DTRACE_PROBE2(rootnex__sync__cpu,
374512f080e7Smrj 				    dev_info_t *, dma->dp_dip, size_t, psize);
374612f080e7Smrj 			}
374712f080e7Smrj 
374812f080e7Smrj 			bcopy(fromaddr, toaddr, psize);
374912f080e7Smrj 		}
375012f080e7Smrj 
375112f080e7Smrj 		/*
375212f080e7Smrj 		 * decrement size until we're done, update our offset into the
375312f080e7Smrj 		 * buffer, and get the next page size.
375412f080e7Smrj 		 */
375512f080e7Smrj 		size -= psize;
375612f080e7Smrj 		offset += psize;
375712f080e7Smrj 		psize = MIN(MMU_PAGESIZE, size);
375812f080e7Smrj 
375912f080e7Smrj 		/* page offset is zero for the rest of this loop */
376012f080e7Smrj 		poff = 0;
376112f080e7Smrj 	}
376212f080e7Smrj 
376312f080e7Smrj 	return (DDI_SUCCESS);
376412f080e7Smrj }
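
/*
 * An illustrative sketch (not part of this file) of how a leaf driver
 * reaches the sync path above after a device-to-memory transfer; the
 * function name and error handling are hypothetical, the DDI call is
 * real. Kept out of compilation on purpose.
 */
#if 0
static int
example_post_read(ddi_dma_handle_t dh)
{
	/* off/len of 0, 0 means "sync the whole current window" here */
	if (ddi_dma_sync(dh, 0, 0, DDI_DMA_SYNC_FORCPU) != DDI_SUCCESS)
		return (EIO);
	return (0);
}
#endif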
376512f080e7Smrj 
376612f080e7Smrj 
376712f080e7Smrj /*
376812f080e7Smrj  * rootnex_valid_sync_parms()
376912f080e7Smrj  *    checks the parameters passed to sync to verify they are correct.
377012f080e7Smrj  */
377112f080e7Smrj static int
377212f080e7Smrj rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win,
377312f080e7Smrj     off_t offset, size_t size, uint_t cache_flags)
377412f080e7Smrj {
377512f080e7Smrj 	off_t woffset;
377612f080e7Smrj 
377712f080e7Smrj 
377812f080e7Smrj 	/*
377912f080e7Smrj 	 * the first part of the test makes sure the offset passed in is
378012f080e7Smrj 	 * within the window.
378112f080e7Smrj 	 */
378212f080e7Smrj 	if (offset < win->wd_offset) {
378312f080e7Smrj 		return (DDI_FAILURE);
378412f080e7Smrj 	}
378512f080e7Smrj 
378612f080e7Smrj 	/*
378712f080e7Smrj 	 * the second and last part of the test makes sure the offset and
378812f080e7Smrj 	 * length passed in are within the window.
378912f080e7Smrj 	 */
379012f080e7Smrj 	woffset = offset - win->wd_offset;
379112f080e7Smrj 	if ((woffset + size) > win->wd_size) {
379212f080e7Smrj 		return (DDI_FAILURE);
379312f080e7Smrj 	}
379412f080e7Smrj 
379512f080e7Smrj 	/*
379612f080e7Smrj 	 * if we are sync'ing for the device, the DDI_DMA_WRITE flag should
379712f080e7Smrj 	 * be set too.
379812f080e7Smrj 	 */
379912f080e7Smrj 	if ((cache_flags == DDI_DMA_SYNC_FORDEV) &&
380012f080e7Smrj 	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
380112f080e7Smrj 		return (DDI_SUCCESS);
380212f080e7Smrj 	}
380312f080e7Smrj 
380412f080e7Smrj 	/*
380512f080e7Smrj 	 * at this point, either DDI_DMA_SYNC_FORCPU or DDI_DMA_SYNC_FORKERNEL
380612f080e7Smrj 	 * should be set. Also DDI_DMA_READ should be set in the flags.
380712f080e7Smrj 	 */
380812f080e7Smrj 	if (((cache_flags == DDI_DMA_SYNC_FORCPU) ||
380912f080e7Smrj 	    (cache_flags == DDI_DMA_SYNC_FORKERNEL)) &&
381012f080e7Smrj 	    (hp->dmai_rflags & DDI_DMA_READ)) {
381112f080e7Smrj 		return (DDI_SUCCESS);
381212f080e7Smrj 	}
381312f080e7Smrj 
381412f080e7Smrj 	return (DDI_FAILURE);
381512f080e7Smrj }
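
/*
 * In short, the checks above accept exactly these combinations, and only
 * when offset/size also land inside the current window:
 *
 *	cache_flags			required flag from the bind
 *	DDI_DMA_SYNC_FORDEV		DDI_DMA_WRITE
 *	DDI_DMA_SYNC_FORCPU		DDI_DMA_READ
 *	DDI_DMA_SYNC_FORKERNEL		DDI_DMA_READ
 */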
381612f080e7Smrj 
381712f080e7Smrj 
381812f080e7Smrj /*
381912f080e7Smrj  * rootnex_dma_win()
382012f080e7Smrj  *    called from ddi_dma_getwin()
382112f080e7Smrj  */
382212f080e7Smrj /*ARGSUSED*/
382312f080e7Smrj static int
382412f080e7Smrj rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
382512f080e7Smrj     uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
382612f080e7Smrj     uint_t *ccountp)
382712f080e7Smrj {
382812f080e7Smrj 	rootnex_window_t *window;
382912f080e7Smrj 	rootnex_trim_t *trim;
383012f080e7Smrj 	ddi_dma_impl_t *hp;
383112f080e7Smrj 	rootnex_dma_t *dma;
383212f080e7Smrj #if !defined(__amd64)
383312f080e7Smrj 	rootnex_sglinfo_t *sinfo;
383412f080e7Smrj 	rootnex_pgmap_t *pmap;
383512f080e7Smrj 	uint_t pidx;
383612f080e7Smrj 	uint_t pcnt;
383712f080e7Smrj 	off_t poff;
383812f080e7Smrj 	int i;
383912f080e7Smrj #endif
384012f080e7Smrj 
384112f080e7Smrj 
384212f080e7Smrj 	hp = (ddi_dma_impl_t *)handle;
384312f080e7Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
384412f080e7Smrj #if !defined(__amd64)
384512f080e7Smrj 	sinfo = &dma->dp_sglinfo;
384612f080e7Smrj #endif
384712f080e7Smrj 
384812f080e7Smrj 	/* If we try to get a window which doesn't exist, return failure */
384912f080e7Smrj 	if (win >= hp->dmai_nwin) {
385012f080e7Smrj 		ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
385112f080e7Smrj 		return (DDI_FAILURE);
385212f080e7Smrj 	}
385312f080e7Smrj 
385412f080e7Smrj 	/*
385512f080e7Smrj 	 * if we don't have any windows, and they're asking for the first
385612f080e7Smrj 	 * window, setup the cookie pointer to the first cookie in the bind.
385712f080e7Smrj 	 * setup our return values, then increment the cookie since we return
385812f080e7Smrj 	 * the first cookie on the stack.
385912f080e7Smrj 	 */
386012f080e7Smrj 	if (dma->dp_window == NULL) {
386112f080e7Smrj 		if (win != 0) {
386212f080e7Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
386312f080e7Smrj 			return (DDI_FAILURE);
386412f080e7Smrj 		}
386512f080e7Smrj 		hp->dmai_cookie = dma->dp_cookies;
386612f080e7Smrj 		*offp = 0;
386712f080e7Smrj 		*lenp = dma->dp_dma.dmao_size;
386812f080e7Smrj 		*ccountp = dma->dp_sglinfo.si_sgl_size;
386912f080e7Smrj 		*cookiep = hp->dmai_cookie[0];
387012f080e7Smrj 		hp->dmai_cookie++;
387112f080e7Smrj 		return (DDI_SUCCESS);
387212f080e7Smrj 	}
387312f080e7Smrj 
387412f080e7Smrj 	/* sync the old window before moving on to the new one */
387512f080e7Smrj 	window = &dma->dp_window[dma->dp_current_win];
387612f080e7Smrj 	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_READ)) {
387712f080e7Smrj 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
387812f080e7Smrj 		    DDI_DMA_SYNC_FORCPU);
387912f080e7Smrj 	}
388012f080e7Smrj 
388112f080e7Smrj #if !defined(__amd64)
388212f080e7Smrj 	/*
388312f080e7Smrj 	 * before we move to the next window, if we need to re-map, unmap all
388412f080e7Smrj 	 * the pages in this window.
388512f080e7Smrj 	 */
388612f080e7Smrj 	if (dma->dp_cb_remaping) {
388712f080e7Smrj 		/*
388812f080e7Smrj 		 * If we switch to this window again, we'll need to map it in
388912f080e7Smrj 		 * on the fly next time.
389012f080e7Smrj 		 */
389112f080e7Smrj 		window->wd_remap_copybuf = B_TRUE;
389212f080e7Smrj 
389312f080e7Smrj 		/*
389412f080e7Smrj 		 * calculate the page index into the buffer where this window
389512f080e7Smrj 		 * starts, and the number of pages this window takes up.
389612f080e7Smrj 		 */
389712f080e7Smrj 		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
389812f080e7Smrj 		    MMU_PAGESHIFT;
389912f080e7Smrj 		poff = (sinfo->si_buf_offset + window->wd_offset) &
390012f080e7Smrj 		    MMU_PAGEOFFSET;
390112f080e7Smrj 		pcnt = mmu_btopr(window->wd_size + poff);
390212f080e7Smrj 		ASSERT((pidx + pcnt) <= sinfo->si_max_pages);
390312f080e7Smrj 
390412f080e7Smrj 		/* unmap pages which are currently mapped in this window */
390512f080e7Smrj 		for (i = 0; i < pcnt; i++) {
390612f080e7Smrj 			if (dma->dp_pgmap[pidx].pm_mapped) {
390712f080e7Smrj 				hat_unload(kas.a_hat,
390812f080e7Smrj 				    dma->dp_pgmap[pidx].pm_kaddr, MMU_PAGESIZE,
390912f080e7Smrj 				    HAT_UNLOAD);
391012f080e7Smrj 				dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
391112f080e7Smrj 			}
391212f080e7Smrj 			pidx++;
391312f080e7Smrj 		}
391412f080e7Smrj 	}
391512f080e7Smrj #endif
391612f080e7Smrj 
391712f080e7Smrj 	/*
391812f080e7Smrj 	 * Move to the new window.
391912f080e7Smrj 	 * NOTE: current_win must be set for sync to work right
392012f080e7Smrj 	 */
392112f080e7Smrj 	dma->dp_current_win = win;
392212f080e7Smrj 	window = &dma->dp_window[win];
392312f080e7Smrj 
392412f080e7Smrj 	/* if needed, adjust the first and/or last cookies for trim */
392512f080e7Smrj 	trim = &window->wd_trim;
392612f080e7Smrj 	if (trim->tr_trim_first) {
392712f080e7Smrj 		window->wd_first_cookie->_dmu._dmac_ll = trim->tr_first_paddr;
392812f080e7Smrj 		window->wd_first_cookie->dmac_size = trim->tr_first_size;
392912f080e7Smrj #if !defined(__amd64)
393012f080e7Smrj 		window->wd_first_cookie->dmac_type =
393112f080e7Smrj 		    (window->wd_first_cookie->dmac_type &
393212f080e7Smrj 		    ROOTNEX_USES_COPYBUF) + window->wd_offset;
393312f080e7Smrj #endif
393412f080e7Smrj 		if (trim->tr_first_copybuf_win) {
393512f080e7Smrj 			dma->dp_pgmap[trim->tr_first_pidx].pm_cbaddr =
393612f080e7Smrj 			    trim->tr_first_cbaddr;
393712f080e7Smrj #if !defined(__amd64)
393812f080e7Smrj 			dma->dp_pgmap[trim->tr_first_pidx].pm_kaddr =
393912f080e7Smrj 			    trim->tr_first_kaddr;
394012f080e7Smrj #endif
394112f080e7Smrj 		}
394212f080e7Smrj 	}
394312f080e7Smrj 	if (trim->tr_trim_last) {
394412f080e7Smrj 		trim->tr_last_cookie->_dmu._dmac_ll = trim->tr_last_paddr;
394512f080e7Smrj 		trim->tr_last_cookie->dmac_size = trim->tr_last_size;
394612f080e7Smrj 		if (trim->tr_last_copybuf_win) {
394712f080e7Smrj 			dma->dp_pgmap[trim->tr_last_pidx].pm_cbaddr =
394812f080e7Smrj 			    trim->tr_last_cbaddr;
394912f080e7Smrj #if !defined(__amd64)
395012f080e7Smrj 			dma->dp_pgmap[trim->tr_last_pidx].pm_kaddr =
395112f080e7Smrj 			    trim->tr_last_kaddr;
395212f080e7Smrj #endif
395312f080e7Smrj 		}
395412f080e7Smrj 	}
395512f080e7Smrj 
395612f080e7Smrj 	/*
395712f080e7Smrj 	 * setup the cookie pointer to the first cookie in the window. setup
395812f080e7Smrj 	 * our return values, then increment the cookie since we return the
395912f080e7Smrj 	 * first cookie on the stack.
396012f080e7Smrj 	 */
396112f080e7Smrj 	hp->dmai_cookie = window->wd_first_cookie;
396212f080e7Smrj 	*offp = window->wd_offset;
396312f080e7Smrj 	*lenp = window->wd_size;
396412f080e7Smrj 	*ccountp = window->wd_cookie_cnt;
396512f080e7Smrj 	*cookiep = hp->dmai_cookie[0];
396612f080e7Smrj 	hp->dmai_cookie++;
396712f080e7Smrj 
396812f080e7Smrj #if !defined(__amd64)
396912f080e7Smrj 	/* re-map copybuf if required for this window */
397012f080e7Smrj 	if (dma->dp_cb_remaping) {
397112f080e7Smrj 		/*
397212f080e7Smrj 		 * calculate the page index into the buffer where this
397312f080e7Smrj 		 * window starts.
397412f080e7Smrj 		 */
397512f080e7Smrj 		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
397612f080e7Smrj 		    MMU_PAGESHIFT;
397712f080e7Smrj 		ASSERT(pidx < sinfo->si_max_pages);
397812f080e7Smrj 
397912f080e7Smrj 		/*
398012f080e7Smrj 		 * the first page can get unmapped if it's shared with the
398112f080e7Smrj 		 * previous window. Even if the rest of this window is already
398212f080e7Smrj 		 * mapped in, we still need to check this one.
398312f080e7Smrj 		 */
398412f080e7Smrj 		pmap = &dma->dp_pgmap[pidx];
398512f080e7Smrj 		if ((pmap->pm_uses_copybuf) && (pmap->pm_mapped == B_FALSE)) {
398612f080e7Smrj 			if (pmap->pm_pp != NULL) {
398712f080e7Smrj 				pmap->pm_mapped = B_TRUE;
398812f080e7Smrj 				i86_pp_map(pmap->pm_pp, pmap->pm_kaddr);
398912f080e7Smrj 			} else if (pmap->pm_vaddr != NULL) {
399012f080e7Smrj 				pmap->pm_mapped = B_TRUE;
399112f080e7Smrj 				i86_va_map(pmap->pm_vaddr, sinfo->si_asp,
399212f080e7Smrj 				    pmap->pm_kaddr);
399312f080e7Smrj 			}
399412f080e7Smrj 		}
399512f080e7Smrj 		pidx++;
399612f080e7Smrj 
399712f080e7Smrj 		/* map in the rest of the pages if required */
399812f080e7Smrj 		if (window->wd_remap_copybuf) {
399912f080e7Smrj 			window->wd_remap_copybuf = B_FALSE;
400012f080e7Smrj 
400112f080e7Smrj 			/* figure out how many pages this window takes up */
400212f080e7Smrj 			poff = (sinfo->si_buf_offset + window->wd_offset) &
400312f080e7Smrj 			    MMU_PAGEOFFSET;
400412f080e7Smrj 			pcnt = mmu_btopr(window->wd_size + poff);
400512f080e7Smrj 			ASSERT(((pidx - 1) + pcnt) <= sinfo->si_max_pages);
400612f080e7Smrj 
400712f080e7Smrj 			/* map pages which require it */
400812f080e7Smrj 			for (i = 1; i < pcnt; i++) {
400912f080e7Smrj 				pmap = &dma->dp_pgmap[pidx];
401012f080e7Smrj 				if (pmap->pm_uses_copybuf) {
401112f080e7Smrj 					ASSERT(pmap->pm_mapped == B_FALSE);
401212f080e7Smrj 					if (pmap->pm_pp != NULL) {
401312f080e7Smrj 						pmap->pm_mapped = B_TRUE;
401412f080e7Smrj 						i86_pp_map(pmap->pm_pp,
401512f080e7Smrj 						    pmap->pm_kaddr);
401612f080e7Smrj 					} else if (pmap->pm_vaddr != NULL) {
401712f080e7Smrj 						pmap->pm_mapped = B_TRUE;
401812f080e7Smrj 						i86_va_map(pmap->pm_vaddr,
401912f080e7Smrj 						    sinfo->si_asp,
402012f080e7Smrj 						    pmap->pm_kaddr);
402112f080e7Smrj 					}
402212f080e7Smrj 				}
402312f080e7Smrj 				pidx++;
402412f080e7Smrj 			}
402512f080e7Smrj 		}
402612f080e7Smrj 	}
402712f080e7Smrj #endif
402812f080e7Smrj 
402912f080e7Smrj 	/* if the new window uses the copy buffer, sync it for the device */
403012f080e7Smrj 	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_WRITE)) {
403112f080e7Smrj 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
403212f080e7Smrj 		    DDI_DMA_SYNC_FORDEV);
403312f080e7Smrj 	}
403412f080e7Smrj 
403512f080e7Smrj 	return (DDI_SUCCESS);
403612f080e7Smrj }
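
/*
 * An illustrative sketch (not part of this file) of the canonical consumer
 * of the getwin path above: a driver that bound with DDI_DMA_PARTIAL and
 * walks every window and cookie. The function name is hypothetical; the
 * DDI calls are real. Kept out of compilation on purpose.
 */
#if 0
static int
example_walk_windows(ddi_dma_handle_t dh)
{
	ddi_dma_cookie_t cookie;
	uint_t nwin, ccnt, win, i;
	size_t len;
	off_t off;

	if (ddi_dma_numwin(dh, &nwin) != DDI_SUCCESS)
		return (EIO);

	for (win = 0; win < nwin; win++) {
		/* activate the window; rootnex_dma_win() does the work */
		if (ddi_dma_getwin(dh, win, &off, &len, &cookie,
		    &ccnt) != DDI_SUCCESS)
			return (EIO);

		/* first cookie comes from getwin, the rest from nextcookie */
		for (i = 0; i < ccnt; i++) {
			/* ... program cookie.dmac_laddress/dmac_size ... */
			if (i + 1 < ccnt)
				ddi_dma_nextcookie(dh, &cookie);
		}
	}
	return (0);
}
#endif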
403712f080e7Smrj 
403812f080e7Smrj 
403912f080e7Smrj 
404012f080e7Smrj /*
404112f080e7Smrj  * ************************
404212f080e7Smrj  *  obsoleted dma routines
404312f080e7Smrj  * ************************
404412f080e7Smrj  */
404512f080e7Smrj 
404612f080e7Smrj /*
404712f080e7Smrj  * rootnex_dma_map()
404812f080e7Smrj  *    called from ddi_dma_setup()
404912f080e7Smrj  */
405012f080e7Smrj /* ARGSUSED */
405112f080e7Smrj static int
405212f080e7Smrj rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip, struct ddi_dma_req *dmareq,
405312f080e7Smrj     ddi_dma_handle_t *handlep)
405412f080e7Smrj {
405512f080e7Smrj #if defined(__amd64)
405612f080e7Smrj 	/*
405712f080e7Smrj 	 * this interface is not supported in the 64-bit x86 kernel. See the
405812f080e7Smrj 	 * comment in rootnex_dma_mctl()
405912f080e7Smrj 	 */
406012f080e7Smrj 	return (DDI_DMA_NORESOURCES);
406112f080e7Smrj 
406212f080e7Smrj #else /* 32-bit x86 kernel */
406312f080e7Smrj 	ddi_dma_handle_t *lhandlep;
406412f080e7Smrj 	ddi_dma_handle_t lhandle;
406512f080e7Smrj 	ddi_dma_cookie_t cookie;
406612f080e7Smrj 	ddi_dma_attr_t dma_attr;
406712f080e7Smrj 	ddi_dma_lim_t *dma_lim;
406812f080e7Smrj 	uint_t ccnt;
406912f080e7Smrj 	int e;
407012f080e7Smrj 
407112f080e7Smrj 
407212f080e7Smrj 	/*
407312f080e7Smrj 	 * if the driver is just testing to see if it's possible to do the bind,
407412f080e7Smrj 	 * we'll use local state. Otherwise, use the handle pointer passed in.
407512f080e7Smrj 	 */
407612f080e7Smrj 	if (handlep == NULL) {
407712f080e7Smrj 		lhandlep = &lhandle;
407812f080e7Smrj 	} else {
407912f080e7Smrj 		lhandlep = handlep;
408012f080e7Smrj 	}
408112f080e7Smrj 
408212f080e7Smrj 	/* convert the limit structure to a dma_attr one */
408312f080e7Smrj 	dma_lim = dmareq->dmar_limits;
408412f080e7Smrj 	dma_attr.dma_attr_version = DMA_ATTR_V0;
408512f080e7Smrj 	dma_attr.dma_attr_addr_lo = dma_lim->dlim_addr_lo;
408612f080e7Smrj 	dma_attr.dma_attr_addr_hi = dma_lim->dlim_addr_hi;
408712f080e7Smrj 	dma_attr.dma_attr_minxfer = dma_lim->dlim_minxfer;
408812f080e7Smrj 	dma_attr.dma_attr_seg = dma_lim->dlim_adreg_max;
408912f080e7Smrj 	dma_attr.dma_attr_count_max = dma_lim->dlim_ctreg_max;
409012f080e7Smrj 	dma_attr.dma_attr_granular = dma_lim->dlim_granular;
409112f080e7Smrj 	dma_attr.dma_attr_sgllen = dma_lim->dlim_sgllen;
409212f080e7Smrj 	dma_attr.dma_attr_maxxfer = dma_lim->dlim_reqsize;
409312f080e7Smrj 	dma_attr.dma_attr_burstsizes = dma_lim->dlim_burstsizes;
409412f080e7Smrj 	dma_attr.dma_attr_align = MMU_PAGESIZE;
409512f080e7Smrj 	dma_attr.dma_attr_flags = 0;
409612f080e7Smrj 
409712f080e7Smrj 	e = rootnex_dma_allochdl(dip, rdip, &dma_attr, dmareq->dmar_fp,
409812f080e7Smrj 	    dmareq->dmar_arg, lhandlep);
409912f080e7Smrj 	if (e != DDI_SUCCESS) {
410012f080e7Smrj 		return (e);
410112f080e7Smrj 	}
410212f080e7Smrj 
410312f080e7Smrj 	e = rootnex_dma_bindhdl(dip, rdip, *lhandlep, dmareq, &cookie, &ccnt);
410412f080e7Smrj 	if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) {
410512f080e7Smrj 		(void) rootnex_dma_freehdl(dip, rdip, *lhandlep);
410612f080e7Smrj 		return (e);
410712f080e7Smrj 	}
410812f080e7Smrj 
410912f080e7Smrj 	/*
411012f080e7Smrj 	 * if the driver is just testing to see if it's possible to do the bind,
411112f080e7Smrj 	 * free up the local state and return the result.
411212f080e7Smrj 	 */
411312f080e7Smrj 	if (handlep == NULL) {
411412f080e7Smrj 		(void) rootnex_dma_unbindhdl(dip, rdip, *lhandlep);
411512f080e7Smrj 		(void) rootnex_dma_freehdl(dip, rdip, *lhandlep);
411612f080e7Smrj 		if (e == DDI_DMA_MAPPED) {
411712f080e7Smrj 			return (DDI_DMA_MAPOK);
411812f080e7Smrj 		} else {
411912f080e7Smrj 			return (DDI_DMA_NOMAPPING);
412012f080e7Smrj 		}
412112f080e7Smrj 	}
412212f080e7Smrj 
412312f080e7Smrj 	return (e);
412412f080e7Smrj #endif /* defined(__amd64) */
412512f080e7Smrj }
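
/*
 * An illustrative sketch (not part of this file) of the obsolete interface
 * serviced above: an old driver hands ddi_dma_setup() a ddi_dma_req with
 * ddi_dma_lim_t limits instead of ddi_dma_attr_t attributes, and may pass
 * handlep == NULL just to ask whether the mapping is possible. The function
 * name is hypothetical. Kept out of compilation on purpose.
 */
#if 0
static int
example_old_map(dev_info_t *dip, struct ddi_dma_req *dmareq,
    ddi_dma_handle_t *handlep)
{
	/* returns DDI_DMA_MAPOK/DDI_DMA_NOMAPPING when handlep is NULL */
	return (ddi_dma_setup(dip, dmareq, handlep));
}
#endif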
412612f080e7Smrj 
412712f080e7Smrj 
412812f080e7Smrj /*
412912f080e7Smrj  * rootnex_dma_mctl()
413012f080e7Smrj  *
413112f080e7Smrj  */
413212f080e7Smrj /* ARGSUSED */
413312f080e7Smrj static int
413412f080e7Smrj rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
413512f080e7Smrj     enum ddi_dma_ctlops request, off_t *offp, size_t *lenp, caddr_t *objpp,
413612f080e7Smrj     uint_t cache_flags)
413712f080e7Smrj {
413812f080e7Smrj #if defined(__amd64)
413912f080e7Smrj 	/*
414012f080e7Smrj 	 * DDI_DMA_SMEM_ALLOC & DDI_DMA_IOPB_ALLOC were changed to have a
414112f080e7Smrj 	 * common implementation in genunix, so they no longer have x86
414212f080e7Smrj 	 * specific functionality which called into dma_ctl.
414312f080e7Smrj 	 *
414412f080e7Smrj 	 * The rest of the obsoleted interfaces were never supported in the
414512f080e7Smrj 	 * 64-bit x86 kernel. For s10, the obsoleted DDI_DMA_SEGTOC interface
414612f080e7Smrj 	 * was not ported to the x86 64-bit kernel do to serious x86 rootnex
414712f080e7Smrj 	 * was not ported to the x86 64-bit kernel due to serious x86 rootnex
414812f080e7Smrj 	 *
414912f080e7Smrj 	 * If you can't use DDI_DMA_SEGTOC, then DDI_DMA_NEXTSEG, DDI_DMA_FREE,
415012f080e7Smrj 	 * and DDI_DMA_NEXTWIN are useless since you can't get to the cookie,
415112f080e7Smrj 	 * so we reflect that now too...
415212f080e7Smrj 	 *
415312f080e7Smrj 	 * Even though we fixed the pointer problem in DDI_DMA_SEGTOC, we are
415412f080e7Smrj 	 * not going to put this functionality into the 64-bit x86 kernel now.
415512f080e7Smrj 	 * It wasn't ported to the 64-bit kernel for s10, no reason to change
415612f080e7Smrj 	 * that in a future release.
415712f080e7Smrj 	 */
415812f080e7Smrj 	return (DDI_FAILURE);
415912f080e7Smrj 
416012f080e7Smrj #else /* 32-bit x86 kernel */
416112f080e7Smrj 	ddi_dma_cookie_t lcookie;
416212f080e7Smrj 	ddi_dma_cookie_t *cookie;
416312f080e7Smrj 	rootnex_window_t *window;
416412f080e7Smrj 	ddi_dma_impl_t *hp;
416512f080e7Smrj 	rootnex_dma_t *dma;
416612f080e7Smrj 	uint_t nwin;
416712f080e7Smrj 	uint_t ccnt;
416812f080e7Smrj 	size_t len;
416912f080e7Smrj 	off_t off;
417012f080e7Smrj 	int e;
417112f080e7Smrj 
417212f080e7Smrj 
417312f080e7Smrj 	/*
417412f080e7Smrj 	 * DDI_DMA_SEGTOC, DDI_DMA_NEXTSEG, and DDI_DMA_NEXTWIN are a little
417512f080e7Smrj 	 * hacky since we're optimizing for the current interfaces and so we
417612f080e7Smrj 	 * can clean up the mess in genunix. Hopefully we will remove these
417712f080e7Smrj 	 * obsoleted routines someday soon.
417812f080e7Smrj 	 */
417912f080e7Smrj 
418012f080e7Smrj 	switch (request) {
418112f080e7Smrj 
418212f080e7Smrj 	case DDI_DMA_SEGTOC: /* ddi_dma_segtocookie() */
418312f080e7Smrj 		hp = (ddi_dma_impl_t *)handle;
418412f080e7Smrj 		cookie = (ddi_dma_cookie_t *)objpp;
418512f080e7Smrj 
418612f080e7Smrj 		/*
418712f080e7Smrj 		 * convert segment to cookie. We don't distinguish between the
418812f080e7Smrj 		 * two :-)
418912f080e7Smrj 		 */
419012f080e7Smrj 		*cookie = *hp->dmai_cookie;
419112f080e7Smrj 		*lenp = cookie->dmac_size;
419212f080e7Smrj 		*offp = cookie->dmac_type & ~ROOTNEX_USES_COPYBUF;
419312f080e7Smrj 		return (DDI_SUCCESS);
419412f080e7Smrj 
419512f080e7Smrj 	case DDI_DMA_NEXTSEG: /* ddi_dma_nextseg() */
419612f080e7Smrj 		hp = (ddi_dma_impl_t *)handle;
419712f080e7Smrj 		dma = (rootnex_dma_t *)hp->dmai_private;
419812f080e7Smrj 
419912f080e7Smrj 		if ((*lenp != NULL) && ((uintptr_t)*lenp != (uintptr_t)hp)) {
420012f080e7Smrj 			return (DDI_DMA_STALE);
420112f080e7Smrj 		}
420212f080e7Smrj 
420312f080e7Smrj 		/* handle the case where we don't have any windows */
420412f080e7Smrj 		if (dma->dp_window == NULL) {
420512f080e7Smrj 			/*
420612f080e7Smrj 			 * if seg == NULL, and we don't have any windows,
420712f080e7Smrj 			 * return the first cookie in the sgl.
420812f080e7Smrj 			 */
420912f080e7Smrj 			if (*lenp == NULL) {
421012f080e7Smrj 				dma->dp_current_cookie = 0;
421112f080e7Smrj 				hp->dmai_cookie = dma->dp_cookies;
421212f080e7Smrj 				*objpp = (caddr_t)handle;
421312f080e7Smrj 				return (DDI_SUCCESS);
421412f080e7Smrj 
421512f080e7Smrj 			/* if we have more cookies, go to the next cookie */
421612f080e7Smrj 			} else {
421712f080e7Smrj 				if ((dma->dp_current_cookie + 1) >=
421812f080e7Smrj 				    dma->dp_sglinfo.si_sgl_size) {
421912f080e7Smrj 					return (DDI_DMA_DONE);
422012f080e7Smrj 				}
422112f080e7Smrj 				dma->dp_current_cookie++;
422212f080e7Smrj 				hp->dmai_cookie++;
422312f080e7Smrj 				return (DDI_SUCCESS);
422412f080e7Smrj 			}
422512f080e7Smrj 		}
422612f080e7Smrj 
422712f080e7Smrj 		/* We have one or more windows */
422812f080e7Smrj 		window = &dma->dp_window[dma->dp_current_win];
422912f080e7Smrj 
423012f080e7Smrj 		/*
423112f080e7Smrj 		 * if seg == NULL, return the first cookie in the current
423212f080e7Smrj 		 * window
423312f080e7Smrj 		 */
423412f080e7Smrj 		if (*lenp == NULL) {
423512f080e7Smrj 			dma->dp_current_cookie = 0;
4236cf4e9a1dSmrj 			hp->dmai_cookie = window->wd_first_cookie;
423712f080e7Smrj 
423812f080e7Smrj 		/*
423912f080e7Smrj 		 * go to the next cookie in the window then see if we done with
423912f080e7Smrj 		 * go to the next cookie in the window then see if we're done with
424112f080e7Smrj 		 */
424212f080e7Smrj 		} else {
424312f080e7Smrj 			if ((dma->dp_current_cookie + 1) >=
424412f080e7Smrj 			    window->wd_cookie_cnt) {
424512f080e7Smrj 				return (DDI_DMA_DONE);
424612f080e7Smrj 			}
424712f080e7Smrj 			dma->dp_current_cookie++;
424812f080e7Smrj 			hp->dmai_cookie++;
424912f080e7Smrj 		}
425012f080e7Smrj 		*objpp = (caddr_t)handle;
425112f080e7Smrj 		return (DDI_SUCCESS);
425212f080e7Smrj 
425312f080e7Smrj 	case DDI_DMA_NEXTWIN: /* ddi_dma_nextwin() */
425412f080e7Smrj 		hp = (ddi_dma_impl_t *)handle;
425512f080e7Smrj 		dma = (rootnex_dma_t *)hp->dmai_private;
425612f080e7Smrj 
425712f080e7Smrj 		if ((*offp != NULL) && ((uintptr_t)*offp != (uintptr_t)hp)) {
425812f080e7Smrj 			return (DDI_DMA_STALE);
425912f080e7Smrj 		}
426012f080e7Smrj 
426112f080e7Smrj 		/* if win == NULL, return the first window in the bind */
426212f080e7Smrj 		if (*offp == NULL) {
426312f080e7Smrj 			nwin = 0;
426412f080e7Smrj 
426512f080e7Smrj 		/*
426612f080e7Smrj 		 * else, go to the next window then see if we're done with all
426712f080e7Smrj 		 * the windows.
426812f080e7Smrj 		 */
426912f080e7Smrj 		} else {
427012f080e7Smrj 			nwin = dma->dp_current_win + 1;
427112f080e7Smrj 			if (nwin >= hp->dmai_nwin) {
427212f080e7Smrj 				return (DDI_DMA_DONE);
427312f080e7Smrj 			}
427412f080e7Smrj 		}
427512f080e7Smrj 
427612f080e7Smrj 		/* switch to the next window */
427712f080e7Smrj 		e = rootnex_dma_win(dip, rdip, handle, nwin, &off, &len,
427812f080e7Smrj 		    &lcookie, &ccnt);
427912f080e7Smrj 		ASSERT(e == DDI_SUCCESS);
428012f080e7Smrj 		if (e != DDI_SUCCESS) {
428112f080e7Smrj 			return (DDI_DMA_STALE);
428212f080e7Smrj 		}
428312f080e7Smrj 
428412f080e7Smrj 		/* reset the cookie back to the first cookie in the window */
428512f080e7Smrj 		if (dma->dp_window != NULL) {
428612f080e7Smrj 			window = &dma->dp_window[dma->dp_current_win];
428712f080e7Smrj 			hp->dmai_cookie = window->wd_first_cookie;
428812f080e7Smrj 		} else {
428912f080e7Smrj 			hp->dmai_cookie = dma->dp_cookies;
429012f080e7Smrj 		}
429112f080e7Smrj 
429212f080e7Smrj 		*objpp = (caddr_t)handle;
429312f080e7Smrj 		return (DDI_SUCCESS);
429412f080e7Smrj 
429512f080e7Smrj 	case DDI_DMA_FREE: /* ddi_dma_free() */
429612f080e7Smrj 		(void) rootnex_dma_unbindhdl(dip, rdip, handle);
429712f080e7Smrj 		(void) rootnex_dma_freehdl(dip, rdip, handle);
429812f080e7Smrj 		if (rootnex_state->r_dvma_call_list_id) {
429912f080e7Smrj 			ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
430012f080e7Smrj 		}
430112f080e7Smrj 		return (DDI_SUCCESS);
430212f080e7Smrj 
430312f080e7Smrj 	case DDI_DMA_IOPB_ALLOC:	/* get contiguous DMA-able memory */
430412f080e7Smrj 	case DDI_DMA_SMEM_ALLOC:	/* get contiguous DMA-able memory */
430512f080e7Smrj 		/* should never get here, handled in genunix */
430612f080e7Smrj 		ASSERT(0);
430712f080e7Smrj 		return (DDI_FAILURE);
430812f080e7Smrj 
430912f080e7Smrj 	case DDI_DMA_KVADDR:
431012f080e7Smrj 	case DDI_DMA_GETERR:
431112f080e7Smrj 	case DDI_DMA_COFF:
431212f080e7Smrj 		return (DDI_FAILURE);
431312f080e7Smrj 	}
431412f080e7Smrj 
431512f080e7Smrj 	return (DDI_FAILURE);
431612f080e7Smrj #endif /* defined(__amd64) */
43177c478bd9Sstevel@tonic-gate }
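
/*
 * An illustrative sketch (not part of this file) of how the obsolete 9F
 * routines drive the mctl cases above: ddi_dma_nextwin() lands in
 * DDI_DMA_NEXTWIN, ddi_dma_nextseg() in DDI_DMA_NEXTSEG, and
 * ddi_dma_segtocookie() in DDI_DMA_SEGTOC. A NULL window/segment asks for
 * the first one, per the old contract. The function name is hypothetical.
 * Kept out of compilation on purpose.
 */
#if 0
static int
example_old_walk(ddi_dma_handle_t dh)
{
	ddi_dma_win_t win = NULL, nwin;
	ddi_dma_seg_t seg, nseg;
	ddi_dma_cookie_t cookie;
	off_t off, len;

	while (ddi_dma_nextwin(dh, win, &nwin) == DDI_SUCCESS) {
		seg = NULL;
		while (ddi_dma_nextseg(nwin, seg, &nseg) == DDI_SUCCESS) {
			(void) ddi_dma_segtocookie(nseg, &off, &len,
			    &cookie);
			/* ... program the cookie ... */
			seg = nseg;
		}
		win = nwin;
	}
	return (0);
}
#endif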
43187aec1d6eScindi 
431900d0963fSdilpreet 
432000d0963fSdilpreet /*
432100d0963fSdilpreet  * *********
432200d0963fSdilpreet  *  FMA Code
432300d0963fSdilpreet  * *********
432400d0963fSdilpreet  */
432500d0963fSdilpreet 
432600d0963fSdilpreet /*
432700d0963fSdilpreet  * rootnex_fm_init()
432800d0963fSdilpreet  *    FMA init busop
432900d0963fSdilpreet  */
43307aec1d6eScindi /* ARGSUSED */
43317aec1d6eScindi static int
433200d0963fSdilpreet rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap,
433300d0963fSdilpreet     ddi_iblock_cookie_t *ibc)
43347aec1d6eScindi {
433500d0963fSdilpreet 	*ibc = rootnex_state->r_err_ibc;
433600d0963fSdilpreet 
433700d0963fSdilpreet 	return (ddi_system_fmcap);
433800d0963fSdilpreet }
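
/*
 * An illustrative sketch (not part of this file) of the driver side of this
 * busop: a child's attach path negotiates its FM capabilities with
 * ddi_fm_init(), which ultimately reaches this routine to pick up the
 * system-wide capability level and error iblock cookie. The function name
 * and capability mask are hypothetical. Kept out of compilation on purpose.
 */
#if 0
static void
example_fm_attach(dev_info_t *dip)
{
	int fmcap = DDI_FM_EREPORT_CAPABLE | DDI_FM_DMACHK_CAPABLE;
	ddi_iblock_cookie_t ibc;

	ddi_fm_init(dip, &fmcap, &ibc);
	/* fmcap now holds the negotiated capabilities */
}
#endif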
433900d0963fSdilpreet 
434000d0963fSdilpreet /*
434100d0963fSdilpreet  * rootnex_dma_check()
434200d0963fSdilpreet  *    Function called after a dma fault occurred to find out whether the
434300d0963fSdilpreet  *    fault address is associated with a driver that is able to handle
434400d0963fSdilpreet  *    and recover from faults.
434500d0963fSdilpreet  */
434600d0963fSdilpreet /* ARGSUSED */
434700d0963fSdilpreet static int
434800d0963fSdilpreet rootnex_dma_check(dev_info_t *dip, const void *handle, const void *addr,
434900d0963fSdilpreet     const void *not_used)
435000d0963fSdilpreet {
435100d0963fSdilpreet 	rootnex_window_t *window;
435200d0963fSdilpreet 	uint64_t start_addr;
435300d0963fSdilpreet 	uint64_t fault_addr;
435400d0963fSdilpreet 	ddi_dma_impl_t *hp;
435500d0963fSdilpreet 	rootnex_dma_t *dma;
435600d0963fSdilpreet 	uint64_t end_addr;
435700d0963fSdilpreet 	size_t csize;
435800d0963fSdilpreet 	int i;
435900d0963fSdilpreet 	int j;
436000d0963fSdilpreet 
436100d0963fSdilpreet 
436200d0963fSdilpreet 	/* The driver has to set DDI_DMA_FLAGERR to recover from dma faults */
436300d0963fSdilpreet 	hp = (ddi_dma_impl_t *)handle;
436400d0963fSdilpreet 	ASSERT(hp);
436500d0963fSdilpreet 
436600d0963fSdilpreet 	dma = (rootnex_dma_t *)hp->dmai_private;
436700d0963fSdilpreet 
436800d0963fSdilpreet 	/* Get the address that we need to search for */
436900d0963fSdilpreet 	fault_addr = *(uint64_t *)addr;
437000d0963fSdilpreet 
437100d0963fSdilpreet 	/*
437200d0963fSdilpreet 	 * if we don't have any windows, we can just walk through all the
437300d0963fSdilpreet 	 * cookies.
437400d0963fSdilpreet 	 */
437500d0963fSdilpreet 	if (dma->dp_window == NULL) {
437600d0963fSdilpreet 		/* for each cookie */
437700d0963fSdilpreet 		for (i = 0; i < dma->dp_sglinfo.si_sgl_size; i++) {
437800d0963fSdilpreet 			/*
437900d0963fSdilpreet 			 * if the faulted address is within the physical address
438000d0963fSdilpreet 			 * range of the cookie, return DDI_FM_NONFATAL.
438100d0963fSdilpreet 			 */
438200d0963fSdilpreet 			if ((fault_addr >= dma->dp_cookies[i].dmac_laddress) &&
438300d0963fSdilpreet 			    (fault_addr <= (dma->dp_cookies[i].dmac_laddress +
438400d0963fSdilpreet 			    dma->dp_cookies[i].dmac_size))) {
438500d0963fSdilpreet 				return (DDI_FM_NONFATAL);
438600d0963fSdilpreet 			}
438700d0963fSdilpreet 		}
438800d0963fSdilpreet 
438900d0963fSdilpreet 		/* fault_addr not within this DMA handle */
439000d0963fSdilpreet 		return (DDI_FM_UNKNOWN);
439100d0963fSdilpreet 	}
439200d0963fSdilpreet 
439300d0963fSdilpreet 	/* we have multiple windows, walk through each window */
439400d0963fSdilpreet 	for (i = 0; i < hp->dmai_nwin; i++) {
439500d0963fSdilpreet 		window = &dma->dp_window[i];
439600d0963fSdilpreet 
439700d0963fSdilpreet 		/* Go through all the cookies in the window */
439800d0963fSdilpreet 		for (j = 0; j < window->wd_cookie_cnt; j++) {
439900d0963fSdilpreet 
440000d0963fSdilpreet 			start_addr = window->wd_first_cookie[j].dmac_laddress;
440100d0963fSdilpreet 			csize = window->wd_first_cookie[j].dmac_size;
440200d0963fSdilpreet 
440300d0963fSdilpreet 			/*
440400d0963fSdilpreet 			 * if we are trimming the first cookie in the window,
440500d0963fSdilpreet 			 * and this is the first cookie, adjust the start
440600d0963fSdilpreet 			 * address and size of the cookie to account for the
440700d0963fSdilpreet 			 * trim.
440800d0963fSdilpreet 			 */
440900d0963fSdilpreet 			if (window->wd_trim.tr_trim_first && (j == 0)) {
441000d0963fSdilpreet 				start_addr = window->wd_trim.tr_first_paddr;
441100d0963fSdilpreet 				csize = window->wd_trim.tr_first_size;
441200d0963fSdilpreet 			}
441300d0963fSdilpreet 
441400d0963fSdilpreet 			/*
441500d0963fSdilpreet 			 * if we are trimming the last cookie in the window,
441600d0963fSdilpreet 			 * and this is the last cookie, adjust the start
441700d0963fSdilpreet 			 * address and size of the cookie to account for the
441800d0963fSdilpreet 			 * trim.
441900d0963fSdilpreet 			 */
442000d0963fSdilpreet 			if (window->wd_trim.tr_trim_last &&
442100d0963fSdilpreet 			    (j == (window->wd_cookie_cnt - 1))) {
442200d0963fSdilpreet 				start_addr = window->wd_trim.tr_last_paddr;
442300d0963fSdilpreet 				csize = window->wd_trim.tr_last_size;
442400d0963fSdilpreet 			}
442500d0963fSdilpreet 
442600d0963fSdilpreet 			end_addr = start_addr + csize;
442700d0963fSdilpreet 
442800d0963fSdilpreet 			/*
442900d0963fSdilpreet 			 * if the faulted address is within the physical address
443000d0963fSdilpreet 			 * range of the cookie, return DDI_FM_NONFATAL.
443100d0963fSdilpreet 			 */
443200d0963fSdilpreet 			if ((fault_addr >= start_addr) &&
443300d0963fSdilpreet 			    (fault_addr <= end_addr)) {
443400d0963fSdilpreet 				return (DDI_FM_NONFATAL);
443500d0963fSdilpreet 			}
443600d0963fSdilpreet 		}
443700d0963fSdilpreet 	}
443800d0963fSdilpreet 
443900d0963fSdilpreet 	/* fault_addr not within this DMA handle */
444000d0963fSdilpreet 	return (DDI_FM_UNKNOWN);
44417aec1d6eScindi }
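
/*
 * Note that this lookup only helps a driver that opted in: the handle must
 * have been allocated from a ddi_dma_attr_t with DDI_DMA_FLAGERR set,
 * e.g. (a sketch):
 *
 *	dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
 */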
4442