xref: /illumos-gate/usr/src/uts/i86pc/io/rootnex.c (revision 0b16192fc3f190a96eb5b0c4e1eb3018af728e39)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 /*
25  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
26  * Copyright (c) 2011 Bayard G. Bell.  All rights reserved.
27  * Copyright 2012 Garrett D'Amore <garrett@damore.org>.  All rights reserved.
28  * Copyright 2017 Joyent, Inc.
29  */
30 
31 /*
32  * x86 root nexus driver
33  */
34 
35 #include <sys/sysmacros.h>
36 #include <sys/conf.h>
37 #include <sys/autoconf.h>
38 #include <sys/sysmacros.h>
39 #include <sys/debug.h>
40 #include <sys/psw.h>
41 #include <sys/ddidmareq.h>
42 #include <sys/promif.h>
43 #include <sys/devops.h>
44 #include <sys/kmem.h>
45 #include <sys/cmn_err.h>
46 #include <vm/seg.h>
47 #include <vm/seg_kmem.h>
48 #include <vm/seg_dev.h>
49 #include <sys/vmem.h>
50 #include <sys/mman.h>
51 #include <vm/hat.h>
52 #include <vm/as.h>
53 #include <vm/page.h>
54 #include <sys/avintr.h>
55 #include <sys/errno.h>
56 #include <sys/modctl.h>
57 #include <sys/ddi_impldefs.h>
58 #include <sys/sunddi.h>
59 #include <sys/sunndi.h>
60 #include <sys/mach_intr.h>
61 #include <sys/psm.h>
62 #include <sys/ontrap.h>
63 #include <sys/atomic.h>
64 #include <sys/sdt.h>
65 #include <sys/rootnex.h>
66 #include <vm/hat_i86.h>
67 #include <sys/ddifm.h>
68 #include <sys/ddi_isa.h>
69 #include <sys/apic.h>
70 
71 #ifdef __xpv
72 #include <sys/bootinfo.h>
73 #include <sys/hypervisor.h>
74 #include <sys/bootconf.h>
75 #include <vm/kboot_mmu.h>
76 #endif
77 
78 #if defined(__amd64) && !defined(__xpv)
79 #include <sys/immu.h>
80 #endif
81 
82 
83 /*
84  * enable/disable extra checking of function parameters. Useful for debugging
85  * drivers.
86  */
87 #ifdef	DEBUG
88 int rootnex_alloc_check_parms = 1;
89 int rootnex_bind_check_parms = 1;
90 int rootnex_bind_check_inuse = 1;
91 int rootnex_unbind_verify_buffer = 0;
92 int rootnex_sync_check_parms = 1;
93 #else
94 int rootnex_alloc_check_parms = 0;
95 int rootnex_bind_check_parms = 0;
96 int rootnex_bind_check_inuse = 0;
97 int rootnex_unbind_verify_buffer = 0;
98 int rootnex_sync_check_parms = 0;
99 #endif
100 
101 boolean_t rootnex_dmar_not_setup;
102 
103 /* Master Abort and Target Abort panic flag */
104 int rootnex_fm_ma_ta_panic_flag = 0;
105 
106 /* Semi-temporary patchables to phase in bug fixes, test drivers, etc. */
107 int rootnex_bind_fail = 1;
108 int rootnex_bind_warn = 1;
109 uint8_t *rootnex_warn_list;
110 /* bitmasks for rootnex_warn_list. Up to 8 different warnings with uint8_t */
111 #define	ROOTNEX_BIND_WARNING	(0x1 << 0)
112 
113 /*
114  * revert back to old broken behavior of always sync'ing entire copy buffer.
115  * This is useful if be have a buggy driver which doesn't correctly pass in
116  * the offset and size into ddi_dma_sync().
117  */
118 int rootnex_sync_ignore_params = 0;
119 
120 /*
121  * For the 64-bit kernel, pre-alloc enough cookies for a 256K buffer plus 1
122  * page for alignment. For the 32-bit kernel, pre-alloc enough cookies for a
123  * 64K buffer plus 1 page for alignment (we have less kernel space in a 32-bit
124  * kernel). Allocate enough windows to handle a 256K buffer w/ at least 65
125  * sgllen DMA engine, and enough copybuf buffer state pages to handle 2 pages
126  * (< 8K). We will still need to allocate the copy buffer during bind though
127  * (if we need one). These can only be modified in /etc/system before rootnex
128  * attach.
129  */
130 #if defined(__amd64)
131 int rootnex_prealloc_cookies = 65;
132 int rootnex_prealloc_windows = 4;
133 int rootnex_prealloc_copybuf = 2;
134 #else
135 int rootnex_prealloc_cookies = 33;
136 int rootnex_prealloc_windows = 4;
137 int rootnex_prealloc_copybuf = 2;
138 #endif
139 
140 /* driver global state */
141 static rootnex_state_t *rootnex_state;
142 
143 #ifdef DEBUG
144 /* shortcut to rootnex counters */
145 static uint64_t *rootnex_cnt;
146 #endif
147 
148 /*
149  * XXX - does x86 even need these or are they left over from the SPARC days?
150  */
151 /* statically defined integer/boolean properties for the root node */
152 static rootnex_intprop_t rootnex_intprp[] = {
153 	{ "PAGESIZE",			PAGESIZE },
154 	{ "MMU_PAGESIZE",		MMU_PAGESIZE },
155 	{ "MMU_PAGEOFFSET",		MMU_PAGEOFFSET },
156 	{ DDI_RELATIVE_ADDRESSING,	1 },
157 };
158 #define	NROOT_INTPROPS	(sizeof (rootnex_intprp) / sizeof (rootnex_intprop_t))
159 
160 /*
161  * If we're dom0, we're using a real device so we need to load
162  * the cookies with MFNs instead of PFNs.
163  */
164 #ifdef __xpv
165 typedef maddr_t rootnex_addr_t;
166 #define	ROOTNEX_PADDR_TO_RBASE(pa)	\
167 	(DOMAIN_IS_INITDOMAIN(xen_info) ? pa_to_ma(pa) : (pa))
168 #else
169 typedef paddr_t rootnex_addr_t;
170 #define	ROOTNEX_PADDR_TO_RBASE(pa)	(pa)
171 #endif
172 
173 static struct cb_ops rootnex_cb_ops = {
174 	nodev,		/* open */
175 	nodev,		/* close */
176 	nodev,		/* strategy */
177 	nodev,		/* print */
178 	nodev,		/* dump */
179 	nodev,		/* read */
180 	nodev,		/* write */
181 	nodev,		/* ioctl */
182 	nodev,		/* devmap */
183 	nodev,		/* mmap */
184 	nodev,		/* segmap */
185 	nochpoll,	/* chpoll */
186 	ddi_prop_op,	/* cb_prop_op */
187 	NULL,		/* struct streamtab */
188 	D_NEW | D_MP | D_HOTPLUG, /* compatibility flags */
189 	CB_REV,		/* Rev */
190 	nodev,		/* cb_aread */
191 	nodev		/* cb_awrite */
192 };
193 
194 static int rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
195     off_t offset, off_t len, caddr_t *vaddrp);
196 static int rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip,
197     struct hat *hat, struct seg *seg, caddr_t addr,
198     struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock);
199 static int rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
200     ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
201     ddi_dma_handle_t *handlep);
202 static int rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
203     ddi_dma_handle_t handle);
204 static int rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
205     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
206     ddi_dma_cookie_t *cookiep, uint_t *ccountp);
207 static int rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
208     ddi_dma_handle_t handle);
209 static int rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip,
210     ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags);
211 static int rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip,
212     ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp,
213     ddi_dma_cookie_t *cookiep, uint_t *ccountp);
214 static int rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
215     ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
216     off_t *offp, size_t *lenp, caddr_t *objp, uint_t cache_flags);
217 static int rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip,
218     ddi_ctl_enum_t ctlop, void *arg, void *result);
219 static int rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap,
220     ddi_iblock_cookie_t *ibc);
221 static int rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip,
222     ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp, void *result);
223 static int rootnex_alloc_intr_fixed(dev_info_t *, ddi_intr_handle_impl_t *,
224     void *);
225 static int rootnex_free_intr_fixed(dev_info_t *, ddi_intr_handle_impl_t *);
226 
227 static int rootnex_coredma_allochdl(dev_info_t *dip, dev_info_t *rdip,
228     ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
229     ddi_dma_handle_t *handlep);
230 static int rootnex_coredma_freehdl(dev_info_t *dip, dev_info_t *rdip,
231     ddi_dma_handle_t handle);
232 static int rootnex_coredma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
233     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
234     ddi_dma_cookie_t *cookiep, uint_t *ccountp);
235 static int rootnex_coredma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
236     ddi_dma_handle_t handle);
237 #if defined(__amd64) && !defined(__xpv)
238 static void rootnex_coredma_reset_cookies(dev_info_t *dip,
239     ddi_dma_handle_t handle);
240 static int rootnex_coredma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
241     ddi_dma_cookie_t **cookiepp, uint_t *ccountp);
242 static int rootnex_coredma_set_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
243     ddi_dma_cookie_t *cookiep, uint_t ccount);
244 static int rootnex_coredma_clear_cookies(dev_info_t *dip,
245     ddi_dma_handle_t handle);
246 static int rootnex_coredma_get_sleep_flags(ddi_dma_handle_t handle);
247 #endif
248 static int rootnex_coredma_sync(dev_info_t *dip, dev_info_t *rdip,
249     ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags);
250 static int rootnex_coredma_win(dev_info_t *dip, dev_info_t *rdip,
251     ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp,
252     ddi_dma_cookie_t *cookiep, uint_t *ccountp);
253 
254 #if defined(__amd64) && !defined(__xpv)
255 static int rootnex_coredma_hdl_setprivate(dev_info_t *dip, dev_info_t *rdip,
256     ddi_dma_handle_t handle, void *v);
257 static void *rootnex_coredma_hdl_getprivate(dev_info_t *dip, dev_info_t *rdip,
258     ddi_dma_handle_t handle);
259 #endif
260 
261 
262 static struct bus_ops rootnex_bus_ops = {
263 	BUSO_REV,
264 	rootnex_map,
265 	NULL,
266 	NULL,
267 	NULL,
268 	rootnex_map_fault,
269 	0,
270 	rootnex_dma_allochdl,
271 	rootnex_dma_freehdl,
272 	rootnex_dma_bindhdl,
273 	rootnex_dma_unbindhdl,
274 	rootnex_dma_sync,
275 	rootnex_dma_win,
276 	rootnex_dma_mctl,
277 	rootnex_ctlops,
278 	ddi_bus_prop_op,
279 	i_ddi_rootnex_get_eventcookie,
280 	i_ddi_rootnex_add_eventcall,
281 	i_ddi_rootnex_remove_eventcall,
282 	i_ddi_rootnex_post_event,
283 	0,			/* bus_intr_ctl */
284 	0,			/* bus_config */
285 	0,			/* bus_unconfig */
286 	rootnex_fm_init,	/* bus_fm_init */
287 	NULL,			/* bus_fm_fini */
288 	NULL,			/* bus_fm_access_enter */
289 	NULL,			/* bus_fm_access_exit */
290 	NULL,			/* bus_powr */
291 	rootnex_intr_ops	/* bus_intr_op */
292 };
293 
294 static int rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
295 static int rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
296 static int rootnex_quiesce(dev_info_t *dip);
297 
298 static struct dev_ops rootnex_ops = {
299 	DEVO_REV,
300 	0,
301 	ddi_no_info,
302 	nulldev,
303 	nulldev,
304 	rootnex_attach,
305 	rootnex_detach,
306 	nulldev,
307 	&rootnex_cb_ops,
308 	&rootnex_bus_ops,
309 	NULL,
310 	rootnex_quiesce,		/* quiesce */
311 };
312 
313 static struct modldrv rootnex_modldrv = {
314 	&mod_driverops,
315 	"i86pc root nexus",
316 	&rootnex_ops
317 };
318 
319 static struct modlinkage rootnex_modlinkage = {
320 	MODREV_1,
321 	(void *)&rootnex_modldrv,
322 	NULL
323 };
324 
325 #if defined(__amd64) && !defined(__xpv)
326 static iommulib_nexops_t iommulib_nexops = {
327 	IOMMU_NEXOPS_VERSION,
328 	"Rootnex IOMMU ops Vers 1.1",
329 	NULL,
330 	rootnex_coredma_allochdl,
331 	rootnex_coredma_freehdl,
332 	rootnex_coredma_bindhdl,
333 	rootnex_coredma_unbindhdl,
334 	rootnex_coredma_reset_cookies,
335 	rootnex_coredma_get_cookies,
336 	rootnex_coredma_set_cookies,
337 	rootnex_coredma_clear_cookies,
338 	rootnex_coredma_get_sleep_flags,
339 	rootnex_coredma_sync,
340 	rootnex_coredma_win,
341 	rootnex_coredma_hdl_setprivate,
342 	rootnex_coredma_hdl_getprivate
343 };
344 #endif
345 
346 /*
347  *  extern hacks
348  */
349 extern struct seg_ops segdev_ops;
350 extern int ignore_hardware_nodes;	/* force flag from ddi_impl.c */
351 #ifdef	DDI_MAP_DEBUG
352 extern int ddi_map_debug_flag;
353 #define	ddi_map_debug	if (ddi_map_debug_flag) prom_printf
354 #endif
355 extern void i86_pp_map(page_t *pp, caddr_t kaddr);
356 extern void i86_va_map(caddr_t vaddr, struct as *asp, caddr_t kaddr);
357 extern int (*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *,
358     psm_intr_op_t, int *);
359 extern int impl_ddi_sunbus_initchild(dev_info_t *dip);
360 extern void impl_ddi_sunbus_removechild(dev_info_t *dip);
361 
362 /*
363  * Use device arena to use for device control register mappings.
364  * Various kernel memory walkers (debugger, dtrace) need to know
365  * to avoid this address range to prevent undesired device activity.
366  */
367 extern void *device_arena_alloc(size_t size, int vm_flag);
368 extern void device_arena_free(void * vaddr, size_t size);
369 
370 
371 /*
372  *  Internal functions
373  */
374 static int rootnex_dma_init();
375 static void rootnex_add_props(dev_info_t *);
376 static int rootnex_ctl_reportdev(dev_info_t *dip);
377 static struct intrspec *rootnex_get_ispec(dev_info_t *rdip, int inum);
378 static int rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp);
379 static int rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp);
380 static int rootnex_map_handle(ddi_map_req_t *mp);
381 static void rootnex_clean_dmahdl(ddi_dma_impl_t *hp);
382 static int rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegsize);
383 static int rootnex_valid_bind_parms(ddi_dma_req_t *dmareq,
384     ddi_dma_attr_t *attr);
385 static void rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
386     rootnex_sglinfo_t *sglinfo);
387 static void rootnex_dvma_get_sgl(ddi_dma_obj_t *dmar_object,
388     ddi_dma_cookie_t *sgl, rootnex_sglinfo_t *sglinfo);
389 static int rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
390     rootnex_dma_t *dma, ddi_dma_attr_t *attr, ddi_dma_obj_t *dmao, int kmflag);
391 static int rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
392     rootnex_dma_t *dma, ddi_dma_attr_t *attr);
393 static void rootnex_teardown_copybuf(rootnex_dma_t *dma);
394 static int rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
395     ddi_dma_attr_t *attr, ddi_dma_obj_t *dmao, int kmflag);
396 static void rootnex_teardown_windows(rootnex_dma_t *dma);
397 static void rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
398     rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset);
399 static void rootnex_setup_cookie(ddi_dma_obj_t *dmar_object,
400     rootnex_dma_t *dma, ddi_dma_cookie_t *cookie, off_t cur_offset,
401     size_t *copybuf_used, page_t **cur_pp);
402 static int rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp,
403     rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie,
404     ddi_dma_attr_t *attr, off_t cur_offset);
405 static int rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp,
406     rootnex_dma_t *dma, rootnex_window_t **windowp,
407     ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used);
408 static int rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp,
409     rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie);
410 static int rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win,
411     off_t offset, size_t size, uint_t cache_flags);
412 static int rootnex_verify_buffer(rootnex_dma_t *dma);
413 static int rootnex_dma_check(dev_info_t *dip, const void *handle,
414     const void *comp_addr, const void *not_used);
415 static boolean_t rootnex_need_bounce_seg(ddi_dma_obj_t *dmar_object,
416     rootnex_sglinfo_t *sglinfo);
417 static struct as *rootnex_get_as(ddi_dma_obj_t *dmar_object);
418 
419 /*
420  * _init()
421  *
422  */
423 int
424 _init(void)
425 {
426 
427 	rootnex_state = NULL;
428 	return (mod_install(&rootnex_modlinkage));
429 }
430 
431 
432 /*
433  * _info()
434  *
435  */
436 int
437 _info(struct modinfo *modinfop)
438 {
439 	return (mod_info(&rootnex_modlinkage, modinfop));
440 }
441 
442 
443 /*
444  * _fini()
445  *
446  */
447 int
448 _fini(void)
449 {
450 	return (EBUSY);
451 }
452 
453 
454 /*
455  * rootnex_attach()
456  *
457  */
458 static int
459 rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
460 {
461 	int fmcap;
462 	int e;
463 
464 	switch (cmd) {
465 	case DDI_ATTACH:
466 		break;
467 	case DDI_RESUME:
468 #if defined(__amd64) && !defined(__xpv)
469 		return (immu_unquiesce());
470 #else
471 		return (DDI_SUCCESS);
472 #endif
473 	default:
474 		return (DDI_FAILURE);
475 	}
476 
477 	/*
478 	 * We should only have one instance of rootnex. Save it away since we
479 	 * don't have an easy way to get it back later.
480 	 */
481 	ASSERT(rootnex_state == NULL);
482 	rootnex_state = kmem_zalloc(sizeof (rootnex_state_t), KM_SLEEP);
483 
484 	rootnex_state->r_dip = dip;
485 	rootnex_state->r_err_ibc = (ddi_iblock_cookie_t)ipltospl(15);
486 	rootnex_state->r_reserved_msg_printed = B_FALSE;
487 #ifdef DEBUG
488 	rootnex_cnt = &rootnex_state->r_counters[0];
489 #endif
490 
491 	/*
492 	 * Set minimum fm capability level for i86pc platforms and then
493 	 * initialize error handling. Since we're the rootnex, we don't
494 	 * care what's returned in the fmcap field.
495 	 */
496 	ddi_system_fmcap = DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE |
497 	    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE;
498 	fmcap = ddi_system_fmcap;
499 	ddi_fm_init(dip, &fmcap, &rootnex_state->r_err_ibc);
500 
501 	/* initialize DMA related state */
502 	e = rootnex_dma_init();
503 	if (e != DDI_SUCCESS) {
504 		kmem_free(rootnex_state, sizeof (rootnex_state_t));
505 		return (DDI_FAILURE);
506 	}
507 
508 	/* Add static root node properties */
509 	rootnex_add_props(dip);
510 
511 	/* since we can't call ddi_report_dev() */
512 	cmn_err(CE_CONT, "?root nexus = %s\n", ddi_get_name(dip));
513 
514 	/* Initialize rootnex event handle */
515 	i_ddi_rootnex_init_events(dip);
516 
517 #if defined(__amd64) && !defined(__xpv)
518 	e = iommulib_nexus_register(dip, &iommulib_nexops,
519 	    &rootnex_state->r_iommulib_handle);
520 
521 	ASSERT(e == DDI_SUCCESS);
522 #endif
523 
524 	return (DDI_SUCCESS);
525 }
526 
527 
528 /*
529  * rootnex_detach()
530  *
531  */
532 /*ARGSUSED*/
533 static int
534 rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
535 {
536 	switch (cmd) {
537 	case DDI_SUSPEND:
538 #if defined(__amd64) && !defined(__xpv)
539 		return (immu_quiesce());
540 #else
541 		return (DDI_SUCCESS);
542 #endif
543 	default:
544 		return (DDI_FAILURE);
545 	}
546 	/*NOTREACHED*/
547 
548 }
549 
550 
551 /*
552  * rootnex_dma_init()
553  *
554  */
555 /*ARGSUSED*/
556 static int
557 rootnex_dma_init()
558 {
559 	size_t bufsize;
560 
561 
562 	/*
563 	 * size of our cookie/window/copybuf state needed in dma bind that we
564 	 * pre-alloc in dma_alloc_handle
565 	 */
566 	rootnex_state->r_prealloc_cookies = rootnex_prealloc_cookies;
567 	rootnex_state->r_prealloc_size =
568 	    (rootnex_state->r_prealloc_cookies * sizeof (ddi_dma_cookie_t)) +
569 	    (rootnex_prealloc_windows * sizeof (rootnex_window_t)) +
570 	    (rootnex_prealloc_copybuf * sizeof (rootnex_pgmap_t));
571 
572 	/*
573 	 * setup DDI DMA handle kmem cache, align each handle on 64 bytes,
574 	 * allocate 16 extra bytes for struct pointer alignment
575 	 * (p->dmai_private & dma->dp_prealloc_buffer)
576 	 */
577 	bufsize = sizeof (ddi_dma_impl_t) + sizeof (rootnex_dma_t) +
578 	    rootnex_state->r_prealloc_size + 0x10;
579 	rootnex_state->r_dmahdl_cache = kmem_cache_create("rootnex_dmahdl",
580 	    bufsize, 64, NULL, NULL, NULL, NULL, NULL, 0);
581 	if (rootnex_state->r_dmahdl_cache == NULL) {
582 		return (DDI_FAILURE);
583 	}
584 
585 	/*
586 	 * allocate array to track which major numbers we have printed warnings
587 	 * for.
588 	 */
589 	rootnex_warn_list = kmem_zalloc(devcnt * sizeof (*rootnex_warn_list),
590 	    KM_SLEEP);
591 
592 	return (DDI_SUCCESS);
593 }
594 
595 
596 /*
597  * rootnex_add_props()
598  *
599  */
600 static void
601 rootnex_add_props(dev_info_t *dip)
602 {
603 	rootnex_intprop_t *rpp;
604 	int i;
605 
606 	/* Add static integer/boolean properties to the root node */
607 	rpp = rootnex_intprp;
608 	for (i = 0; i < NROOT_INTPROPS; i++) {
609 		(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip,
610 		    rpp[i].prop_name, rpp[i].prop_value);
611 	}
612 }
613 
614 
615 
616 /*
617  * *************************
618  *  ctlops related routines
619  * *************************
620  */
621 
622 /*
623  * rootnex_ctlops()
624  *
625  */
626 /*ARGSUSED*/
627 static int
628 rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
629     void *arg, void *result)
630 {
631 	int n, *ptr;
632 	struct ddi_parent_private_data *pdp;
633 
634 	switch (ctlop) {
635 	case DDI_CTLOPS_DMAPMAPC:
636 		/*
637 		 * Return 'partial' to indicate that dma mapping
638 		 * has to be done in the main MMU.
639 		 */
640 		return (DDI_DMA_PARTIAL);
641 
642 	case DDI_CTLOPS_BTOP:
643 		/*
644 		 * Convert byte count input to physical page units.
645 		 * (byte counts that are not a page-size multiple
646 		 * are rounded down)
647 		 */
648 		*(ulong_t *)result = btop(*(ulong_t *)arg);
649 		return (DDI_SUCCESS);
650 
651 	case DDI_CTLOPS_PTOB:
652 		/*
653 		 * Convert size in physical pages to bytes
654 		 */
655 		*(ulong_t *)result = ptob(*(ulong_t *)arg);
656 		return (DDI_SUCCESS);
657 
658 	case DDI_CTLOPS_BTOPR:
659 		/*
660 		 * Convert byte count input to physical page units
661 		 * (byte counts that are not a page-size multiple
662 		 * are rounded up)
663 		 */
664 		*(ulong_t *)result = btopr(*(ulong_t *)arg);
665 		return (DDI_SUCCESS);
666 
667 	case DDI_CTLOPS_INITCHILD:
668 		return (impl_ddi_sunbus_initchild(arg));
669 
670 	case DDI_CTLOPS_UNINITCHILD:
671 		impl_ddi_sunbus_removechild(arg);
672 		return (DDI_SUCCESS);
673 
674 	case DDI_CTLOPS_REPORTDEV:
675 		return (rootnex_ctl_reportdev(rdip));
676 
677 	case DDI_CTLOPS_IOMIN:
678 		/*
679 		 * Nothing to do here but reflect back..
680 		 */
681 		return (DDI_SUCCESS);
682 
683 	case DDI_CTLOPS_REGSIZE:
684 	case DDI_CTLOPS_NREGS:
685 		break;
686 
687 	case DDI_CTLOPS_SIDDEV:
688 		if (ndi_dev_is_prom_node(rdip))
689 			return (DDI_SUCCESS);
690 		if (ndi_dev_is_persistent_node(rdip))
691 			return (DDI_SUCCESS);
692 		return (DDI_FAILURE);
693 
694 	case DDI_CTLOPS_POWER:
695 		return ((*pm_platform_power)((power_req_t *)arg));
696 
697 	case DDI_CTLOPS_RESERVED0: /* Was DDI_CTLOPS_NINTRS, obsolete */
698 	case DDI_CTLOPS_RESERVED1: /* Was DDI_CTLOPS_POKE_INIT, obsolete */
699 	case DDI_CTLOPS_RESERVED2: /* Was DDI_CTLOPS_POKE_FLUSH, obsolete */
700 	case DDI_CTLOPS_RESERVED3: /* Was DDI_CTLOPS_POKE_FINI, obsolete */
701 	case DDI_CTLOPS_RESERVED4: /* Was DDI_CTLOPS_INTR_HILEVEL, obsolete */
702 	case DDI_CTLOPS_RESERVED5: /* Was DDI_CTLOPS_XLATE_INTRS, obsolete */
703 		if (!rootnex_state->r_reserved_msg_printed) {
704 			rootnex_state->r_reserved_msg_printed = B_TRUE;
705 			cmn_err(CE_WARN, "Failing ddi_ctlops call(s) for "
706 			    "1 or more reserved/obsolete operations.");
707 		}
708 		return (DDI_FAILURE);
709 
710 	default:
711 		return (DDI_FAILURE);
712 	}
713 	/*
714 	 * The rest are for "hardware" properties
715 	 */
716 	if ((pdp = ddi_get_parent_data(rdip)) == NULL)
717 		return (DDI_FAILURE);
718 
719 	if (ctlop == DDI_CTLOPS_NREGS) {
720 		ptr = (int *)result;
721 		*ptr = pdp->par_nreg;
722 	} else {
723 		off_t *size = (off_t *)result;
724 
725 		ptr = (int *)arg;
726 		n = *ptr;
727 		if (n >= pdp->par_nreg) {
728 			return (DDI_FAILURE);
729 		}
730 		*size = (off_t)pdp->par_reg[n].regspec_size;
731 	}
732 	return (DDI_SUCCESS);
733 }
734 
735 
736 /*
737  * rootnex_ctl_reportdev()
738  *
739  */
740 static int
741 rootnex_ctl_reportdev(dev_info_t *dev)
742 {
743 	int i, n, len, f_len = 0;
744 	char *buf;
745 
746 	buf = kmem_alloc(REPORTDEV_BUFSIZE, KM_SLEEP);
747 	f_len += snprintf(buf, REPORTDEV_BUFSIZE,
748 	    "%s%d at root", ddi_driver_name(dev), ddi_get_instance(dev));
749 	len = strlen(buf);
750 
751 	for (i = 0; i < sparc_pd_getnreg(dev); i++) {
752 
753 		struct regspec *rp = sparc_pd_getreg(dev, i);
754 
755 		if (i == 0)
756 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
757 			    ": ");
758 		else
759 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
760 			    " and ");
761 		len = strlen(buf);
762 
763 		switch (rp->regspec_bustype) {
764 
765 		case BTEISA:
766 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
767 			    "%s 0x%x", DEVI_EISA_NEXNAME, rp->regspec_addr);
768 			break;
769 
770 		case BTISA:
771 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
772 			    "%s 0x%x", DEVI_ISA_NEXNAME, rp->regspec_addr);
773 			break;
774 
775 		default:
776 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
777 			    "space %x offset %x",
778 			    rp->regspec_bustype, rp->regspec_addr);
779 			break;
780 		}
781 		len = strlen(buf);
782 	}
783 	for (i = 0, n = sparc_pd_getnintr(dev); i < n; i++) {
784 		int pri;
785 
786 		if (i != 0) {
787 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
788 			    ",");
789 			len = strlen(buf);
790 		}
791 		pri = INT_IPL(sparc_pd_getintr(dev, i)->intrspec_pri);
792 		f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
793 		    " sparc ipl %d", pri);
794 		len = strlen(buf);
795 	}
796 #ifdef DEBUG
797 	if (f_len + 1 >= REPORTDEV_BUFSIZE) {
798 		cmn_err(CE_NOTE, "next message is truncated: "
799 		    "printed length 1024, real length %d", f_len);
800 	}
801 #endif /* DEBUG */
802 	cmn_err(CE_CONT, "?%s\n", buf);
803 	kmem_free(buf, REPORTDEV_BUFSIZE);
804 	return (DDI_SUCCESS);
805 }
806 
807 
808 /*
809  * ******************
810  *  map related code
811  * ******************
812  */
813 
814 /*
815  * rootnex_map()
816  *
817  */
818 static int
819 rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, off_t offset,
820     off_t len, caddr_t *vaddrp)
821 {
822 	struct regspec *orp = NULL;
823 	struct regspec64 rp = { 0 };
824 	ddi_map_req_t mr = *mp;		/* Get private copy of request */
825 
826 	mp = &mr;
827 
828 	switch (mp->map_op)  {
829 	case DDI_MO_MAP_LOCKED:
830 	case DDI_MO_UNMAP:
831 	case DDI_MO_MAP_HANDLE:
832 		break;
833 	default:
834 #ifdef	DDI_MAP_DEBUG
835 		cmn_err(CE_WARN, "rootnex_map: unimplemented map op %d.",
836 		    mp->map_op);
837 #endif	/* DDI_MAP_DEBUG */
838 		return (DDI_ME_UNIMPLEMENTED);
839 	}
840 
841 	if (mp->map_flags & DDI_MF_USER_MAPPING)  {
842 #ifdef	DDI_MAP_DEBUG
843 		cmn_err(CE_WARN, "rootnex_map: unimplemented map type: user.");
844 #endif	/* DDI_MAP_DEBUG */
845 		return (DDI_ME_UNIMPLEMENTED);
846 	}
847 
848 	/*
849 	 * First, we need to get the original regspec out before we convert it
850 	 * to the extended format. If we have a register number, then we need to
851 	 * convert that to a regspec.
852 	 */
853 	if (mp->map_type == DDI_MT_RNUMBER)  {
854 
855 		int rnumber = mp->map_obj.rnumber;
856 #ifdef	DDI_MAP_DEBUG
857 		static char *out_of_range =
858 		    "rootnex_map: Out of range rnumber <%d>, device <%s>";
859 #endif	/* DDI_MAP_DEBUG */
860 
861 		orp = i_ddi_rnumber_to_regspec(rdip, rnumber);
862 		if (orp == NULL) {
863 #ifdef	DDI_MAP_DEBUG
864 			cmn_err(CE_WARN, out_of_range, rnumber,
865 			    ddi_get_name(rdip));
866 #endif	/* DDI_MAP_DEBUG */
867 			return (DDI_ME_RNUMBER_RANGE);
868 		}
869 	} else if (!(mp->map_flags & DDI_MF_EXT_REGSPEC)) {
870 		orp = mp->map_obj.rp;
871 	}
872 
873 	/*
874 	 * Ensure that we are always using a 64-bit extended regspec regardless
875 	 * of what was passed into us. If the child driver is using a 64-bit
876 	 * regspec, then we need to make sure that we copy this to the local
877 	 * regspec64, rp.
878 	 */
879 	if (orp != NULL) {
880 		rp.regspec_bustype = orp->regspec_bustype;
881 		rp.regspec_addr = orp->regspec_addr;
882 		rp.regspec_size = orp->regspec_size;
883 	} else {
884 		struct regspec64 *rp64;
885 		rp64 = (struct regspec64 *)mp->map_obj.rp;
886 		rp = *rp64;
887 	}
888 
889 	mp->map_type = DDI_MT_REGSPEC;
890 	mp->map_flags |= DDI_MF_EXT_REGSPEC;
891 	mp->map_obj.rp = (struct regspec *)&rp;
892 
893 	/*
894 	 * Adjust offset and length correspnding to called values...
895 	 * XXX: A non-zero length means override the one in the regspec
896 	 * XXX: (regardless of what's in the parent's range?)
897 	 */
898 
899 #ifdef	DDI_MAP_DEBUG
900 	cmn_err(CE_CONT, "rootnex: <%s,%s> <0x%x, 0x%x, 0x%d> offset %d len %d "
901 	    "handle 0x%x\n", ddi_get_name(dip), ddi_get_name(rdip),
902 	    rp.regspec_bustype, rp.regspec_addr, rp.regspec_size, offset,
903 	    len, mp->map_handlep);
904 #endif	/* DDI_MAP_DEBUG */
905 
906 	/*
907 	 * I/O or memory mapping:
908 	 *
909 	 *	<bustype=0, addr=x, len=x>: memory
910 	 *	<bustype=1, addr=x, len=x>: i/o
911 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
912 	 */
913 
914 	if (rp.regspec_bustype > 1 && rp.regspec_addr != 0) {
915 		cmn_err(CE_WARN, "<%s,%s> invalid register spec"
916 		    " <0x%" PRIx64 ", 0x%" PRIx64 ", 0x%" PRIx64 ">",
917 		    ddi_get_name(dip), ddi_get_name(rdip), rp.regspec_bustype,
918 		    rp.regspec_addr, rp.regspec_size);
919 		return (DDI_ME_INVAL);
920 	}
921 
922 	if (rp.regspec_bustype > 1 && rp.regspec_addr == 0) {
923 		/*
924 		 * compatibility i/o mapping
925 		 */
926 		rp.regspec_bustype += offset;
927 	} else {
928 		/*
929 		 * Normal memory or i/o mapping
930 		 */
931 		rp.regspec_addr += offset;
932 	}
933 
934 	if (len != 0)
935 		rp.regspec_size = len;
936 
937 #ifdef	DDI_MAP_DEBUG
938 	cmn_err(CE_CONT, "             <%s,%s> <0x%" PRIx64 ", 0x%" PRIx64
939 	    ", 0x%" PRId64 "> offset %d len %d handle 0x%x\n",
940 	    ddi_get_name(dip), ddi_get_name(rdip), rp.regspec_bustype,
941 	    rp.regspec_addr, rp.regspec_size, offset, len, mp->map_handlep);
942 #endif	/* DDI_MAP_DEBUG */
943 
944 
945 	/*
946 	 * The x86 root nexus does not have any notion of valid ranges of
947 	 * addresses. Its children have valid ranges, but because there are none
948 	 * for the nexus, we don't need to call i_ddi_apply_range().  Verify
949 	 * that is the case.
950 	 */
951 	ASSERT0(sparc_pd_getnrng(dip));
952 
953 	switch (mp->map_op)  {
954 	case DDI_MO_MAP_LOCKED:
955 
956 		/*
957 		 * Set up the locked down kernel mapping to the regspec...
958 		 */
959 
960 		return (rootnex_map_regspec(mp, vaddrp));
961 
962 	case DDI_MO_UNMAP:
963 
964 		/*
965 		 * Release mapping...
966 		 */
967 
968 		return (rootnex_unmap_regspec(mp, vaddrp));
969 
970 	case DDI_MO_MAP_HANDLE:
971 
972 		return (rootnex_map_handle(mp));
973 
974 	default:
975 		return (DDI_ME_UNIMPLEMENTED);
976 	}
977 }
978 
979 
980 /*
981  * rootnex_map_fault()
982  *
983  *	fault in mappings for requestors
984  */
985 /*ARGSUSED*/
986 static int
987 rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip, struct hat *hat,
988     struct seg *seg, caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot,
989     uint_t lock)
990 {
991 
992 #ifdef	DDI_MAP_DEBUG
993 	ddi_map_debug("rootnex_map_fault: address <%x> pfn <%x>", addr, pfn);
994 	ddi_map_debug(" Seg <%s>\n",
995 	    seg->s_ops == &segdev_ops ? "segdev" :
996 	    seg == &kvseg ? "segkmem" : "NONE!");
997 #endif	/* DDI_MAP_DEBUG */
998 
999 	/*
1000 	 * This is all terribly broken, but it is a start
1001 	 *
1002 	 * XXX	Note that this test means that segdev_ops
1003 	 *	must be exported from seg_dev.c.
1004 	 * XXX	What about devices with their own segment drivers?
1005 	 */
1006 	if (seg->s_ops == &segdev_ops) {
1007 		struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1008 
1009 		if (hat == NULL) {
1010 			/*
1011 			 * This is one plausible interpretation of
1012 			 * a null hat i.e. use the first hat on the
1013 			 * address space hat list which by convention is
1014 			 * the hat of the system MMU.  At alternative
1015 			 * would be to panic .. this might well be better ..
1016 			 */
1017 			ASSERT(AS_READ_HELD(seg->s_as));
1018 			hat = seg->s_as->a_hat;
1019 			cmn_err(CE_NOTE, "rootnex_map_fault: nil hat");
1020 		}
1021 		hat_devload(hat, addr, MMU_PAGESIZE, pfn, prot | sdp->hat_attr,
1022 		    (lock ? HAT_LOAD_LOCK : HAT_LOAD));
1023 	} else if (seg == &kvseg && dp == NULL) {
1024 		hat_devload(kas.a_hat, addr, MMU_PAGESIZE, pfn, prot,
1025 		    HAT_LOAD_LOCK);
1026 	} else
1027 		return (DDI_FAILURE);
1028 	return (DDI_SUCCESS);
1029 }
1030 
1031 
1032 static int
1033 rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp)
1034 {
1035 	rootnex_addr_t rbase;
1036 	void *cvaddr;
1037 	uint64_t npages, pgoffset;
1038 	struct regspec64 *rp;
1039 	ddi_acc_hdl_t *hp;
1040 	ddi_acc_impl_t *ap;
1041 	uint_t	hat_acc_flags;
1042 	paddr_t pbase;
1043 
1044 	ASSERT(mp->map_flags & DDI_MF_EXT_REGSPEC);
1045 	rp = (struct regspec64 *)mp->map_obj.rp;
1046 	hp = mp->map_handlep;
1047 
1048 #ifdef	DDI_MAP_DEBUG
1049 	ddi_map_debug(
1050 	    "rootnex_map_regspec: <0x%x 0x%x 0x%x> handle 0x%x\n",
1051 	    rp->regspec_bustype, rp->regspec_addr,
1052 	    rp->regspec_size, mp->map_handlep);
1053 #endif	/* DDI_MAP_DEBUG */
1054 
1055 	/*
1056 	 * I/O or memory mapping
1057 	 *
1058 	 *	<bustype=0, addr=x, len=x>: memory
1059 	 *	<bustype=1, addr=x, len=x>: i/o
1060 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
1061 	 */
1062 
1063 	if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) {
1064 		cmn_err(CE_WARN, "rootnex: invalid register spec"
1065 		    " <0x%" PRIx64 ", 0x%" PRIx64", 0x%" PRIx64">",
1066 		    rp->regspec_bustype, rp->regspec_addr, rp->regspec_size);
1067 		return (DDI_FAILURE);
1068 	}
1069 
1070 	if (rp->regspec_bustype != 0) {
1071 		/*
1072 		 * I/O space - needs a handle.
1073 		 */
1074 		if (hp == NULL) {
1075 			return (DDI_FAILURE);
1076 		}
1077 		ap = (ddi_acc_impl_t *)hp->ah_platform_private;
1078 		ap->ahi_acc_attr |= DDI_ACCATTR_IO_SPACE;
1079 		impl_acc_hdl_init(hp);
1080 
1081 		if (mp->map_flags & DDI_MF_DEVICE_MAPPING) {
1082 #ifdef  DDI_MAP_DEBUG
1083 			ddi_map_debug("rootnex_map_regspec: mmap() "
1084 			    "to I/O space is not supported.\n");
1085 #endif  /* DDI_MAP_DEBUG */
1086 			return (DDI_ME_INVAL);
1087 		} else {
1088 			/*
1089 			 * 1275-compliant vs. compatibility i/o mapping
1090 			 */
1091 			*vaddrp =
1092 			    (rp->regspec_bustype > 1 && rp->regspec_addr == 0) ?
1093 			    ((caddr_t)(uintptr_t)rp->regspec_bustype) :
1094 			    ((caddr_t)(uintptr_t)rp->regspec_addr);
1095 #ifdef __xpv
1096 			if (DOMAIN_IS_INITDOMAIN(xen_info)) {
1097 				hp->ah_pfn = xen_assign_pfn(
1098 				    mmu_btop((ulong_t)rp->regspec_addr &
1099 				    MMU_PAGEMASK));
1100 			} else {
1101 				hp->ah_pfn = mmu_btop(
1102 				    (ulong_t)rp->regspec_addr & MMU_PAGEMASK);
1103 			}
1104 #else
1105 			hp->ah_pfn = mmu_btop((ulong_t)rp->regspec_addr &
1106 			    MMU_PAGEMASK);
1107 #endif
1108 			hp->ah_pnum = mmu_btopr(rp->regspec_size +
1109 			    (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET);
1110 		}
1111 
1112 #ifdef	DDI_MAP_DEBUG
1113 		ddi_map_debug(
1114 	    "rootnex_map_regspec: \"Mapping\" %d bytes I/O space at 0x%x\n",
1115 		    rp->regspec_size, *vaddrp);
1116 #endif	/* DDI_MAP_DEBUG */
1117 		return (DDI_SUCCESS);
1118 	}
1119 
1120 	/*
1121 	 * Memory space
1122 	 */
1123 
1124 	if (hp != NULL) {
1125 		/*
1126 		 * hat layer ignores
1127 		 * hp->ah_acc.devacc_attr_endian_flags.
1128 		 */
1129 		switch (hp->ah_acc.devacc_attr_dataorder) {
1130 		case DDI_STRICTORDER_ACC:
1131 			hat_acc_flags = HAT_STRICTORDER;
1132 			break;
1133 		case DDI_UNORDERED_OK_ACC:
1134 			hat_acc_flags = HAT_UNORDERED_OK;
1135 			break;
1136 		case DDI_MERGING_OK_ACC:
1137 			hat_acc_flags = HAT_MERGING_OK;
1138 			break;
1139 		case DDI_LOADCACHING_OK_ACC:
1140 			hat_acc_flags = HAT_LOADCACHING_OK;
1141 			break;
1142 		case DDI_STORECACHING_OK_ACC:
1143 			hat_acc_flags = HAT_STORECACHING_OK;
1144 			break;
1145 		}
1146 		ap = (ddi_acc_impl_t *)hp->ah_platform_private;
1147 		ap->ahi_acc_attr |= DDI_ACCATTR_CPU_VADDR;
1148 		impl_acc_hdl_init(hp);
1149 		hp->ah_hat_flags = hat_acc_flags;
1150 	} else {
1151 		hat_acc_flags = HAT_STRICTORDER;
1152 	}
1153 
1154 	rbase = (rootnex_addr_t)(rp->regspec_addr & MMU_PAGEMASK);
1155 #ifdef __xpv
1156 	/*
1157 	 * If we're dom0, we're using a real device so we need to translate
1158 	 * the MA to a PA.
1159 	 */
1160 	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
1161 		pbase = pfn_to_pa(xen_assign_pfn(mmu_btop(rbase)));
1162 	} else {
1163 		pbase = rbase;
1164 	}
1165 #else
1166 	pbase = rbase;
1167 #endif
1168 	pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET;
1169 
1170 	if (rp->regspec_size == 0) {
1171 #ifdef  DDI_MAP_DEBUG
1172 		ddi_map_debug("rootnex_map_regspec: zero regspec_size\n");
1173 #endif  /* DDI_MAP_DEBUG */
1174 		return (DDI_ME_INVAL);
1175 	}
1176 
1177 	if (mp->map_flags & DDI_MF_DEVICE_MAPPING) {
1178 		/* extra cast to make gcc happy */
1179 		*vaddrp = (caddr_t)((uintptr_t)mmu_btop(pbase));
1180 	} else {
1181 		npages = mmu_btopr(rp->regspec_size + pgoffset);
1182 
1183 #ifdef	DDI_MAP_DEBUG
1184 		ddi_map_debug("rootnex_map_regspec: Mapping %d pages "
1185 		    "physical %llx", npages, pbase);
1186 #endif	/* DDI_MAP_DEBUG */
1187 
1188 		cvaddr = device_arena_alloc(ptob(npages), VM_NOSLEEP);
1189 		if (cvaddr == NULL)
1190 			return (DDI_ME_NORESOURCES);
1191 
1192 		/*
1193 		 * Now map in the pages we've allocated...
1194 		 */
1195 		hat_devload(kas.a_hat, cvaddr, mmu_ptob(npages),
1196 		    mmu_btop(pbase), mp->map_prot | hat_acc_flags,
1197 		    HAT_LOAD_LOCK);
1198 		*vaddrp = (caddr_t)cvaddr + pgoffset;
1199 
1200 		/* save away pfn and npages for FMA */
1201 		hp = mp->map_handlep;
1202 		if (hp) {
1203 			hp->ah_pfn = mmu_btop(pbase);
1204 			hp->ah_pnum = npages;
1205 		}
1206 	}
1207 
1208 #ifdef	DDI_MAP_DEBUG
1209 	ddi_map_debug("at virtual 0x%x\n", *vaddrp);
1210 #endif	/* DDI_MAP_DEBUG */
1211 	return (DDI_SUCCESS);
1212 }
1213 
1214 
1215 static int
1216 rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp)
1217 {
1218 	caddr_t addr = (caddr_t)*vaddrp;
1219 	uint64_t npages, pgoffset;
1220 	struct regspec64 *rp;
1221 
1222 	if (mp->map_flags & DDI_MF_DEVICE_MAPPING)
1223 		return (0);
1224 
1225 	ASSERT(mp->map_flags & DDI_MF_EXT_REGSPEC);
1226 	rp = (struct regspec64 *)mp->map_obj.rp;
1227 
1228 	if (rp->regspec_size == 0) {
1229 #ifdef  DDI_MAP_DEBUG
1230 		ddi_map_debug("rootnex_unmap_regspec: zero regspec_size\n");
1231 #endif  /* DDI_MAP_DEBUG */
1232 		return (DDI_ME_INVAL);
1233 	}
1234 
1235 	/*
1236 	 * I/O or memory mapping:
1237 	 *
1238 	 *	<bustype=0, addr=x, len=x>: memory
1239 	 *	<bustype=1, addr=x, len=x>: i/o
1240 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
1241 	 */
1242 	if (rp->regspec_bustype != 0) {
1243 		/*
1244 		 * This is I/O space, which requires no particular
1245 		 * processing on unmap since it isn't mapped in the
1246 		 * first place.
1247 		 */
1248 		return (DDI_SUCCESS);
1249 	}
1250 
1251 	/*
1252 	 * Memory space
1253 	 */
1254 	pgoffset = (uintptr_t)addr & MMU_PAGEOFFSET;
1255 	npages = mmu_btopr(rp->regspec_size + pgoffset);
1256 	hat_unload(kas.a_hat, addr - pgoffset, ptob(npages), HAT_UNLOAD_UNLOCK);
1257 	device_arena_free(addr - pgoffset, ptob(npages));
1258 
1259 	/*
1260 	 * Destroy the pointer - the mapping has logically gone
1261 	 */
1262 	*vaddrp = NULL;
1263 
1264 	return (DDI_SUCCESS);
1265 }
1266 
1267 static int
1268 rootnex_map_handle(ddi_map_req_t *mp)
1269 {
1270 	rootnex_addr_t rbase;
1271 	ddi_acc_hdl_t *hp;
1272 	uint64_t pgoffset;
1273 	struct regspec64 *rp;
1274 	paddr_t pbase;
1275 
1276 	rp = (struct regspec64 *)mp->map_obj.rp;
1277 
1278 #ifdef	DDI_MAP_DEBUG
1279 	ddi_map_debug(
1280 	    "rootnex_map_handle: <0x%x 0x%x 0x%x> handle 0x%x\n",
1281 	    rp->regspec_bustype, rp->regspec_addr,
1282 	    rp->regspec_size, mp->map_handlep);
1283 #endif	/* DDI_MAP_DEBUG */
1284 
1285 	/*
1286 	 * I/O or memory mapping:
1287 	 *
1288 	 *	<bustype=0, addr=x, len=x>: memory
1289 	 *	<bustype=1, addr=x, len=x>: i/o
1290 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
1291 	 */
1292 	if (rp->regspec_bustype != 0) {
1293 		/*
1294 		 * This refers to I/O space, and we don't support "mapping"
1295 		 * I/O space to a user.
1296 		 */
1297 		return (DDI_FAILURE);
1298 	}
1299 
1300 	/*
1301 	 * Set up the hat_flags for the mapping.
1302 	 */
1303 	hp = mp->map_handlep;
1304 
1305 	switch (hp->ah_acc.devacc_attr_endian_flags) {
1306 	case DDI_NEVERSWAP_ACC:
1307 		hp->ah_hat_flags = HAT_NEVERSWAP | HAT_STRICTORDER;
1308 		break;
1309 	case DDI_STRUCTURE_LE_ACC:
1310 		hp->ah_hat_flags = HAT_STRUCTURE_LE;
1311 		break;
1312 	case DDI_STRUCTURE_BE_ACC:
1313 		return (DDI_FAILURE);
1314 	default:
1315 		return (DDI_REGS_ACC_CONFLICT);
1316 	}
1317 
1318 	switch (hp->ah_acc.devacc_attr_dataorder) {
1319 	case DDI_STRICTORDER_ACC:
1320 		break;
1321 	case DDI_UNORDERED_OK_ACC:
1322 		hp->ah_hat_flags |= HAT_UNORDERED_OK;
1323 		break;
1324 	case DDI_MERGING_OK_ACC:
1325 		hp->ah_hat_flags |= HAT_MERGING_OK;
1326 		break;
1327 	case DDI_LOADCACHING_OK_ACC:
1328 		hp->ah_hat_flags |= HAT_LOADCACHING_OK;
1329 		break;
1330 	case DDI_STORECACHING_OK_ACC:
1331 		hp->ah_hat_flags |= HAT_STORECACHING_OK;
1332 		break;
1333 	default:
1334 		return (DDI_FAILURE);
1335 	}
1336 
1337 	rbase = (rootnex_addr_t)rp->regspec_addr &
1338 	    (~(rootnex_addr_t)MMU_PAGEOFFSET);
1339 	pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET;
1340 
1341 	if (rp->regspec_size == 0)
1342 		return (DDI_ME_INVAL);
1343 
1344 #ifdef __xpv
1345 	/*
1346 	 * If we're dom0, we're using a real device so we need to translate
1347 	 * the MA to a PA.
1348 	 */
1349 	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
1350 		pbase = pfn_to_pa(xen_assign_pfn(mmu_btop(rbase))) |
1351 		    (rbase & MMU_PAGEOFFSET);
1352 	} else {
1353 		pbase = rbase;
1354 	}
1355 #else
1356 	pbase = rbase;
1357 #endif
1358 
1359 	hp->ah_pfn = mmu_btop(pbase);
1360 	hp->ah_pnum = mmu_btopr(rp->regspec_size + pgoffset);
1361 
1362 	return (DDI_SUCCESS);
1363 }
1364 
1365 
1366 
1367 /*
1368  * ************************
1369  *  interrupt related code
1370  * ************************
1371  */
1372 
1373 /*
1374  * rootnex_intr_ops()
1375  *	bus_intr_op() function for interrupt support
1376  */
1377 /* ARGSUSED */
1378 static int
1379 rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op,
1380     ddi_intr_handle_impl_t *hdlp, void *result)
1381 {
1382 	struct intrspec			*ispec;
1383 
1384 	DDI_INTR_NEXDBG((CE_CONT,
1385 	    "rootnex_intr_ops: pdip = %p, rdip = %p, intr_op = %x, hdlp = %p\n",
1386 	    (void *)pdip, (void *)rdip, intr_op, (void *)hdlp));
1387 
1388 	/* Process the interrupt operation */
1389 	switch (intr_op) {
1390 	case DDI_INTROP_GETCAP:
1391 		/* First check with pcplusmp */
1392 		if (psm_intr_ops == NULL)
1393 			return (DDI_FAILURE);
1394 
1395 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_CAP, result)) {
1396 			*(int *)result = 0;
1397 			return (DDI_FAILURE);
1398 		}
1399 		break;
1400 	case DDI_INTROP_SETCAP:
1401 		if (psm_intr_ops == NULL)
1402 			return (DDI_FAILURE);
1403 
1404 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_CAP, result))
1405 			return (DDI_FAILURE);
1406 		break;
1407 	case DDI_INTROP_ALLOC:
1408 		ASSERT(hdlp->ih_type == DDI_INTR_TYPE_FIXED);
1409 		return (rootnex_alloc_intr_fixed(rdip, hdlp, result));
1410 	case DDI_INTROP_FREE:
1411 		ASSERT(hdlp->ih_type == DDI_INTR_TYPE_FIXED);
1412 		return (rootnex_free_intr_fixed(rdip, hdlp));
1413 	case DDI_INTROP_GETPRI:
1414 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
1415 			return (DDI_FAILURE);
1416 		*(int *)result = ispec->intrspec_pri;
1417 		break;
1418 	case DDI_INTROP_SETPRI:
1419 		/* Validate the interrupt priority passed to us */
1420 		if (*(int *)result > LOCK_LEVEL)
1421 			return (DDI_FAILURE);
1422 
1423 		/* Ensure that PSM is all initialized and ispec is ok */
1424 		if ((psm_intr_ops == NULL) ||
1425 		    ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL))
1426 			return (DDI_FAILURE);
1427 
1428 		/* Change the priority */
1429 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_PRI, result) ==
1430 		    PSM_FAILURE)
1431 			return (DDI_FAILURE);
1432 
1433 		/* update the ispec with the new priority */
1434 		ispec->intrspec_pri =  *(int *)result;
1435 		break;
1436 	case DDI_INTROP_ADDISR:
1437 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
1438 			return (DDI_FAILURE);
1439 		ispec->intrspec_func = hdlp->ih_cb_func;
1440 		break;
1441 	case DDI_INTROP_REMISR:
1442 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
1443 			return (DDI_FAILURE);
1444 		ispec->intrspec_func = (uint_t (*)()) 0;
1445 		break;
1446 	case DDI_INTROP_ENABLE:
1447 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
1448 			return (DDI_FAILURE);
1449 
1450 		/* Call psmi to translate irq with the dip */
1451 		if (psm_intr_ops == NULL)
1452 			return (DDI_FAILURE);
1453 
1454 		((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
1455 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_XLATE_VECTOR,
1456 		    (int *)&hdlp->ih_vector) == PSM_FAILURE)
1457 			return (DDI_FAILURE);
1458 
1459 		/* Add the interrupt handler */
1460 		if (!add_avintr((void *)hdlp, ispec->intrspec_pri,
1461 		    hdlp->ih_cb_func, DEVI(rdip)->devi_name, hdlp->ih_vector,
1462 		    hdlp->ih_cb_arg1, hdlp->ih_cb_arg2, NULL, rdip))
1463 			return (DDI_FAILURE);
1464 		break;
1465 	case DDI_INTROP_DISABLE:
1466 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
1467 			return (DDI_FAILURE);
1468 
1469 		/* Call psm_ops() to translate irq with the dip */
1470 		if (psm_intr_ops == NULL)
1471 			return (DDI_FAILURE);
1472 
1473 		((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
1474 		(void) (*psm_intr_ops)(rdip, hdlp,
1475 		    PSM_INTR_OP_XLATE_VECTOR, (int *)&hdlp->ih_vector);
1476 
1477 		/* Remove the interrupt handler */
1478 		rem_avintr((void *)hdlp, ispec->intrspec_pri,
1479 		    hdlp->ih_cb_func, hdlp->ih_vector);
1480 		break;
1481 	case DDI_INTROP_SETMASK:
1482 		if (psm_intr_ops == NULL)
1483 			return (DDI_FAILURE);
1484 
1485 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_MASK, NULL))
1486 			return (DDI_FAILURE);
1487 		break;
1488 	case DDI_INTROP_CLRMASK:
1489 		if (psm_intr_ops == NULL)
1490 			return (DDI_FAILURE);
1491 
1492 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_CLEAR_MASK, NULL))
1493 			return (DDI_FAILURE);
1494 		break;
1495 	case DDI_INTROP_GETPENDING:
1496 		if (psm_intr_ops == NULL)
1497 			return (DDI_FAILURE);
1498 
1499 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_PENDING,
1500 		    result)) {
1501 			*(int *)result = 0;
1502 			return (DDI_FAILURE);
1503 		}
1504 		break;
1505 	case DDI_INTROP_NAVAIL:
1506 	case DDI_INTROP_NINTRS:
1507 		*(int *)result = i_ddi_get_intx_nintrs(rdip);
1508 		if (*(int *)result == 0) {
1509 			/*
1510 			 * Special case for 'pcic' driver' only. This driver
1511 			 * driver is a child of 'isa' and 'rootnex' drivers.
1512 			 *
1513 			 * See detailed comments on this in the function
1514 			 * rootnex_get_ispec().
1515 			 *
1516 			 * Children of 'pcic' send 'NINITR' request all the
1517 			 * way to rootnex driver. But, the 'pdp->par_nintr'
1518 			 * field may not initialized. So, we fake it here
1519 			 * to return 1 (a la what PCMCIA nexus does).
1520 			 */
1521 			if (strcmp(ddi_get_name(rdip), "pcic") == 0)
1522 				*(int *)result = 1;
1523 			else
1524 				return (DDI_FAILURE);
1525 		}
1526 		break;
1527 	case DDI_INTROP_SUPPORTED_TYPES:
1528 		*(int *)result = DDI_INTR_TYPE_FIXED;	/* Always ... */
1529 		break;
1530 	default:
1531 		return (DDI_FAILURE);
1532 	}
1533 
1534 	return (DDI_SUCCESS);
1535 }
1536 
1537 
1538 /*
1539  * rootnex_get_ispec()
1540  *	convert an interrupt number to an interrupt specification.
1541  *	The interrupt number determines which interrupt spec will be
1542  *	returned if more than one exists.
1543  *
1544  *	Look into the parent private data area of the 'rdip' to find out
1545  *	the interrupt specification.  First check to make sure there is
1546  *	one that matchs "inumber" and then return a pointer to it.
1547  *
1548  *	Return NULL if one could not be found.
1549  *
1550  *	NOTE: This is needed for rootnex_intr_ops()
1551  */
1552 static struct intrspec *
1553 rootnex_get_ispec(dev_info_t *rdip, int inum)
1554 {
1555 	struct ddi_parent_private_data *pdp = ddi_get_parent_data(rdip);
1556 
1557 	/*
1558 	 * Special case handling for drivers that provide their own
1559 	 * intrspec structures instead of relying on the DDI framework.
1560 	 *
1561 	 * A broken hardware driver in ON could potentially provide its
1562 	 * own intrspec structure, instead of relying on the hardware.
1563 	 * If these drivers are children of 'rootnex' then we need to
1564 	 * continue to provide backward compatibility to them here.
1565 	 *
1566 	 * Following check is a special case for 'pcic' driver which
1567 	 * was found to have broken hardwre andby provides its own intrspec.
1568 	 *
1569 	 * Verbatim comments from this driver are shown here:
1570 	 * "Don't use the ddi_add_intr since we don't have a
1571 	 * default intrspec in all cases."
1572 	 *
1573 	 * Since an 'ispec' may not be always created for it,
1574 	 * check for that and create one if so.
1575 	 *
1576 	 * NOTE: Currently 'pcic' is the only driver found to do this.
1577 	 */
1578 	if (!pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) {
1579 		pdp->par_nintr = 1;
1580 		pdp->par_intr = kmem_zalloc(sizeof (struct intrspec) *
1581 		    pdp->par_nintr, KM_SLEEP);
1582 	}
1583 
1584 	/* Validate the interrupt number */
1585 	if (inum >= pdp->par_nintr)
1586 		return (NULL);
1587 
1588 	/* Get the interrupt structure pointer and return that */
1589 	return ((struct intrspec *)&pdp->par_intr[inum]);
1590 }
1591 
1592 /*
1593  * Allocate interrupt vector for FIXED (legacy) type.
1594  */
1595 static int
1596 rootnex_alloc_intr_fixed(dev_info_t *rdip, ddi_intr_handle_impl_t *hdlp,
1597     void *result)
1598 {
1599 	struct intrspec		*ispec;
1600 	ddi_intr_handle_impl_t	info_hdl;
1601 	int			ret;
1602 	int			free_phdl = 0;
1603 	apic_get_type_t		type_info;
1604 
1605 	if (psm_intr_ops == NULL)
1606 		return (DDI_FAILURE);
1607 
1608 	if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
1609 		return (DDI_FAILURE);
1610 
1611 	/*
1612 	 * If the PSM module is "APIX" then pass the request for it
1613 	 * to allocate the vector now.
1614 	 */
1615 	bzero(&info_hdl, sizeof (ddi_intr_handle_impl_t));
1616 	info_hdl.ih_private = &type_info;
1617 	if ((*psm_intr_ops)(NULL, &info_hdl, PSM_INTR_OP_APIC_TYPE, NULL) ==
1618 	    PSM_SUCCESS && strcmp(type_info.avgi_type, APIC_APIX_NAME) == 0) {
1619 		if (hdlp->ih_private == NULL) { /* allocate phdl structure */
1620 			free_phdl = 1;
1621 			i_ddi_alloc_intr_phdl(hdlp);
1622 		}
1623 		((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
1624 		ret = (*psm_intr_ops)(rdip, hdlp,
1625 		    PSM_INTR_OP_ALLOC_VECTORS, result);
1626 		if (free_phdl) { /* free up the phdl structure */
1627 			free_phdl = 0;
1628 			i_ddi_free_intr_phdl(hdlp);
1629 			hdlp->ih_private = NULL;
1630 		}
1631 	} else {
1632 		/*
1633 		 * No APIX module; fall back to the old scheme where the
1634 		 * interrupt vector is allocated during ddi_enable_intr() call.
1635 		 */
1636 		hdlp->ih_pri = ispec->intrspec_pri;
1637 		*(int *)result = hdlp->ih_scratch1;
1638 		ret = DDI_SUCCESS;
1639 	}
1640 
1641 	return (ret);
1642 }
1643 
1644 /*
1645  * Free up interrupt vector for FIXED (legacy) type.
1646  */
1647 static int
1648 rootnex_free_intr_fixed(dev_info_t *rdip, ddi_intr_handle_impl_t *hdlp)
1649 {
1650 	struct intrspec			*ispec;
1651 	struct ddi_parent_private_data	*pdp;
1652 	ddi_intr_handle_impl_t		info_hdl;
1653 	int				ret;
1654 	apic_get_type_t			type_info;
1655 
1656 	if (psm_intr_ops == NULL)
1657 		return (DDI_FAILURE);
1658 
1659 	/*
1660 	 * If the PSM module is "APIX" then pass the request for it
1661 	 * to free up the vector now.
1662 	 */
1663 	bzero(&info_hdl, sizeof (ddi_intr_handle_impl_t));
1664 	info_hdl.ih_private = &type_info;
1665 	if ((*psm_intr_ops)(NULL, &info_hdl, PSM_INTR_OP_APIC_TYPE, NULL) ==
1666 	    PSM_SUCCESS && strcmp(type_info.avgi_type, APIC_APIX_NAME) == 0) {
1667 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
1668 			return (DDI_FAILURE);
1669 		((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
1670 		ret = (*psm_intr_ops)(rdip, hdlp,
1671 		    PSM_INTR_OP_FREE_VECTORS, NULL);
1672 	} else {
1673 		/*
1674 		 * No APIX module; fall back to the old scheme where
1675 		 * the interrupt vector was already freed during
1676 		 * ddi_disable_intr() call.
1677 		 */
1678 		ret = DDI_SUCCESS;
1679 	}
1680 
1681 	pdp = ddi_get_parent_data(rdip);
1682 
1683 	/*
1684 	 * Special case for 'pcic' driver' only.
1685 	 * If an intrspec was created for it, clean it up here
1686 	 * See detailed comments on this in the function
1687 	 * rootnex_get_ispec().
1688 	 */
1689 	if (pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) {
1690 		kmem_free(pdp->par_intr, sizeof (struct intrspec) *
1691 		    pdp->par_nintr);
1692 		/*
1693 		 * Set it to zero; so that
1694 		 * DDI framework doesn't free it again
1695 		 */
1696 		pdp->par_intr = NULL;
1697 		pdp->par_nintr = 0;
1698 	}
1699 
1700 	return (ret);
1701 }
1702 
1703 
1704 /*
1705  * ******************
1706  *  dma related code
1707  * ******************
1708  */
1709 
1710 /*ARGSUSED*/
1711 static int
1712 rootnex_coredma_allochdl(dev_info_t *dip, dev_info_t *rdip,
1713     ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
1714     ddi_dma_handle_t *handlep)
1715 {
1716 	uint64_t maxsegmentsize_ll;
1717 	uint_t maxsegmentsize;
1718 	ddi_dma_impl_t *hp;
1719 	rootnex_dma_t *dma;
1720 	uint64_t count_max;
1721 	uint64_t seg;
1722 	int kmflag;
1723 	int e;
1724 
1725 
1726 	/* convert our sleep flags */
1727 	if (waitfp == DDI_DMA_SLEEP) {
1728 		kmflag = KM_SLEEP;
1729 	} else {
1730 		kmflag = KM_NOSLEEP;
1731 	}
1732 
1733 	/*
1734 	 * We try to do only one memory allocation here. We'll do a little
1735 	 * pointer manipulation later. If the bind ends up taking more than
1736 	 * our prealloc's space, we'll have to allocate more memory in the
1737 	 * bind operation. Not great, but much better than before and the
1738 	 * best we can do with the current bind interfaces.
1739 	 */
1740 	hp = kmem_cache_alloc(rootnex_state->r_dmahdl_cache, kmflag);
1741 	if (hp == NULL)
1742 		return (DDI_DMA_NORESOURCES);
1743 
1744 	/* Do our pointer manipulation now, align the structures */
1745 	hp->dmai_private = (void *)(((uintptr_t)hp +
1746 	    (uintptr_t)sizeof (ddi_dma_impl_t) + 0x7) & ~0x7);
1747 	dma = (rootnex_dma_t *)hp->dmai_private;
1748 	dma->dp_prealloc_buffer = (uchar_t *)(((uintptr_t)dma +
1749 	    sizeof (rootnex_dma_t) + 0x7) & ~0x7);
1750 
1751 	/* setup the handle */
1752 	rootnex_clean_dmahdl(hp);
1753 	hp->dmai_error.err_fep = NULL;
1754 	hp->dmai_error.err_cf = NULL;
1755 	dma->dp_dip = rdip;
1756 	dma->dp_sglinfo.si_flags = attr->dma_attr_flags;
1757 	dma->dp_sglinfo.si_min_addr = attr->dma_attr_addr_lo;
1758 
1759 	/*
1760 	 * The BOUNCE_ON_SEG workaround is not needed when an IOMMU
1761 	 * is being used. Set the upper limit to the seg value.
1762 	 * There will be enough DVMA space to always get addresses
1763 	 * that will match the constraints.
1764 	 */
1765 	if (IOMMU_USED(rdip) &&
1766 	    (attr->dma_attr_flags & _DDI_DMA_BOUNCE_ON_SEG)) {
1767 		dma->dp_sglinfo.si_max_addr = attr->dma_attr_seg;
1768 		dma->dp_sglinfo.si_flags &= ~_DDI_DMA_BOUNCE_ON_SEG;
1769 	} else
1770 		dma->dp_sglinfo.si_max_addr = attr->dma_attr_addr_hi;
1771 
1772 	hp->dmai_minxfer = attr->dma_attr_minxfer;
1773 	hp->dmai_burstsizes = attr->dma_attr_burstsizes;
1774 	hp->dmai_rdip = rdip;
1775 	hp->dmai_attr = *attr;
1776 
1777 	if (attr->dma_attr_seg >= dma->dp_sglinfo.si_max_addr)
1778 		dma->dp_sglinfo.si_cancross = B_FALSE;
1779 	else
1780 		dma->dp_sglinfo.si_cancross = B_TRUE;
1781 
1782 	/* we don't need to worry about the SPL since we do a tryenter */
1783 	mutex_init(&dma->dp_mutex, NULL, MUTEX_DRIVER, NULL);
1784 
1785 	/*
1786 	 * Figure out our maximum segment size. If the segment size is greater
1787 	 * than 4G, we will limit it to (4G - 1) since the max size of a dma
1788 	 * object (ddi_dma_obj_t.dmao_size) is 32 bits. dma_attr_seg and
1789 	 * dma_attr_count_max are size-1 type values.
1790 	 *
1791 	 * Maximum segment size is the largest physically contiguous chunk of
1792 	 * memory that we can return from a bind (i.e. the maximum size of a
1793 	 * single cookie).
1794 	 */
1795 
1796 	/* handle the rollover cases */
1797 	seg = attr->dma_attr_seg + 1;
1798 	if (seg < attr->dma_attr_seg) {
1799 		seg = attr->dma_attr_seg;
1800 	}
1801 	count_max = attr->dma_attr_count_max + 1;
1802 	if (count_max < attr->dma_attr_count_max) {
1803 		count_max = attr->dma_attr_count_max;
1804 	}
1805 
1806 	/*
1807 	 * granularity may or may not be a power of two. If it isn't, we can't
1808 	 * use a simple mask.
1809 	 */
1810 	if (!ISP2(attr->dma_attr_granular)) {
1811 		dma->dp_granularity_power_2 = B_FALSE;
1812 	} else {
1813 		dma->dp_granularity_power_2 = B_TRUE;
1814 	}
1815 
1816 	/*
1817 	 * maxxfer should be a whole multiple of granularity. If we're going to
1818 	 * break up a window because we're greater than maxxfer, we might as
1819 	 * well make sure it's maxxfer is a whole multiple so we don't have to
1820 	 * worry about triming the window later on for this case.
1821 	 */
1822 	if (attr->dma_attr_granular > 1) {
1823 		if (dma->dp_granularity_power_2) {
1824 			dma->dp_maxxfer = attr->dma_attr_maxxfer -
1825 			    (attr->dma_attr_maxxfer &
1826 			    (attr->dma_attr_granular - 1));
1827 		} else {
1828 			dma->dp_maxxfer = attr->dma_attr_maxxfer -
1829 			    (attr->dma_attr_maxxfer % attr->dma_attr_granular);
1830 		}
1831 	} else {
1832 		dma->dp_maxxfer = attr->dma_attr_maxxfer;
1833 	}
1834 
1835 	maxsegmentsize_ll = MIN(seg, dma->dp_maxxfer);
1836 	maxsegmentsize_ll = MIN(maxsegmentsize_ll, count_max);
1837 	if (maxsegmentsize_ll == 0 || (maxsegmentsize_ll > 0xFFFFFFFF)) {
1838 		maxsegmentsize = 0xFFFFFFFF;
1839 	} else {
1840 		maxsegmentsize = maxsegmentsize_ll;
1841 	}
1842 	dma->dp_sglinfo.si_max_cookie_size = maxsegmentsize;
1843 	dma->dp_sglinfo.si_segmask = attr->dma_attr_seg;
1844 
1845 	/* check the ddi_dma_attr arg to make sure it makes a little sense */
1846 	if (rootnex_alloc_check_parms) {
1847 		e = rootnex_valid_alloc_parms(attr, maxsegmentsize);
1848 		if (e != DDI_SUCCESS) {
1849 			ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_ALLOC_FAIL]);
1850 			(void) rootnex_dma_freehdl(dip, rdip,
1851 			    (ddi_dma_handle_t)hp);
1852 			return (e);
1853 		}
1854 	}
1855 
1856 	*handlep = (ddi_dma_handle_t)hp;
1857 
1858 	ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1859 	ROOTNEX_DPROBE1(rootnex__alloc__handle, uint64_t,
1860 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1861 
1862 	return (DDI_SUCCESS);
1863 }
1864 
1865 
1866 /*
1867  * rootnex_dma_allochdl()
1868  *    called from ddi_dma_alloc_handle().
1869  */
1870 static int
1871 rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
1872     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
1873 {
1874 	int retval = DDI_SUCCESS;
1875 #if defined(__amd64) && !defined(__xpv)
1876 
1877 	if (IOMMU_UNITIALIZED(rdip)) {
1878 		retval = iommulib_nex_open(dip, rdip);
1879 
1880 		if (retval != DDI_SUCCESS && retval != DDI_ENOTSUP)
1881 			return (retval);
1882 	}
1883 
1884 	if (IOMMU_UNUSED(rdip)) {
1885 		retval = rootnex_coredma_allochdl(dip, rdip, attr, waitfp, arg,
1886 		    handlep);
1887 	} else {
1888 		retval = iommulib_nexdma_allochdl(dip, rdip, attr,
1889 		    waitfp, arg, handlep);
1890 	}
1891 #else
1892 	retval = rootnex_coredma_allochdl(dip, rdip, attr, waitfp, arg,
1893 	    handlep);
1894 #endif
1895 	switch (retval) {
1896 	case DDI_DMA_NORESOURCES:
1897 		if (waitfp != DDI_DMA_DONTWAIT) {
1898 			ddi_set_callback(waitfp, arg,
1899 			    &rootnex_state->r_dvma_call_list_id);
1900 		}
1901 		break;
1902 	case DDI_SUCCESS:
1903 		ndi_fmc_insert(rdip, DMA_HANDLE, *handlep, NULL);
1904 		break;
1905 	default:
1906 		break;
1907 	}
1908 	return (retval);
1909 }
1910 
1911 /*ARGSUSED*/
1912 static int
1913 rootnex_coredma_freehdl(dev_info_t *dip, dev_info_t *rdip,
1914     ddi_dma_handle_t handle)
1915 {
1916 	ddi_dma_impl_t *hp;
1917 	rootnex_dma_t *dma;
1918 
1919 
1920 	hp = (ddi_dma_impl_t *)handle;
1921 	dma = (rootnex_dma_t *)hp->dmai_private;
1922 
1923 	/* unbind should have been called first */
1924 	ASSERT(!dma->dp_inuse);
1925 
1926 	mutex_destroy(&dma->dp_mutex);
1927 	kmem_cache_free(rootnex_state->r_dmahdl_cache, hp);
1928 
1929 	ROOTNEX_DPROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1930 	ROOTNEX_DPROBE1(rootnex__free__handle, uint64_t,
1931 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1932 
1933 	return (DDI_SUCCESS);
1934 }
1935 
1936 /*
1937  * rootnex_dma_freehdl()
1938  *    called from ddi_dma_free_handle().
1939  */
1940 static int
1941 rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
1942 {
1943 	int ret;
1944 
1945 	ndi_fmc_remove(rdip, DMA_HANDLE, handle);
1946 #if defined(__amd64) && !defined(__xpv)
1947 	if (IOMMU_USED(rdip))
1948 		ret = iommulib_nexdma_freehdl(dip, rdip, handle);
1949 	else
1950 #endif
1951 	ret = rootnex_coredma_freehdl(dip, rdip, handle);
1952 
1953 	if (rootnex_state->r_dvma_call_list_id)
1954 		ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
1955 
1956 	return (ret);
1957 }
1958 
1959 /*ARGSUSED*/
1960 static int
1961 rootnex_coredma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
1962     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
1963     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
1964 {
1965 	rootnex_sglinfo_t *sinfo;
1966 	ddi_dma_obj_t *dmao;
1967 #if defined(__amd64) && !defined(__xpv)
1968 	struct dvmaseg *dvs;
1969 	ddi_dma_cookie_t *cookie;
1970 #endif
1971 	ddi_dma_attr_t *attr;
1972 	ddi_dma_impl_t *hp;
1973 	rootnex_dma_t *dma;
1974 	int kmflag;
1975 	int e;
1976 	uint_t ncookies;
1977 
1978 	hp = (ddi_dma_impl_t *)handle;
1979 	dma = (rootnex_dma_t *)hp->dmai_private;
1980 	dmao = &dma->dp_dma;
1981 	sinfo = &dma->dp_sglinfo;
1982 	attr = &hp->dmai_attr;
1983 
1984 	/* convert the sleep flags */
1985 	if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
1986 		dma->dp_sleep_flags = kmflag = KM_SLEEP;
1987 	} else {
1988 		dma->dp_sleep_flags = kmflag = KM_NOSLEEP;
1989 	}
1990 
1991 	hp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS;
1992 
1993 	/*
1994 	 * This is useful for debugging a driver. Not as useful in a production
1995 	 * system. The only time this will fail is if you have a driver bug.
1996 	 */
1997 	if (rootnex_bind_check_inuse) {
1998 		/*
1999 		 * No one else should ever have this lock unless someone else
2000 		 * is trying to use this handle. So contention on the lock
2001 		 * is the same as inuse being set.
2002 		 */
2003 		e = mutex_tryenter(&dma->dp_mutex);
2004 		if (e == 0) {
2005 			ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
2006 			return (DDI_DMA_INUSE);
2007 		}
2008 		if (dma->dp_inuse) {
2009 			mutex_exit(&dma->dp_mutex);
2010 			ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
2011 			return (DDI_DMA_INUSE);
2012 		}
2013 		dma->dp_inuse = B_TRUE;
2014 		mutex_exit(&dma->dp_mutex);
2015 	}
2016 
2017 	/* check the ddi_dma_attr arg to make sure it makes a little sense */
2018 	if (rootnex_bind_check_parms) {
2019 		e = rootnex_valid_bind_parms(dmareq, attr);
2020 		if (e != DDI_SUCCESS) {
2021 			ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
2022 			rootnex_clean_dmahdl(hp);
2023 			return (e);
2024 		}
2025 	}
2026 
2027 	/* save away the original bind info */
2028 	dma->dp_dma = dmareq->dmar_object;
2029 
2030 #if defined(__amd64) && !defined(__xpv)
2031 	if (IOMMU_USED(rdip)) {
2032 		dmao = &dma->dp_dvma;
2033 		e = iommulib_nexdma_mapobject(dip, rdip, handle, dmareq, dmao);
2034 		switch (e) {
2035 		case DDI_SUCCESS:
2036 			if (sinfo->si_cancross ||
2037 			    dmao->dmao_obj.dvma_obj.dv_nseg != 1 ||
2038 			    dmao->dmao_size > sinfo->si_max_cookie_size) {
2039 				dma->dp_dvma_used = B_TRUE;
2040 				break;
2041 			}
2042 			sinfo->si_sgl_size = 1;
2043 			hp->dmai_rflags |= DMP_NOSYNC;
2044 
2045 			dma->dp_dvma_used = B_TRUE;
2046 			dma->dp_need_to_free_cookie = B_FALSE;
2047 
2048 			dvs = &dmao->dmao_obj.dvma_obj.dv_seg[0];
2049 			cookie = hp->dmai_cookie = dma->dp_cookies =
2050 			    (ddi_dma_cookie_t *)dma->dp_prealloc_buffer;
2051 			cookie->dmac_laddress = dvs->dvs_start +
2052 			    dmao->dmao_obj.dvma_obj.dv_off;
2053 			cookie->dmac_size = dvs->dvs_len;
2054 			cookie->dmac_type = 0;
2055 
2056 			ROOTNEX_DPROBE1(rootnex__bind__dvmafast, dev_info_t *,
2057 			    rdip);
2058 			goto fast;
2059 		case DDI_ENOTSUP:
2060 			break;
2061 		default:
2062 			rootnex_clean_dmahdl(hp);
2063 			return (e);
2064 		}
2065 	}
2066 #endif
2067 
2068 	/*
2069 	 * Figure out a rough estimate of what maximum number of pages
2070 	 * this buffer could use (a high estimate of course).
2071 	 */
2072 	sinfo->si_max_pages = mmu_btopr(dma->dp_dma.dmao_size) + 1;
2073 
2074 	if (dma->dp_dvma_used) {
2075 		/*
2076 		 * The number of physical pages is the worst case.
2077 		 *
2078 		 * For DVMA, the worst case is the length divided
2079 		 * by the maximum cookie length, plus 1. Add to that
2080 		 * the number of segment boundaries potentially crossed, and
2081 		 * the additional number of DVMA segments that was returned.
2082 		 *
2083 		 * In the normal case, for modern devices, si_cancross will
2084 		 * be false, and dv_nseg will be 1, and the fast path will
2085 		 * have been taken above.
2086 		 */
2087 		ncookies = (dma->dp_dma.dmao_size / sinfo->si_max_cookie_size)
2088 		    + 1;
2089 		if (sinfo->si_cancross)
2090 			ncookies +=
2091 			    (dma->dp_dma.dmao_size / attr->dma_attr_seg) + 1;
2092 		ncookies += (dmao->dmao_obj.dvma_obj.dv_nseg - 1);
2093 
2094 		sinfo->si_max_pages = MIN(sinfo->si_max_pages, ncookies);
2095 	}
2096 
2097 	/*
2098 	 * We'll use the pre-allocated cookies for any bind that will *always*
2099 	 * fit (more important to be consistent, we don't want to create
2100 	 * additional degenerate cases).
2101 	 */
2102 	if (sinfo->si_max_pages <= rootnex_state->r_prealloc_cookies) {
2103 		dma->dp_cookies = (ddi_dma_cookie_t *)dma->dp_prealloc_buffer;
2104 		dma->dp_need_to_free_cookie = B_FALSE;
2105 		ROOTNEX_DPROBE2(rootnex__bind__prealloc, dev_info_t *, rdip,
2106 		    uint_t, sinfo->si_max_pages);
2107 
2108 	/*
2109 	 * For anything larger than that, we'll go ahead and allocate the
2110 	 * maximum number of pages we expect to see. Hopefuly, we won't be
2111 	 * seeing this path in the fast path for high performance devices very
2112 	 * frequently.
2113 	 *
2114 	 * a ddi bind interface that allowed the driver to provide storage to
2115 	 * the bind interface would speed this case up.
2116 	 */
2117 	} else {
2118 		/*
2119 		 * Save away how much memory we allocated. If we're doing a
2120 		 * nosleep, the alloc could fail...
2121 		 */
2122 		dma->dp_cookie_size = sinfo->si_max_pages *
2123 		    sizeof (ddi_dma_cookie_t);
2124 		dma->dp_cookies = kmem_alloc(dma->dp_cookie_size, kmflag);
2125 		if (dma->dp_cookies == NULL) {
2126 			ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
2127 			rootnex_clean_dmahdl(hp);
2128 			return (DDI_DMA_NORESOURCES);
2129 		}
2130 		dma->dp_need_to_free_cookie = B_TRUE;
2131 		ROOTNEX_DPROBE2(rootnex__bind__alloc, dev_info_t *, rdip,
2132 		    uint_t, sinfo->si_max_pages);
2133 	}
2134 	hp->dmai_cookie = dma->dp_cookies;
2135 
2136 	/*
2137 	 * Get the real sgl. rootnex_get_sgl will fill in cookie array while
2138 	 * looking at the constraints in the dma structure. It will then put
2139 	 * some additional state about the sgl in the dma struct (i.e. is
2140 	 * the sgl clean, or do we need to do some munging; how many pages
2141 	 * need to be copied, etc.)
2142 	 */
2143 	if (dma->dp_dvma_used)
2144 		rootnex_dvma_get_sgl(dmao, dma->dp_cookies, &dma->dp_sglinfo);
2145 	else
2146 		rootnex_get_sgl(dmao, dma->dp_cookies, &dma->dp_sglinfo);
2147 
2148 out:
2149 	ASSERT(sinfo->si_sgl_size <= sinfo->si_max_pages);
2150 	/* if we don't need a copy buffer, we don't need to sync */
2151 	if (sinfo->si_copybuf_req == 0) {
2152 		hp->dmai_rflags |= DMP_NOSYNC;
2153 	}
2154 
2155 	/*
2156 	 * if we don't need the copybuf and we don't need to do a partial,  we
2157 	 * hit the fast path. All the high performance devices should be trying
2158 	 * to hit this path. To hit this path, a device should be able to reach
2159 	 * all of memory, shouldn't try to bind more than it can transfer, and
2160 	 * the buffer shouldn't require more cookies than the driver/device can
2161 	 * handle [sgllen]).
2162 	 *
2163 	 * Note that negative values of dma_attr_sgllen are supposed
2164 	 * to mean unlimited, but we just cast them to mean a
2165 	 * "ridiculous large limit".  This saves some extra checks on
2166 	 * hot paths.
2167 	 */
2168 	if ((sinfo->si_copybuf_req == 0) &&
2169 	    (sinfo->si_sgl_size <= (unsigned)attr->dma_attr_sgllen) &&
2170 	    (dmao->dmao_size <= dma->dp_maxxfer)) {
2171 fast:
2172 		/*
2173 		 * If the driver supports FMA, insert the handle in the FMA DMA
2174 		 * handle cache.
2175 		 */
2176 		if (attr->dma_attr_flags & DDI_DMA_FLAGERR)
2177 			hp->dmai_error.err_cf = rootnex_dma_check;
2178 
2179 		/*
2180 		 * copy out the first cookie and ccountp, set the cookie
2181 		 * pointer to the second cookie. The first cookie is passed
2182 		 * back on the stack. Additional cookies are accessed via
2183 		 * ddi_dma_nextcookie()
2184 		 */
2185 		*cookiep = dma->dp_cookies[0];
2186 		*ccountp = sinfo->si_sgl_size;
2187 		hp->dmai_cookie++;
2188 		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
2189 		ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
2190 		ROOTNEX_DPROBE4(rootnex__bind__fast, dev_info_t *, rdip,
2191 		    uint64_t, rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS],
2192 		    uint_t, dmao->dmao_size, uint_t, *ccountp);
2193 
2194 
2195 		return (DDI_DMA_MAPPED);
2196 	}
2197 
2198 	/*
2199 	 * go to the slow path, we may need to alloc more memory, create
2200 	 * multiple windows, and munge up a sgl to make the device happy.
2201 	 */
2202 
2203 	/*
2204 	 * With the IOMMU mapobject method used, we should never hit
2205 	 * the slow path. If we do, something is seriously wrong.
2206 	 * Clean up and return an error.
2207 	 */
2208 
2209 #if defined(__amd64) && !defined(__xpv)
2210 
2211 	if (dma->dp_dvma_used) {
2212 		(void) iommulib_nexdma_unmapobject(dip, rdip, handle,
2213 		    &dma->dp_dvma);
2214 		e = DDI_DMA_NOMAPPING;
2215 	} else {
2216 #endif
2217 		e = rootnex_bind_slowpath(hp, dmareq, dma, attr, &dma->dp_dma,
2218 		    kmflag);
2219 #if defined(__amd64) && !defined(__xpv)
2220 	}
2221 #endif
2222 	if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) {
2223 		if (dma->dp_need_to_free_cookie) {
2224 			kmem_free(dma->dp_cookies, dma->dp_cookie_size);
2225 		}
2226 		ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
2227 		rootnex_clean_dmahdl(hp); /* must be after free cookie */
2228 		return (e);
2229 	}
2230 
2231 	/*
2232 	 * If the driver supports FMA, insert the handle in the FMA DMA handle
2233 	 * cache.
2234 	 */
2235 	if (attr->dma_attr_flags & DDI_DMA_FLAGERR)
2236 		hp->dmai_error.err_cf = rootnex_dma_check;
2237 
2238 	/* if the first window uses the copy buffer, sync it for the device */
2239 	if ((dma->dp_window[dma->dp_current_win].wd_dosync) &&
2240 	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
2241 		(void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
2242 		    DDI_DMA_SYNC_FORDEV);
2243 	}
2244 
2245 	/*
2246 	 * copy out the first cookie and ccountp, set the cookie pointer to the
2247 	 * second cookie. Make sure the partial flag is set/cleared correctly.
2248 	 * If we have a partial map (i.e. multiple windows), the number of
2249 	 * cookies we return is the number of cookies in the first window.
2250 	 */
2251 	if (e == DDI_DMA_MAPPED) {
2252 		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
2253 		*ccountp = sinfo->si_sgl_size;
2254 		hp->dmai_nwin = 1;
2255 	} else {
2256 		hp->dmai_rflags |= DDI_DMA_PARTIAL;
2257 		*ccountp = dma->dp_window[dma->dp_current_win].wd_cookie_cnt;
2258 		ASSERT(hp->dmai_nwin <= dma->dp_max_win);
2259 	}
2260 	*cookiep = dma->dp_cookies[0];
2261 	hp->dmai_cookie++;
2262 
2263 	ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
2264 	ROOTNEX_DPROBE4(rootnex__bind__slow, dev_info_t *, rdip, uint64_t,
2265 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t,
2266 	    dmao->dmao_size, uint_t, *ccountp);
2267 	return (e);
2268 }
2269 
2270 /*
2271  * rootnex_dma_bindhdl()
2272  *    called from ddi_dma_addr_bind_handle() and ddi_dma_buf_bind_handle().
2273  */
2274 static int
2275 rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
2276     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
2277     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
2278 {
2279 	int ret;
2280 #if defined(__amd64) && !defined(__xpv)
2281 	if (IOMMU_USED(rdip))
2282 		ret = iommulib_nexdma_bindhdl(dip, rdip, handle, dmareq,
2283 		    cookiep, ccountp);
2284 	else
2285 #endif
2286 	ret = rootnex_coredma_bindhdl(dip, rdip, handle, dmareq,
2287 	    cookiep, ccountp);
2288 
2289 	if (ret == DDI_DMA_NORESOURCES && dmareq->dmar_fp != DDI_DMA_DONTWAIT) {
2290 		ddi_set_callback(dmareq->dmar_fp, dmareq->dmar_arg,
2291 		    &rootnex_state->r_dvma_call_list_id);
2292 	}
2293 
2294 	return (ret);
2295 }
2296 
2297 
2298 
2299 /*ARGSUSED*/
2300 static int
2301 rootnex_coredma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
2302     ddi_dma_handle_t handle)
2303 {
2304 	ddi_dma_impl_t *hp;
2305 	rootnex_dma_t *dma;
2306 	int e;
2307 
2308 	hp = (ddi_dma_impl_t *)handle;
2309 	dma = (rootnex_dma_t *)hp->dmai_private;
2310 
2311 	/* make sure the buffer wasn't free'd before calling unbind */
2312 	if (rootnex_unbind_verify_buffer) {
2313 		e = rootnex_verify_buffer(dma);
2314 		if (e != DDI_SUCCESS) {
2315 			ASSERT(0);
2316 			return (DDI_FAILURE);
2317 		}
2318 	}
2319 
2320 	/* sync the current window before unbinding the buffer */
2321 	if (dma->dp_window && dma->dp_window[dma->dp_current_win].wd_dosync &&
2322 	    (hp->dmai_rflags & DDI_DMA_READ)) {
2323 		(void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
2324 		    DDI_DMA_SYNC_FORCPU);
2325 	}
2326 
2327 	/*
2328 	 * cleanup and copy buffer or window state. if we didn't use the copy
2329 	 * buffer or windows, there won't be much to do :-)
2330 	 */
2331 	rootnex_teardown_copybuf(dma);
2332 	rootnex_teardown_windows(dma);
2333 
2334 #if defined(__amd64) && !defined(__xpv)
2335 	if (IOMMU_USED(rdip) && dma->dp_dvma_used)
2336 		(void) iommulib_nexdma_unmapobject(dip, rdip, handle,
2337 		    &dma->dp_dvma);
2338 #endif
2339 
2340 	/*
2341 	 * If we had to allocate space to for the worse case sgl (it didn't
2342 	 * fit into our pre-allocate buffer), free that up now
2343 	 */
2344 	if (dma->dp_need_to_free_cookie) {
2345 		kmem_free(dma->dp_cookies, dma->dp_cookie_size);
2346 	}
2347 
2348 	/*
2349 	 * clean up the handle so it's ready for the next bind (i.e. if the
2350 	 * handle is reused).
2351 	 */
2352 	rootnex_clean_dmahdl(hp);
2353 	hp->dmai_error.err_cf = NULL;
2354 
2355 	ROOTNEX_DPROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
2356 	ROOTNEX_DPROBE1(rootnex__unbind, uint64_t,
2357 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
2358 
2359 	return (DDI_SUCCESS);
2360 }
2361 
2362 /*
2363  * rootnex_dma_unbindhdl()
2364  *    called from ddi_dma_unbind_handle()
2365  */
2366 /*ARGSUSED*/
2367 static int
2368 rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
2369     ddi_dma_handle_t handle)
2370 {
2371 	int ret;
2372 
2373 #if defined(__amd64) && !defined(__xpv)
2374 	if (IOMMU_USED(rdip))
2375 		ret = iommulib_nexdma_unbindhdl(dip, rdip, handle);
2376 	else
2377 #endif
2378 	ret = rootnex_coredma_unbindhdl(dip, rdip, handle);
2379 
2380 	if (rootnex_state->r_dvma_call_list_id)
2381 		ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
2382 
2383 	return (ret);
2384 }
2385 
2386 #if defined(__amd64) && !defined(__xpv)
2387 
2388 static int
2389 rootnex_coredma_get_sleep_flags(ddi_dma_handle_t handle)
2390 {
2391 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
2392 	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
2393 
2394 	if (dma->dp_sleep_flags != KM_SLEEP &&
2395 	    dma->dp_sleep_flags != KM_NOSLEEP)
2396 		cmn_err(CE_PANIC, "kmem sleep flags not set in DMA handle");
2397 	return (dma->dp_sleep_flags);
2398 }
2399 /*ARGSUSED*/
2400 static void
2401 rootnex_coredma_reset_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
2402 {
2403 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
2404 	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
2405 	rootnex_window_t *window;
2406 
2407 	if (dma->dp_window) {
2408 		window = &dma->dp_window[dma->dp_current_win];
2409 		hp->dmai_cookie = window->wd_first_cookie;
2410 	} else {
2411 		hp->dmai_cookie = dma->dp_cookies;
2412 	}
2413 	hp->dmai_cookie++;
2414 }
2415 
2416 /*ARGSUSED*/
2417 static int
2418 rootnex_coredma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
2419     ddi_dma_cookie_t **cookiepp, uint_t *ccountp)
2420 {
2421 	int i;
2422 	int km_flags;
2423 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
2424 	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
2425 	rootnex_window_t *window;
2426 	ddi_dma_cookie_t *cp;
2427 	ddi_dma_cookie_t *cookie;
2428 
2429 	ASSERT(*cookiepp == NULL);
2430 	ASSERT(*ccountp == 0);
2431 
2432 	if (dma->dp_window) {
2433 		window = &dma->dp_window[dma->dp_current_win];
2434 		cp = window->wd_first_cookie;
2435 		*ccountp = window->wd_cookie_cnt;
2436 	} else {
2437 		cp = dma->dp_cookies;
2438 		*ccountp = dma->dp_sglinfo.si_sgl_size;
2439 	}
2440 
2441 	km_flags = rootnex_coredma_get_sleep_flags(handle);
2442 	cookie = kmem_zalloc(sizeof (ddi_dma_cookie_t) * (*ccountp), km_flags);
2443 	if (cookie == NULL) {
2444 		return (DDI_DMA_NORESOURCES);
2445 	}
2446 
2447 	for (i = 0; i < *ccountp; i++) {
2448 		cookie[i].dmac_notused = cp[i].dmac_notused;
2449 		cookie[i].dmac_type = cp[i].dmac_type;
2450 		cookie[i].dmac_address = cp[i].dmac_address;
2451 		cookie[i].dmac_size = cp[i].dmac_size;
2452 	}
2453 
2454 	*cookiepp = cookie;
2455 
2456 	return (DDI_SUCCESS);
2457 }
2458 
2459 /*ARGSUSED*/
2460 static int
2461 rootnex_coredma_set_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
2462     ddi_dma_cookie_t *cookiep, uint_t ccount)
2463 {
2464 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
2465 	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
2466 	rootnex_window_t *window;
2467 	ddi_dma_cookie_t *cur_cookiep;
2468 
2469 	ASSERT(cookiep);
2470 	ASSERT(ccount != 0);
2471 	ASSERT(dma->dp_need_to_switch_cookies == B_FALSE);
2472 
2473 	if (dma->dp_window) {
2474 		window = &dma->dp_window[dma->dp_current_win];
2475 		dma->dp_saved_cookies = window->wd_first_cookie;
2476 		window->wd_first_cookie = cookiep;
2477 		ASSERT(ccount == window->wd_cookie_cnt);
2478 		cur_cookiep = (hp->dmai_cookie - dma->dp_saved_cookies)
2479 		    + window->wd_first_cookie;
2480 	} else {
2481 		dma->dp_saved_cookies = dma->dp_cookies;
2482 		dma->dp_cookies = cookiep;
2483 		ASSERT(ccount == dma->dp_sglinfo.si_sgl_size);
2484 		cur_cookiep = (hp->dmai_cookie - dma->dp_saved_cookies)
2485 		    + dma->dp_cookies;
2486 	}
2487 
2488 	dma->dp_need_to_switch_cookies = B_TRUE;
2489 	hp->dmai_cookie = cur_cookiep;
2490 
2491 	return (DDI_SUCCESS);
2492 }
2493 
2494 /*ARGSUSED*/
2495 static int
2496 rootnex_coredma_clear_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
2497 {
2498 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
2499 	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
2500 	rootnex_window_t *window;
2501 	ddi_dma_cookie_t *cur_cookiep;
2502 	ddi_dma_cookie_t *cookie_array;
2503 	uint_t ccount;
2504 
2505 	/* check if cookies have not been switched */
2506 	if (dma->dp_need_to_switch_cookies == B_FALSE)
2507 		return (DDI_SUCCESS);
2508 
2509 	ASSERT(dma->dp_saved_cookies);
2510 
2511 	if (dma->dp_window) {
2512 		window = &dma->dp_window[dma->dp_current_win];
2513 		cookie_array = window->wd_first_cookie;
2514 		window->wd_first_cookie = dma->dp_saved_cookies;
2515 		dma->dp_saved_cookies = NULL;
2516 		ccount = window->wd_cookie_cnt;
2517 		cur_cookiep = (hp->dmai_cookie - cookie_array)
2518 		    + window->wd_first_cookie;
2519 	} else {
2520 		cookie_array = dma->dp_cookies;
2521 		dma->dp_cookies = dma->dp_saved_cookies;
2522 		dma->dp_saved_cookies = NULL;
2523 		ccount = dma->dp_sglinfo.si_sgl_size;
2524 		cur_cookiep = (hp->dmai_cookie - cookie_array)
2525 		    + dma->dp_cookies;
2526 	}
2527 
2528 	kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
2529 
2530 	hp->dmai_cookie = cur_cookiep;
2531 
2532 	dma->dp_need_to_switch_cookies = B_FALSE;
2533 
2534 	return (DDI_SUCCESS);
2535 }
2536 
2537 #endif
2538 
2539 static struct as *
2540 rootnex_get_as(ddi_dma_obj_t *dmao)
2541 {
2542 	struct as *asp;
2543 
2544 	switch (dmao->dmao_type) {
2545 	case DMA_OTYP_VADDR:
2546 	case DMA_OTYP_BUFVADDR:
2547 		asp = dmao->dmao_obj.virt_obj.v_as;
2548 		if (asp == NULL)
2549 			asp = &kas;
2550 		break;
2551 	default:
2552 		asp = NULL;
2553 		break;
2554 	}
2555 	return (asp);
2556 }
2557 
2558 /*
2559  * rootnex_verify_buffer()
2560  *   verify buffer wasn't free'd
2561  */
2562 static int
2563 rootnex_verify_buffer(rootnex_dma_t *dma)
2564 {
2565 	page_t **pplist;
2566 	caddr_t vaddr;
2567 	uint_t pcnt;
2568 	uint_t poff;
2569 	page_t *pp;
2570 	char b;
2571 	int i;
2572 
2573 	/* Figure out how many pages this buffer occupies */
2574 	if (dma->dp_dma.dmao_type == DMA_OTYP_PAGES) {
2575 		poff = dma->dp_dma.dmao_obj.pp_obj.pp_offset & MMU_PAGEOFFSET;
2576 	} else {
2577 		vaddr = dma->dp_dma.dmao_obj.virt_obj.v_addr;
2578 		poff = (uintptr_t)vaddr & MMU_PAGEOFFSET;
2579 	}
2580 	pcnt = mmu_btopr(dma->dp_dma.dmao_size + poff);
2581 
2582 	switch (dma->dp_dma.dmao_type) {
2583 	case DMA_OTYP_PAGES:
2584 		/*
2585 		 * for a linked list of pp's walk through them to make sure
2586 		 * they're locked and not free.
2587 		 */
2588 		pp = dma->dp_dma.dmao_obj.pp_obj.pp_pp;
2589 		for (i = 0; i < pcnt; i++) {
2590 			if (PP_ISFREE(pp) || !PAGE_LOCKED(pp)) {
2591 				return (DDI_FAILURE);
2592 			}
2593 			pp = pp->p_next;
2594 		}
2595 		break;
2596 
2597 	case DMA_OTYP_VADDR:
2598 	case DMA_OTYP_BUFVADDR:
2599 		pplist = dma->dp_dma.dmao_obj.virt_obj.v_priv;
2600 		/*
2601 		 * for an array of pp's walk through them to make sure they're
2602 		 * not free. It's possible that they may not be locked.
2603 		 */
2604 		if (pplist) {
2605 			for (i = 0; i < pcnt; i++) {
2606 				if (PP_ISFREE(pplist[i])) {
2607 					return (DDI_FAILURE);
2608 				}
2609 			}
2610 
2611 		/* For a virtual address, try to peek at each page */
2612 		} else {
2613 			if (rootnex_get_as(&dma->dp_dma) == &kas) {
2614 				for (i = 0; i < pcnt; i++) {
2615 					if (ddi_peek8(NULL, vaddr, &b) ==
2616 					    DDI_FAILURE)
2617 						return (DDI_FAILURE);
2618 					vaddr += MMU_PAGESIZE;
2619 				}
2620 			}
2621 		}
2622 		break;
2623 
2624 	default:
2625 		cmn_err(CE_PANIC, "rootnex_verify_buffer: bad DMA object");
2626 		break;
2627 	}
2628 
2629 	return (DDI_SUCCESS);
2630 }
2631 
2632 
2633 /*
2634  * rootnex_clean_dmahdl()
2635  *    Clean the dma handle. This should be called on a handle alloc and an
2636  *    unbind handle. Set the handle state to the default settings.
2637  */
2638 static void
2639 rootnex_clean_dmahdl(ddi_dma_impl_t *hp)
2640 {
2641 	rootnex_dma_t *dma;
2642 
2643 
2644 	dma = (rootnex_dma_t *)hp->dmai_private;
2645 
2646 	hp->dmai_nwin = 0;
2647 	dma->dp_current_cookie = 0;
2648 	dma->dp_copybuf_size = 0;
2649 	dma->dp_window = NULL;
2650 	dma->dp_cbaddr = NULL;
2651 	dma->dp_inuse = B_FALSE;
2652 	dma->dp_dvma_used = B_FALSE;
2653 	dma->dp_need_to_free_cookie = B_FALSE;
2654 	dma->dp_need_to_switch_cookies = B_FALSE;
2655 	dma->dp_saved_cookies = NULL;
2656 	dma->dp_sleep_flags = KM_PANIC;
2657 	dma->dp_need_to_free_window = B_FALSE;
2658 	dma->dp_partial_required = B_FALSE;
2659 	dma->dp_trim_required = B_FALSE;
2660 	dma->dp_sglinfo.si_copybuf_req = 0;
2661 #if !defined(__amd64)
2662 	dma->dp_cb_remaping = B_FALSE;
2663 	dma->dp_kva = NULL;
2664 #endif
2665 
2666 	/* FMA related initialization */
2667 	hp->dmai_fault = 0;
2668 	hp->dmai_fault_check = NULL;
2669 	hp->dmai_fault_notify = NULL;
2670 	hp->dmai_error.err_ena = 0;
2671 	hp->dmai_error.err_status = DDI_FM_OK;
2672 	hp->dmai_error.err_expected = DDI_FM_ERR_UNEXPECTED;
2673 	hp->dmai_error.err_ontrap = NULL;
2674 }
2675 
2676 
2677 /*
2678  * rootnex_valid_alloc_parms()
2679  *    Called in ddi_dma_alloc_handle path to validate its parameters.
2680  */
2681 static int
2682 rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegmentsize)
2683 {
2684 	if ((attr->dma_attr_seg < MMU_PAGEOFFSET) ||
2685 	    (attr->dma_attr_count_max < MMU_PAGEOFFSET) ||
2686 	    (attr->dma_attr_granular > MMU_PAGESIZE) ||
2687 	    (attr->dma_attr_maxxfer < MMU_PAGESIZE)) {
2688 		return (DDI_DMA_BADATTR);
2689 	}
2690 
2691 	if (attr->dma_attr_addr_hi <= attr->dma_attr_addr_lo) {
2692 		return (DDI_DMA_BADATTR);
2693 	}
2694 
2695 	if ((attr->dma_attr_seg & MMU_PAGEOFFSET) != MMU_PAGEOFFSET ||
2696 	    MMU_PAGESIZE & (attr->dma_attr_granular - 1) ||
2697 	    attr->dma_attr_sgllen == 0) {
2698 		return (DDI_DMA_BADATTR);
2699 	}
2700 
2701 	/* We should be able to DMA into every byte offset in a page */
2702 	if (maxsegmentsize < MMU_PAGESIZE) {
2703 		return (DDI_DMA_BADATTR);
2704 	}
2705 
2706 	/* if we're bouncing on seg, seg must be <= addr_hi */
2707 	if ((attr->dma_attr_flags & _DDI_DMA_BOUNCE_ON_SEG) &&
2708 	    (attr->dma_attr_seg > attr->dma_attr_addr_hi)) {
2709 		return (DDI_DMA_BADATTR);
2710 	}
2711 	return (DDI_SUCCESS);
2712 }
2713 
2714 /*
2715  * rootnex_valid_bind_parms()
2716  *    Called in ddi_dma_*_bind_handle path to validate its parameters.
2717  */
2718 /* ARGSUSED */
2719 static int
2720 rootnex_valid_bind_parms(ddi_dma_req_t *dmareq, ddi_dma_attr_t *attr)
2721 {
2722 #if !defined(__amd64)
2723 	/*
2724 	 * we only support up to a 2G-1 transfer size on 32-bit kernels so
2725 	 * we can track the offset for the obsoleted interfaces.
2726 	 */
2727 	if (dmareq->dmar_object.dmao_size > 0x7FFFFFFF) {
2728 		return (DDI_DMA_TOOBIG);
2729 	}
2730 #endif
2731 
2732 	return (DDI_SUCCESS);
2733 }
2734 
2735 
2736 /*
2737  * rootnex_need_bounce_seg()
2738  *    check to see if the buffer lives on both side of the seg.
2739  */
2740 static boolean_t
2741 rootnex_need_bounce_seg(ddi_dma_obj_t *dmar_object, rootnex_sglinfo_t *sglinfo)
2742 {
2743 	ddi_dma_atyp_t buftype;
2744 	rootnex_addr_t raddr;
2745 	boolean_t lower_addr;
2746 	boolean_t upper_addr;
2747 	uint64_t offset;
2748 	page_t **pplist;
2749 	uint64_t paddr;
2750 	uint32_t psize;
2751 	uint32_t size;
2752 	caddr_t vaddr;
2753 	uint_t pcnt;
2754 	page_t *pp;
2755 
2756 
2757 	/* shortcuts */
2758 	pplist = dmar_object->dmao_obj.virt_obj.v_priv;
2759 	vaddr = dmar_object->dmao_obj.virt_obj.v_addr;
2760 	buftype = dmar_object->dmao_type;
2761 	size = dmar_object->dmao_size;
2762 
2763 	lower_addr = B_FALSE;
2764 	upper_addr = B_FALSE;
2765 	pcnt = 0;
2766 
2767 	/*
2768 	 * Process the first page to handle the initial offset of the buffer.
2769 	 * We'll use the base address we get later when we loop through all
2770 	 * the pages.
2771 	 */
2772 	if (buftype == DMA_OTYP_PAGES) {
2773 		pp = dmar_object->dmao_obj.pp_obj.pp_pp;
2774 		offset =  dmar_object->dmao_obj.pp_obj.pp_offset &
2775 		    MMU_PAGEOFFSET;
2776 		paddr = pfn_to_pa(pp->p_pagenum) + offset;
2777 		psize = MIN(size, (MMU_PAGESIZE - offset));
2778 		pp = pp->p_next;
2779 		sglinfo->si_asp = NULL;
2780 	} else if (pplist != NULL) {
2781 		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
2782 		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
2783 		if (sglinfo->si_asp == NULL) {
2784 			sglinfo->si_asp = &kas;
2785 		}
2786 		paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
2787 		paddr += offset;
2788 		psize = MIN(size, (MMU_PAGESIZE - offset));
2789 		pcnt++;
2790 	} else {
2791 		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
2792 		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
2793 		if (sglinfo->si_asp == NULL) {
2794 			sglinfo->si_asp = &kas;
2795 		}
2796 		paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, vaddr));
2797 		paddr += offset;
2798 		psize = MIN(size, (MMU_PAGESIZE - offset));
2799 		vaddr += psize;
2800 	}
2801 
2802 	raddr = ROOTNEX_PADDR_TO_RBASE(paddr);
2803 
2804 	if ((raddr + psize) > sglinfo->si_segmask) {
2805 		upper_addr = B_TRUE;
2806 	} else {
2807 		lower_addr = B_TRUE;
2808 	}
2809 	size -= psize;
2810 
2811 	/*
2812 	 * Walk through the rest of the pages in the buffer. Track to see
2813 	 * if we have pages on both sides of the segment boundary.
2814 	 */
2815 	while (size > 0) {
2816 		/* partial or full page */
2817 		psize = MIN(size, MMU_PAGESIZE);
2818 
2819 		if (buftype == DMA_OTYP_PAGES) {
2820 			/* get the paddr from the page_t */
2821 			ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
2822 			paddr = pfn_to_pa(pp->p_pagenum);
2823 			pp = pp->p_next;
2824 		} else if (pplist != NULL) {
2825 			/* index into the array of page_t's to get the paddr */
2826 			ASSERT(!PP_ISFREE(pplist[pcnt]));
2827 			paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
2828 			pcnt++;
2829 		} else {
2830 			/* call into the VM to get the paddr */
2831 			paddr =  pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat,
2832 			    vaddr));
2833 			vaddr += psize;
2834 		}
2835 
2836 		raddr = ROOTNEX_PADDR_TO_RBASE(paddr);
2837 
2838 		if ((raddr + psize) > sglinfo->si_segmask) {
2839 			upper_addr = B_TRUE;
2840 		} else {
2841 			lower_addr = B_TRUE;
2842 		}
2843 		/*
2844 		 * if the buffer lives both above and below the segment
2845 		 * boundary, or the current page is the page immediately
2846 		 * after the segment, we will use a copy/bounce buffer for
2847 		 * all pages > seg.
2848 		 */
2849 		if ((lower_addr && upper_addr) ||
2850 		    (raddr == (sglinfo->si_segmask + 1))) {
2851 			return (B_TRUE);
2852 		}
2853 
2854 		size -= psize;
2855 	}
2856 
2857 	return (B_FALSE);
2858 }
2859 
2860 /*
2861  * rootnex_get_sgl()
2862  *    Called in bind fastpath to get the sgl. Most of this will be replaced
2863  *    with a call to the vm layer when vm2.0 comes around...
2864  */
2865 static void
2866 rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
2867     rootnex_sglinfo_t *sglinfo)
2868 {
2869 	ddi_dma_atyp_t buftype;
2870 	rootnex_addr_t raddr;
2871 	uint64_t last_page;
2872 	uint64_t offset;
2873 	uint64_t addrhi;
2874 	uint64_t addrlo;
2875 	uint64_t maxseg;
2876 	page_t **pplist;
2877 	uint64_t paddr;
2878 	uint32_t psize;
2879 	uint32_t size;
2880 	caddr_t vaddr;
2881 	uint_t pcnt;
2882 	page_t *pp;
2883 	uint_t cnt;
2884 
2885 
2886 	/* shortcuts */
2887 	pplist = dmar_object->dmao_obj.virt_obj.v_priv;
2888 	vaddr = dmar_object->dmao_obj.virt_obj.v_addr;
2889 	maxseg = sglinfo->si_max_cookie_size;
2890 	buftype = dmar_object->dmao_type;
2891 	addrhi = sglinfo->si_max_addr;
2892 	addrlo = sglinfo->si_min_addr;
2893 	size = dmar_object->dmao_size;
2894 
2895 	pcnt = 0;
2896 	cnt = 0;
2897 
2898 
2899 	/*
2900 	 * check to see if we need to use the copy buffer for pages over
2901 	 * the segment attr.
2902 	 */
2903 	sglinfo->si_bounce_on_seg = B_FALSE;
2904 	if (sglinfo->si_flags & _DDI_DMA_BOUNCE_ON_SEG) {
2905 		sglinfo->si_bounce_on_seg = rootnex_need_bounce_seg(
2906 		    dmar_object, sglinfo);
2907 	}
2908 
2909 	/*
2910 	 * if we were passed down a linked list of pages, i.e. pointer to
2911 	 * page_t, use this to get our physical address and buf offset.
2912 	 */
2913 	if (buftype == DMA_OTYP_PAGES) {
2914 		pp = dmar_object->dmao_obj.pp_obj.pp_pp;
2915 		ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
2916 		offset =  dmar_object->dmao_obj.pp_obj.pp_offset &
2917 		    MMU_PAGEOFFSET;
2918 		paddr = pfn_to_pa(pp->p_pagenum) + offset;
2919 		psize = MIN(size, (MMU_PAGESIZE - offset));
2920 		pp = pp->p_next;
2921 		sglinfo->si_asp = NULL;
2922 
2923 	/*
2924 	 * We weren't passed down a linked list of pages, but if we were passed
2925 	 * down an array of pages, use this to get our physical address and buf
2926 	 * offset.
2927 	 */
2928 	} else if (pplist != NULL) {
2929 		ASSERT((buftype == DMA_OTYP_VADDR) ||
2930 		    (buftype == DMA_OTYP_BUFVADDR));
2931 
2932 		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
2933 		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
2934 		if (sglinfo->si_asp == NULL) {
2935 			sglinfo->si_asp = &kas;
2936 		}
2937 
2938 		ASSERT(!PP_ISFREE(pplist[pcnt]));
2939 		paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
2940 		paddr += offset;
2941 		psize = MIN(size, (MMU_PAGESIZE - offset));
2942 		pcnt++;
2943 
2944 	/*
2945 	 * All we have is a virtual address, we'll need to call into the VM
2946 	 * to get the physical address.
2947 	 */
2948 	} else {
2949 		ASSERT((buftype == DMA_OTYP_VADDR) ||
2950 		    (buftype == DMA_OTYP_BUFVADDR));
2951 
2952 		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
2953 		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
2954 		if (sglinfo->si_asp == NULL) {
2955 			sglinfo->si_asp = &kas;
2956 		}
2957 
2958 		paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, vaddr));
2959 		paddr += offset;
2960 		psize = MIN(size, (MMU_PAGESIZE - offset));
2961 		vaddr += psize;
2962 	}
2963 
2964 	raddr = ROOTNEX_PADDR_TO_RBASE(paddr);
2965 
2966 	/*
2967 	 * Setup the first cookie with the physical address of the page and the
2968 	 * size of the page (which takes into account the initial offset into
2969 	 * the page.
2970 	 */
2971 	sgl[cnt].dmac_laddress = raddr;
2972 	sgl[cnt].dmac_size = psize;
2973 	sgl[cnt].dmac_type = 0;
2974 
2975 	/*
2976 	 * Save away the buffer offset into the page. We'll need this later in
2977 	 * the copy buffer code to help figure out the page index within the
2978 	 * buffer and the offset into the current page.
2979 	 */
2980 	sglinfo->si_buf_offset = offset;
2981 
2982 	/*
2983 	 * If we are using the copy buffer for anything over the segment
2984 	 * boundary, and this page is over the segment boundary.
2985 	 *   OR
2986 	 * if the DMA engine can't reach the physical address.
2987 	 */
2988 	if (((sglinfo->si_bounce_on_seg) &&
2989 	    ((raddr + psize) > sglinfo->si_segmask)) ||
2990 	    ((raddr < addrlo) || ((raddr + psize) > addrhi))) {
2991 		/*
2992 		 * Increase how much copy buffer we use. We always increase by
2993 		 * pagesize so we don't have to worry about converting offsets.
2994 		 * Set a flag in the cookies dmac_type to indicate that it uses
2995 		 * the copy buffer. If this isn't the last cookie, go to the
2996 		 * next cookie (since we separate each page which uses the copy
2997 		 * buffer in case the copy buffer is not physically contiguous.
2998 		 */
2999 		sglinfo->si_copybuf_req += MMU_PAGESIZE;
3000 		sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
3001 		if ((cnt + 1) < sglinfo->si_max_pages) {
3002 			cnt++;
3003 			sgl[cnt].dmac_laddress = 0;
3004 			sgl[cnt].dmac_size = 0;
3005 			sgl[cnt].dmac_type = 0;
3006 		}
3007 	}
3008 
3009 	/*
3010 	 * save this page's physical address so we can figure out if the next
3011 	 * page is physically contiguous. Keep decrementing size until we are
3012 	 * done with the buffer.
3013 	 */
3014 	last_page = raddr & MMU_PAGEMASK;
3015 	size -= psize;
3016 
3017 	while (size > 0) {
3018 		/* Get the size for this page (i.e. partial or full page) */
3019 		psize = MIN(size, MMU_PAGESIZE);
3020 
3021 		if (buftype == DMA_OTYP_PAGES) {
3022 			/* get the paddr from the page_t */
3023 			ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
3024 			paddr = pfn_to_pa(pp->p_pagenum);
3025 			pp = pp->p_next;
3026 		} else if (pplist != NULL) {
3027 			/* index into the array of page_t's to get the paddr */
3028 			ASSERT(!PP_ISFREE(pplist[pcnt]));
3029 			paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
3030 			pcnt++;
3031 		} else {
3032 			/* call into the VM to get the paddr */
3033 			paddr =  pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat,
3034 			    vaddr));
3035 			vaddr += psize;
3036 		}
3037 
3038 		raddr = ROOTNEX_PADDR_TO_RBASE(paddr);
3039 
3040 		/*
3041 		 * If we are using the copy buffer for anything over the
3042 		 * segment boundary, and this page is over the segment
3043 		 * boundary.
3044 		 *   OR
3045 		 * if the DMA engine can't reach the physical address.
3046 		 */
3047 		if (((sglinfo->si_bounce_on_seg) &&
3048 		    ((raddr + psize) > sglinfo->si_segmask)) ||
3049 		    ((raddr < addrlo) || ((raddr + psize) > addrhi))) {
3050 
3051 			sglinfo->si_copybuf_req += MMU_PAGESIZE;
3052 
3053 			/*
3054 			 * if there is something in the current cookie, go to
3055 			 * the next one. We only want one page in a cookie which
3056 			 * uses the copybuf since the copybuf doesn't have to
3057 			 * be physically contiguous.
3058 			 */
3059 			if (sgl[cnt].dmac_size != 0) {
3060 				cnt++;
3061 			}
3062 			sgl[cnt].dmac_laddress = raddr;
3063 			sgl[cnt].dmac_size = psize;
3064 #if defined(__amd64)
3065 			sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
3066 #else
3067 			/*
3068 			 * save the buf offset for 32-bit kernel. used in the
3069 			 * obsoleted interfaces.
3070 			 */
3071 			sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF |
3072 			    (dmar_object->dmao_size - size);
3073 #endif
3074 			/* if this isn't the last cookie, go to the next one */
3075 			if ((cnt + 1) < sglinfo->si_max_pages) {
3076 				cnt++;
3077 				sgl[cnt].dmac_laddress = 0;
3078 				sgl[cnt].dmac_size = 0;
3079 				sgl[cnt].dmac_type = 0;
3080 			}
3081 
3082 		/*
3083 		 * this page didn't need the copy buffer, if it's not physically
3084 		 * contiguous, or it would put us over a segment boundary, or it
3085 		 * puts us over the max cookie size, or the current sgl doesn't
3086 		 * have anything in it.
3087 		 */
3088 		} else if (((last_page + MMU_PAGESIZE) != raddr) ||
3089 		    !(raddr & sglinfo->si_segmask) ||
3090 		    ((sgl[cnt].dmac_size + psize) > maxseg) ||
3091 		    (sgl[cnt].dmac_size == 0)) {
3092 			/*
3093 			 * if we're not already in a new cookie, go to the next
3094 			 * cookie.
3095 			 */
3096 			if (sgl[cnt].dmac_size != 0) {
3097 				cnt++;
3098 			}
3099 
3100 			/* save the cookie information */
3101 			sgl[cnt].dmac_laddress = raddr;
3102 			sgl[cnt].dmac_size = psize;
3103 #if defined(__amd64)
3104 			sgl[cnt].dmac_type = 0;
3105 #else
3106 			/*
3107 			 * save the buf offset for 32-bit kernel. used in the
3108 			 * obsoleted interfaces.
3109 			 */
3110 			sgl[cnt].dmac_type = dmar_object->dmao_size - size;
3111 #endif
3112 
3113 		/*
3114 		 * this page didn't need the copy buffer, it is physically
3115 		 * contiguous with the last page, and it's <= the max cookie
3116 		 * size.
3117 		 */
3118 		} else {
3119 			sgl[cnt].dmac_size += psize;
3120 
3121 			/*
3122 			 * if this exactly ==  the maximum cookie size, and
3123 			 * it isn't the last cookie, go to the next cookie.
3124 			 */
3125 			if (((sgl[cnt].dmac_size + psize) == maxseg) &&
3126 			    ((cnt + 1) < sglinfo->si_max_pages)) {
3127 				cnt++;
3128 				sgl[cnt].dmac_laddress = 0;
3129 				sgl[cnt].dmac_size = 0;
3130 				sgl[cnt].dmac_type = 0;
3131 			}
3132 		}
3133 
3134 		/*
3135 		 * save this page's physical address so we can figure out if the
3136 		 * next page is physically contiguous. Keep decrementing size
3137 		 * until we are done with the buffer.
3138 		 */
3139 		last_page = raddr;
3140 		size -= psize;
3141 	}
3142 
3143 	/* we're done, save away how many cookies the sgl has */
3144 	if (sgl[cnt].dmac_size == 0) {
3145 		ASSERT(cnt < sglinfo->si_max_pages);
3146 		sglinfo->si_sgl_size = cnt;
3147 	} else {
3148 		sglinfo->si_sgl_size = cnt + 1;
3149 	}
3150 }
3151 
3152 static void
3153 rootnex_dvma_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
3154     rootnex_sglinfo_t *sglinfo)
3155 {
3156 	uint64_t offset;
3157 	uint64_t maxseg;
3158 	uint64_t dvaddr;
3159 	struct dvmaseg *dvs;
3160 	uint64_t paddr;
3161 	uint32_t psize, ssize;
3162 	uint32_t size;
3163 	uint_t cnt;
3164 	int physcontig;
3165 
3166 	ASSERT(dmar_object->dmao_type == DMA_OTYP_DVADDR);
3167 
3168 	/* shortcuts */
3169 	maxseg = sglinfo->si_max_cookie_size;
3170 	size = dmar_object->dmao_size;
3171 
3172 	cnt = 0;
3173 	sglinfo->si_bounce_on_seg = B_FALSE;
3174 
3175 	dvs = dmar_object->dmao_obj.dvma_obj.dv_seg;
3176 	offset = dmar_object->dmao_obj.dvma_obj.dv_off;
3177 	ssize = dvs->dvs_len;
3178 	paddr = dvs->dvs_start;
3179 	paddr += offset;
3180 	psize = MIN(ssize, (maxseg - offset));
3181 	dvaddr = paddr + psize;
3182 	ssize -= psize;
3183 
3184 	sgl[cnt].dmac_laddress = paddr;
3185 	sgl[cnt].dmac_size = psize;
3186 	sgl[cnt].dmac_type = 0;
3187 
3188 	size -= psize;
3189 	while (size > 0) {
3190 		if (ssize == 0) {
3191 			dvs++;
3192 			ssize = dvs->dvs_len;
3193 			dvaddr = dvs->dvs_start;
3194 			physcontig = 0;
3195 		} else
3196 			physcontig = 1;
3197 
3198 		paddr = dvaddr;
3199 		psize = MIN(ssize, maxseg);
3200 		dvaddr += psize;
3201 		ssize -= psize;
3202 
3203 		if (!physcontig || !(paddr & sglinfo->si_segmask) ||
3204 		    ((sgl[cnt].dmac_size + psize) > maxseg) ||
3205 		    (sgl[cnt].dmac_size == 0)) {
3206 			/*
3207 			 * if we're not already in a new cookie, go to the next
3208 			 * cookie.
3209 			 */
3210 			if (sgl[cnt].dmac_size != 0) {
3211 				cnt++;
3212 			}
3213 
3214 			/* save the cookie information */
3215 			sgl[cnt].dmac_laddress = paddr;
3216 			sgl[cnt].dmac_size = psize;
3217 			sgl[cnt].dmac_type = 0;
3218 		} else {
3219 			sgl[cnt].dmac_size += psize;
3220 
3221 			/*
3222 			 * if this exactly ==  the maximum cookie size, and
3223 			 * it isn't the last cookie, go to the next cookie.
3224 			 */
3225 			if (((sgl[cnt].dmac_size + psize) == maxseg) &&
3226 			    ((cnt + 1) < sglinfo->si_max_pages)) {
3227 				cnt++;
3228 				sgl[cnt].dmac_laddress = 0;
3229 				sgl[cnt].dmac_size = 0;
3230 				sgl[cnt].dmac_type = 0;
3231 			}
3232 		}
3233 		size -= psize;
3234 	}
3235 
3236 	/* we're done, save away how many cookies the sgl has */
3237 	if (sgl[cnt].dmac_size == 0) {
3238 		sglinfo->si_sgl_size = cnt;
3239 	} else {
3240 		sglinfo->si_sgl_size = cnt + 1;
3241 	}
3242 }
3243 
3244 /*
3245  * rootnex_bind_slowpath()
3246  *    Call in the bind path if the calling driver can't use the sgl without
3247  *    modifying it. We either need to use the copy buffer and/or we will end up
3248  *    with a partial bind.
3249  */
3250 static int
3251 rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
3252     rootnex_dma_t *dma, ddi_dma_attr_t *attr, ddi_dma_obj_t *dmao, int kmflag)
3253 {
3254 	rootnex_sglinfo_t *sinfo;
3255 	rootnex_window_t *window;
3256 	ddi_dma_cookie_t *cookie;
3257 	size_t copybuf_used;
3258 	size_t dmac_size;
3259 	boolean_t partial;
3260 	off_t cur_offset;
3261 	page_t *cur_pp;
3262 	major_t mnum;
3263 	int e;
3264 	int i;
3265 
3266 
3267 	sinfo = &dma->dp_sglinfo;
3268 	copybuf_used = 0;
3269 	partial = B_FALSE;
3270 
3271 	/*
3272 	 * If we're using the copybuf, set the copybuf state in dma struct.
3273 	 * Needs to be first since it sets the copy buffer size.
3274 	 */
3275 	if (sinfo->si_copybuf_req != 0) {
3276 		e = rootnex_setup_copybuf(hp, dmareq, dma, attr);
3277 		if (e != DDI_SUCCESS) {
3278 			return (e);
3279 		}
3280 	} else {
3281 		dma->dp_copybuf_size = 0;
3282 	}
3283 
3284 	/*
3285 	 * Figure out if we need to do a partial mapping. If so, figure out
3286 	 * if we need to trim the buffers when we munge the sgl.
3287 	 */
3288 	if ((dma->dp_copybuf_size < sinfo->si_copybuf_req) ||
3289 	    (dmao->dmao_size > dma->dp_maxxfer) ||
3290 	    ((unsigned)attr->dma_attr_sgllen < sinfo->si_sgl_size)) {
3291 		dma->dp_partial_required = B_TRUE;
3292 		if (attr->dma_attr_granular != 1) {
3293 			dma->dp_trim_required = B_TRUE;
3294 		}
3295 	} else {
3296 		dma->dp_partial_required = B_FALSE;
3297 		dma->dp_trim_required = B_FALSE;
3298 	}
3299 
3300 	/* If we need to do a partial bind, make sure the driver supports it */
3301 	if (dma->dp_partial_required &&
3302 	    !(dmareq->dmar_flags & DDI_DMA_PARTIAL)) {
3303 
3304 		mnum = ddi_driver_major(dma->dp_dip);
3305 		/*
3306 		 * patchable which allows us to print one warning per major
3307 		 * number.
3308 		 */
3309 		if ((rootnex_bind_warn) &&
3310 		    ((rootnex_warn_list[mnum] & ROOTNEX_BIND_WARNING) == 0)) {
3311 			rootnex_warn_list[mnum] |= ROOTNEX_BIND_WARNING;
3312 			cmn_err(CE_WARN, "!%s: coding error detected, the "
3313 			    "driver is using ddi_dma_attr(9S) incorrectly. "
3314 			    "There is a small risk of data corruption in "
3315 			    "particular with large I/Os. The driver should be "
3316 			    "replaced with a corrected version for proper "
3317 			    "system operation. To disable this warning, add "
3318 			    "'set rootnex:rootnex_bind_warn=0' to "
3319 			    "/etc/system(4).", ddi_driver_name(dma->dp_dip));
3320 		}
3321 		return (DDI_DMA_TOOBIG);
3322 	}
3323 
3324 	/*
3325 	 * we might need multiple windows, setup state to handle them. In this
3326 	 * code path, we will have at least one window.
3327 	 */
3328 	e = rootnex_setup_windows(hp, dma, attr, dmao, kmflag);
3329 	if (e != DDI_SUCCESS) {
3330 		rootnex_teardown_copybuf(dma);
3331 		return (e);
3332 	}
3333 
3334 	window = &dma->dp_window[0];
3335 	cookie = &dma->dp_cookies[0];
3336 	cur_offset = 0;
3337 	rootnex_init_win(hp, dma, window, cookie, cur_offset);
3338 	if (dmao->dmao_type == DMA_OTYP_PAGES) {
3339 		cur_pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp;
3340 	}
3341 
3342 	/* loop though all the cookies we got back from get_sgl() */
3343 	for (i = 0; i < sinfo->si_sgl_size; i++) {
3344 		/*
3345 		 * If we're using the copy buffer, check this cookie and setup
3346 		 * its associated copy buffer state. If this cookie uses the
3347 		 * copy buffer, make sure we sync this window during dma_sync.
3348 		 */
3349 		if (dma->dp_copybuf_size > 0) {
3350 			rootnex_setup_cookie(dmao, dma, cookie,
3351 			    cur_offset, &copybuf_used, &cur_pp);
3352 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3353 				window->wd_dosync = B_TRUE;
3354 			}
3355 		}
3356 
3357 		/*
3358 		 * save away the cookie size, since it could be modified in
3359 		 * the windowing code.
3360 		 */
3361 		dmac_size = cookie->dmac_size;
3362 
3363 		/* if we went over max copybuf size */
3364 		if (dma->dp_copybuf_size &&
3365 		    (copybuf_used > dma->dp_copybuf_size)) {
3366 			partial = B_TRUE;
3367 			e = rootnex_copybuf_window_boundary(hp, dma, &window,
3368 			    cookie, cur_offset, &copybuf_used);
3369 			if (e != DDI_SUCCESS) {
3370 				rootnex_teardown_copybuf(dma);
3371 				rootnex_teardown_windows(dma);
3372 				return (e);
3373 			}
3374 
3375 			/*
3376 			 * if the coookie uses the copy buffer, make sure the
3377 			 * new window we just moved to is set to sync.
3378 			 */
3379 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3380 				window->wd_dosync = B_TRUE;
3381 			}
3382 			ROOTNEX_DPROBE1(rootnex__copybuf__window, dev_info_t *,
3383 			    dma->dp_dip);
3384 
3385 		/* if the cookie cnt == max sgllen, move to the next window */
3386 		} else if (window->wd_cookie_cnt >=
3387 		    (unsigned)attr->dma_attr_sgllen) {
3388 			partial = B_TRUE;
3389 			ASSERT(window->wd_cookie_cnt == attr->dma_attr_sgllen);
3390 			e = rootnex_sgllen_window_boundary(hp, dma, &window,
3391 			    cookie, attr, cur_offset);
3392 			if (e != DDI_SUCCESS) {
3393 				rootnex_teardown_copybuf(dma);
3394 				rootnex_teardown_windows(dma);
3395 				return (e);
3396 			}
3397 
3398 			/*
3399 			 * if the coookie uses the copy buffer, make sure the
3400 			 * new window we just moved to is set to sync.
3401 			 */
3402 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3403 				window->wd_dosync = B_TRUE;
3404 			}
3405 			ROOTNEX_DPROBE1(rootnex__sgllen__window, dev_info_t *,
3406 			    dma->dp_dip);
3407 
3408 		/* else if we will be over maxxfer */
3409 		} else if ((window->wd_size + dmac_size) >
3410 		    dma->dp_maxxfer) {
3411 			partial = B_TRUE;
3412 			e = rootnex_maxxfer_window_boundary(hp, dma, &window,
3413 			    cookie);
3414 			if (e != DDI_SUCCESS) {
3415 				rootnex_teardown_copybuf(dma);
3416 				rootnex_teardown_windows(dma);
3417 				return (e);
3418 			}
3419 
3420 			/*
3421 			 * if the coookie uses the copy buffer, make sure the
3422 			 * new window we just moved to is set to sync.
3423 			 */
3424 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3425 				window->wd_dosync = B_TRUE;
3426 			}
3427 			ROOTNEX_DPROBE1(rootnex__maxxfer__window, dev_info_t *,
3428 			    dma->dp_dip);
3429 
3430 		/* else this cookie fits in the current window */
3431 		} else {
3432 			window->wd_cookie_cnt++;
3433 			window->wd_size += dmac_size;
3434 		}
3435 
3436 		/* track our offset into the buffer, go to the next cookie */
3437 		ASSERT(dmac_size <= dmao->dmao_size);
3438 		ASSERT(cookie->dmac_size <= dmac_size);
3439 		cur_offset += dmac_size;
3440 		cookie++;
3441 	}
3442 
3443 	/* if we ended up with a zero sized window in the end, clean it up */
3444 	if (window->wd_size == 0) {
3445 		hp->dmai_nwin--;
3446 		window--;
3447 	}
3448 
3449 	ASSERT(window->wd_trim.tr_trim_last == B_FALSE);
3450 
3451 	if (!partial) {
3452 		return (DDI_DMA_MAPPED);
3453 	}
3454 
3455 	ASSERT(dma->dp_partial_required);
3456 	return (DDI_DMA_PARTIAL_MAP);
3457 }
3458 
3459 /*
3460  * rootnex_setup_copybuf()
3461  *    Called in bind slowpath. Figures out if we're going to use the copy
3462  *    buffer, and if we do, sets up the basic state to handle it.
3463  */
3464 static int
3465 rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
3466     rootnex_dma_t *dma, ddi_dma_attr_t *attr)
3467 {
3468 	rootnex_sglinfo_t *sinfo;
3469 	ddi_dma_attr_t lattr;
3470 	size_t max_copybuf;
3471 	int cansleep;
3472 	int e;
3473 #if !defined(__amd64)
3474 	int vmflag;
3475 #endif
3476 
3477 	ASSERT(!dma->dp_dvma_used);
3478 
3479 	sinfo = &dma->dp_sglinfo;
3480 
3481 	/* read this first so it's consistent through the routine  */
3482 	max_copybuf = i_ddi_copybuf_size() & MMU_PAGEMASK;
3483 
3484 	/* We need to call into the rootnex on ddi_dma_sync() */
3485 	hp->dmai_rflags &= ~DMP_NOSYNC;
3486 
3487 	/* make sure the copybuf size <= the max size */
3488 	dma->dp_copybuf_size = MIN(sinfo->si_copybuf_req, max_copybuf);
3489 	ASSERT((dma->dp_copybuf_size & MMU_PAGEOFFSET) == 0);
3490 
3491 #if !defined(__amd64)
3492 	/*
3493 	 * if we don't have kva space to copy to/from, allocate the KVA space
3494 	 * now. We only do this for the 32-bit kernel. We use seg kpm space for
3495 	 * the 64-bit kernel.
3496 	 */
3497 	if ((dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) ||
3498 	    (dmareq->dmar_object.dmao_obj.virt_obj.v_as != NULL)) {
3499 
3500 		/* convert the sleep flags */
3501 		if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
3502 			vmflag = VM_SLEEP;
3503 		} else {
3504 			vmflag = VM_NOSLEEP;
3505 		}
3506 
3507 		/* allocate Kernel VA space that we can bcopy to/from */
3508 		dma->dp_kva = vmem_alloc(heap_arena, dma->dp_copybuf_size,
3509 		    vmflag);
3510 		if (dma->dp_kva == NULL) {
3511 			return (DDI_DMA_NORESOURCES);
3512 		}
3513 	}
3514 #endif
3515 
3516 	/* convert the sleep flags */
3517 	if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
3518 		cansleep = 1;
3519 	} else {
3520 		cansleep = 0;
3521 	}
3522 
3523 	/*
3524 	 * Allocate the actual copy buffer. This needs to fit within the DMA
3525 	 * engine limits, so we can't use kmem_alloc... We don't need
3526 	 * contiguous memory (sgllen) since we will be forcing windows on
3527 	 * sgllen anyway.
3528 	 */
3529 	lattr = *attr;
3530 	lattr.dma_attr_align = MMU_PAGESIZE;
3531 	lattr.dma_attr_sgllen = -1;	/* no limit */
3532 	/*
3533 	 * if we're using the copy buffer because of seg, use that for our
3534 	 * upper address limit.
3535 	 */
3536 	if (sinfo->si_bounce_on_seg) {
3537 		lattr.dma_attr_addr_hi = lattr.dma_attr_seg;
3538 	}
3539 	e = i_ddi_mem_alloc(dma->dp_dip, &lattr, dma->dp_copybuf_size, cansleep,
3540 	    0, NULL, &dma->dp_cbaddr, &dma->dp_cbsize, NULL);
3541 	if (e != DDI_SUCCESS) {
3542 #if !defined(__amd64)
3543 		if (dma->dp_kva != NULL) {
3544 			vmem_free(heap_arena, dma->dp_kva,
3545 			    dma->dp_copybuf_size);
3546 		}
3547 #endif
3548 		return (DDI_DMA_NORESOURCES);
3549 	}
3550 
3551 	ROOTNEX_DPROBE2(rootnex__alloc__copybuf, dev_info_t *, dma->dp_dip,
3552 	    size_t, dma->dp_copybuf_size);
3553 
3554 	return (DDI_SUCCESS);
3555 }
3556 
3557 
3558 /*
3559  * rootnex_setup_windows()
3560  *    Called in bind slowpath to setup the window state. We always have windows
3561  *    in the slowpath. Even if the window count = 1.
3562  */
3563 static int
3564 rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
3565     ddi_dma_attr_t *attr, ddi_dma_obj_t *dmao, int kmflag)
3566 {
3567 	rootnex_window_t *windowp;
3568 	rootnex_sglinfo_t *sinfo;
3569 	size_t copy_state_size;
3570 	size_t win_state_size;
3571 	size_t state_available;
3572 	size_t space_needed;
3573 	uint_t copybuf_win;
3574 	uint_t maxxfer_win;
3575 	size_t space_used;
3576 	uint_t sglwin;
3577 
3578 
3579 	sinfo = &dma->dp_sglinfo;
3580 
3581 	dma->dp_current_win = 0;
3582 	hp->dmai_nwin = 0;
3583 
3584 	/* If we don't need to do a partial, we only have one window */
3585 	if (!dma->dp_partial_required) {
3586 		dma->dp_max_win = 1;
3587 
3588 	/*
3589 	 * we need multiple windows, need to figure out the worse case number
3590 	 * of windows.
3591 	 */
3592 	} else {
3593 		/*
3594 		 * if we need windows because we need more copy buffer that
3595 		 * we allow, the worse case number of windows we could need
3596 		 * here would be (copybuf space required / copybuf space that
3597 		 * we have) plus one for remainder, and plus 2 to handle the
3598 		 * extra pages on the trim for the first and last pages of the
3599 		 * buffer (a page is the minimum window size so under the right
3600 		 * attr settings, you could have a window for each page).
3601 		 * The last page will only be hit here if the size is not a
3602 		 * multiple of the granularity (which theoretically shouldn't
3603 		 * be the case but never has been enforced, so we could have
3604 		 * broken things without it).
3605 		 */
3606 		if (sinfo->si_copybuf_req > dma->dp_copybuf_size) {
3607 			ASSERT(dma->dp_copybuf_size > 0);
3608 			copybuf_win = (sinfo->si_copybuf_req /
3609 			    dma->dp_copybuf_size) + 1 + 2;
3610 		} else {
3611 			copybuf_win = 0;
3612 		}
3613 
3614 		/*
3615 		 * if we need windows because we have more cookies than the H/W
3616 		 * can handle, the number of windows we would need here would
3617 		 * be (cookie count / cookies count H/W supports minus 1[for
3618 		 * trim]) plus one for remainder.
3619 		 */
3620 		if ((unsigned)attr->dma_attr_sgllen < sinfo->si_sgl_size) {
3621 			sglwin = (sinfo->si_sgl_size /
3622 			    (attr->dma_attr_sgllen - 1)) + 1;
3623 		} else {
3624 			sglwin = 0;
3625 		}
3626 
3627 		/*
3628 		 * if we need windows because we're binding more memory than the
3629 		 * H/W can transfer at once, the number of windows we would need
3630 		 * here would be (xfer count / max xfer H/W supports) plus one
3631 		 * for remainder, and plus 2 to handle the extra pages on the
3632 		 * trim (see above comment about trim)
3633 		 */
3634 		if (dmao->dmao_size > dma->dp_maxxfer) {
3635 			maxxfer_win = (dmao->dmao_size /
3636 			    dma->dp_maxxfer) + 1 + 2;
3637 		} else {
3638 			maxxfer_win = 0;
3639 		}
3640 		dma->dp_max_win =  copybuf_win + sglwin + maxxfer_win;
3641 		ASSERT(dma->dp_max_win > 0);
3642 	}
3643 	win_state_size = dma->dp_max_win * sizeof (rootnex_window_t);
3644 
3645 	/*
3646 	 * Get space for window and potential copy buffer state. Before we
3647 	 * go and allocate memory, see if we can get away with using what's
3648 	 * left in the pre-allocted state or the dynamically allocated sgl.
3649 	 */
3650 	space_used = (uintptr_t)(sinfo->si_sgl_size *
3651 	    sizeof (ddi_dma_cookie_t));
3652 
3653 	/* if we dynamically allocated space for the cookies */
3654 	if (dma->dp_need_to_free_cookie) {
3655 		/* if we have more space in the pre-allocted buffer, use it */
3656 		ASSERT(space_used <= dma->dp_cookie_size);
3657 		if ((dma->dp_cookie_size - space_used) <=
3658 		    rootnex_state->r_prealloc_size) {
3659 			state_available = rootnex_state->r_prealloc_size;
3660 			windowp = (rootnex_window_t *)dma->dp_prealloc_buffer;
3661 
3662 		/*
3663 		 * else, we have more free space in the dynamically allocated
3664 		 * buffer, i.e. the buffer wasn't worse case fragmented so we
3665 		 * didn't need a lot of cookies.
3666 		 */
3667 		} else {
3668 			state_available = dma->dp_cookie_size - space_used;
3669 			windowp = (rootnex_window_t *)
3670 			    &dma->dp_cookies[sinfo->si_sgl_size];
3671 		}
3672 
3673 	/* we used the pre-alloced buffer */
3674 	} else {
3675 		ASSERT(space_used <= rootnex_state->r_prealloc_size);
3676 		state_available = rootnex_state->r_prealloc_size - space_used;
3677 		windowp = (rootnex_window_t *)
3678 		    &dma->dp_cookies[sinfo->si_sgl_size];
3679 	}
3680 
3681 	/*
3682 	 * figure out how much state we need to track the copy buffer. Add an
3683 	 * addition 8 bytes for pointer alignemnt later.
3684 	 */
3685 	if (dma->dp_copybuf_size > 0) {
3686 		copy_state_size = sinfo->si_max_pages *
3687 		    sizeof (rootnex_pgmap_t);
3688 	} else {
3689 		copy_state_size = 0;
3690 	}
3691 	/* add an additional 8 bytes for pointer alignment */
3692 	space_needed = win_state_size + copy_state_size + 0x8;
3693 
3694 	/* if we have enough space already, use it */
3695 	if (state_available >= space_needed) {
3696 		dma->dp_window = windowp;
3697 		dma->dp_need_to_free_window = B_FALSE;
3698 
3699 	/* not enough space, need to allocate more. */
3700 	} else {
3701 		dma->dp_window = kmem_alloc(space_needed, kmflag);
3702 		if (dma->dp_window == NULL) {
3703 			return (DDI_DMA_NORESOURCES);
3704 		}
3705 		dma->dp_need_to_free_window = B_TRUE;
3706 		dma->dp_window_size = space_needed;
3707 		ROOTNEX_DPROBE2(rootnex__bind__sp__alloc, dev_info_t *,
3708 		    dma->dp_dip, size_t, space_needed);
3709 	}
3710 
3711 	/*
3712 	 * we allocate copy buffer state and window state at the same time.
3713 	 * setup our copy buffer state pointers. Make sure it's aligned.
3714 	 */
3715 	if (dma->dp_copybuf_size > 0) {
3716 		dma->dp_pgmap = (rootnex_pgmap_t *)(((uintptr_t)
3717 		    &dma->dp_window[dma->dp_max_win] + 0x7) & ~0x7);
3718 
3719 #if !defined(__amd64)
3720 		/*
3721 		 * make sure all pm_mapped, pm_vaddr, and pm_pp are set to
3722 		 * false/NULL. Should be quicker to bzero vs loop and set.
3723 		 */
3724 		bzero(dma->dp_pgmap, copy_state_size);
3725 #endif
3726 	} else {
3727 		dma->dp_pgmap = NULL;
3728 	}
3729 
3730 	return (DDI_SUCCESS);
3731 }
3732 
3733 
3734 /*
3735  * rootnex_teardown_copybuf()
3736  *    cleans up after rootnex_setup_copybuf()
3737  */
3738 static void
3739 rootnex_teardown_copybuf(rootnex_dma_t *dma)
3740 {
3741 #if !defined(__amd64)
3742 	int i;
3743 
3744 	/*
3745 	 * if we allocated kernel heap VMEM space, go through all the pages and
3746 	 * map out any of the ones that we're mapped into the kernel heap VMEM
3747 	 * arena. Then free the VMEM space.
3748 	 */
3749 	if (dma->dp_kva != NULL) {
3750 		for (i = 0; i < dma->dp_sglinfo.si_max_pages; i++) {
3751 			if (dma->dp_pgmap[i].pm_mapped) {
3752 				hat_unload(kas.a_hat, dma->dp_pgmap[i].pm_kaddr,
3753 				    MMU_PAGESIZE, HAT_UNLOAD);
3754 				dma->dp_pgmap[i].pm_mapped = B_FALSE;
3755 			}
3756 		}
3757 
3758 		vmem_free(heap_arena, dma->dp_kva, dma->dp_copybuf_size);
3759 	}
3760 
3761 #endif
3762 
3763 	/* if we allocated a copy buffer, free it */
3764 	if (dma->dp_cbaddr != NULL) {
3765 		i_ddi_mem_free(dma->dp_cbaddr, NULL);
3766 	}
3767 }
3768 
3769 
3770 /*
3771  * rootnex_teardown_windows()
3772  *    cleans up after rootnex_setup_windows()
3773  */
3774 static void
3775 rootnex_teardown_windows(rootnex_dma_t *dma)
3776 {
3777 	/*
3778 	 * if we had to allocate window state on the last bind (because we
3779 	 * didn't have enough pre-allocated space in the handle), free it.
3780 	 */
3781 	if (dma->dp_need_to_free_window) {
3782 		kmem_free(dma->dp_window, dma->dp_window_size);
3783 	}
3784 }
3785 
3786 
3787 /*
3788  * rootnex_init_win()
3789  *    Called in bind slow path during creation of a new window. Initializes
3790  *    window state to default values.
3791  */
3792 /*ARGSUSED*/
3793 static void
3794 rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
3795     rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset)
3796 {
3797 	hp->dmai_nwin++;
3798 	window->wd_dosync = B_FALSE;
3799 	window->wd_offset = cur_offset;
3800 	window->wd_size = 0;
3801 	window->wd_first_cookie = cookie;
3802 	window->wd_cookie_cnt = 0;
3803 	window->wd_trim.tr_trim_first = B_FALSE;
3804 	window->wd_trim.tr_trim_last = B_FALSE;
3805 	window->wd_trim.tr_first_copybuf_win = B_FALSE;
3806 	window->wd_trim.tr_last_copybuf_win = B_FALSE;
3807 #if !defined(__amd64)
3808 	window->wd_remap_copybuf = dma->dp_cb_remaping;
3809 #endif
3810 }
3811 
3812 
3813 /*
3814  * rootnex_setup_cookie()
3815  *    Called in the bind slow path when the sgl uses the copy buffer. If any of
3816  *    the sgl uses the copy buffer, we need to go through each cookie, figure
3817  *    out if it uses the copy buffer, and if it does, save away everything we'll
3818  *    need during sync.
3819  */
3820 static void
3821 rootnex_setup_cookie(ddi_dma_obj_t *dmar_object, rootnex_dma_t *dma,
3822     ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used,
3823     page_t **cur_pp)
3824 {
3825 	boolean_t copybuf_sz_power_2;
3826 	rootnex_sglinfo_t *sinfo;
3827 	paddr_t paddr;
3828 	uint_t pidx;
3829 	uint_t pcnt;
3830 	off_t poff;
3831 #if defined(__amd64)
3832 	pfn_t pfn;
3833 #else
3834 	page_t **pplist;
3835 #endif
3836 
3837 	ASSERT(dmar_object->dmao_type != DMA_OTYP_DVADDR);
3838 
3839 	sinfo = &dma->dp_sglinfo;
3840 
3841 	/*
3842 	 * Calculate the page index relative to the start of the buffer. The
3843 	 * index to the current page for our buffer is the offset into the
3844 	 * first page of the buffer plus our current offset into the buffer
3845 	 * itself, shifted of course...
3846 	 */
3847 	pidx = (sinfo->si_buf_offset + cur_offset) >> MMU_PAGESHIFT;
3848 	ASSERT(pidx < sinfo->si_max_pages);
3849 
3850 	/* if this cookie uses the copy buffer */
3851 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3852 		/*
3853 		 * NOTE: we know that since this cookie uses the copy buffer, it
3854 		 * is <= MMU_PAGESIZE.
3855 		 */
3856 
3857 		/*
3858 		 * get the offset into the page. For the 64-bit kernel, get the
3859 		 * pfn which we'll use with seg kpm.
3860 		 */
3861 		poff = cookie->dmac_laddress & MMU_PAGEOFFSET;
3862 #if defined(__amd64)
3863 		/* mfn_to_pfn() is a NOP on i86pc */
3864 		pfn = mfn_to_pfn(cookie->dmac_laddress >> MMU_PAGESHIFT);
3865 #endif /* __amd64 */
3866 
3867 		/* figure out if the copybuf size is a power of 2 */
3868 		if (!ISP2(dma->dp_copybuf_size)) {
3869 			copybuf_sz_power_2 = B_FALSE;
3870 		} else {
3871 			copybuf_sz_power_2 = B_TRUE;
3872 		}
3873 
3874 		/* This page uses the copy buffer */
3875 		dma->dp_pgmap[pidx].pm_uses_copybuf = B_TRUE;
3876 
3877 		/*
3878 		 * save the copy buffer KVA that we'll use with this page.
3879 		 * if we still fit within the copybuf, it's a simple add.
3880 		 * otherwise, we need to wrap over using & or % accordingly.
3881 		 */
3882 		if ((*copybuf_used + MMU_PAGESIZE) <= dma->dp_copybuf_size) {
3883 			dma->dp_pgmap[pidx].pm_cbaddr = dma->dp_cbaddr +
3884 			    *copybuf_used;
3885 		} else {
3886 			if (copybuf_sz_power_2) {
3887 				dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
3888 				    (uintptr_t)dma->dp_cbaddr +
3889 				    (*copybuf_used &
3890 				    (dma->dp_copybuf_size - 1)));
3891 			} else {
3892 				dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
3893 				    (uintptr_t)dma->dp_cbaddr +
3894 				    (*copybuf_used % dma->dp_copybuf_size));
3895 			}
3896 		}
3897 
3898 		/*
3899 		 * over write the cookie physical address with the address of
3900 		 * the physical address of the copy buffer page that we will
3901 		 * use.
3902 		 */
3903 		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat,
3904 		    dma->dp_pgmap[pidx].pm_cbaddr)) + poff;
3905 
3906 		cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(paddr);
3907 
3908 		/* if we have a kernel VA, it's easy, just save that address */
3909 		if ((dmar_object->dmao_type != DMA_OTYP_PAGES) &&
3910 		    (sinfo->si_asp == &kas)) {
3911 			/*
3912 			 * save away the page aligned virtual address of the
3913 			 * driver buffer. Offsets are handled in the sync code.
3914 			 */
3915 			dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)(((uintptr_t)
3916 			    dmar_object->dmao_obj.virt_obj.v_addr + cur_offset)
3917 			    & MMU_PAGEMASK);
3918 #if !defined(__amd64)
3919 			/*
3920 			 * we didn't need to, and will never need to map this
3921 			 * page.
3922 			 */
3923 			dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
3924 #endif
3925 
3926 		/* we don't have a kernel VA. We need one for the bcopy. */
3927 		} else {
3928 #if defined(__amd64)
3929 			/*
3930 			 * for the 64-bit kernel, it's easy. We use seg kpm to
3931 			 * get a Kernel VA for the corresponding pfn.
3932 			 */
3933 			dma->dp_pgmap[pidx].pm_kaddr = hat_kpm_pfn2va(pfn);
3934 #else
3935 			/*
3936 			 * for the 32-bit kernel, this is a pain. First we'll
3937 			 * save away the page_t or user VA for this page. This
3938 			 * is needed in rootnex_dma_win() when we switch to a
3939 			 * new window which requires us to re-map the copy
3940 			 * buffer.
3941 			 */
3942 			pplist = dmar_object->dmao_obj.virt_obj.v_priv;
3943 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
3944 				dma->dp_pgmap[pidx].pm_pp = *cur_pp;
3945 				dma->dp_pgmap[pidx].pm_vaddr = NULL;
3946 			} else if (pplist != NULL) {
3947 				dma->dp_pgmap[pidx].pm_pp = pplist[pidx];
3948 				dma->dp_pgmap[pidx].pm_vaddr = NULL;
3949 			} else {
3950 				dma->dp_pgmap[pidx].pm_pp = NULL;
3951 				dma->dp_pgmap[pidx].pm_vaddr = (caddr_t)
3952 				    (((uintptr_t)
3953 				    dmar_object->dmao_obj.virt_obj.v_addr +
3954 				    cur_offset) & MMU_PAGEMASK);
3955 			}
3956 
3957 			/*
3958 			 * save away the page aligned virtual address which was
3959 			 * allocated from the kernel heap arena (taking into
3960 			 * account if we need more copy buffer than we alloced
3961 			 * and use multiple windows to handle this, i.e. &,%).
3962 			 * NOTE: there isn't and physical memory backing up this
3963 			 * virtual address space currently.
3964 			 */
3965 			if ((*copybuf_used + MMU_PAGESIZE) <=
3966 			    dma->dp_copybuf_size) {
3967 				dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
3968 				    (((uintptr_t)dma->dp_kva + *copybuf_used) &
3969 				    MMU_PAGEMASK);
3970 			} else {
3971 				if (copybuf_sz_power_2) {
3972 					dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
3973 					    (((uintptr_t)dma->dp_kva +
3974 					    (*copybuf_used &
3975 					    (dma->dp_copybuf_size - 1))) &
3976 					    MMU_PAGEMASK);
3977 				} else {
3978 					dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
3979 					    (((uintptr_t)dma->dp_kva +
3980 					    (*copybuf_used %
3981 					    dma->dp_copybuf_size)) &
3982 					    MMU_PAGEMASK);
3983 				}
3984 			}
3985 
3986 			/*
3987 			 * if we haven't used up the available copy buffer yet,
3988 			 * map the kva to the physical page.
3989 			 */
3990 			if (!dma->dp_cb_remaping && ((*copybuf_used +
3991 			    MMU_PAGESIZE) <= dma->dp_copybuf_size)) {
3992 				dma->dp_pgmap[pidx].pm_mapped = B_TRUE;
3993 				if (dma->dp_pgmap[pidx].pm_pp != NULL) {
3994 					i86_pp_map(dma->dp_pgmap[pidx].pm_pp,
3995 					    dma->dp_pgmap[pidx].pm_kaddr);
3996 				} else {
3997 					i86_va_map(dma->dp_pgmap[pidx].pm_vaddr,
3998 					    sinfo->si_asp,
3999 					    dma->dp_pgmap[pidx].pm_kaddr);
4000 				}
4001 
4002 			/*
4003 			 * we've used up the available copy buffer, this page
4004 			 * will have to be mapped during rootnex_dma_win() when
4005 			 * we switch to a new window which requires a re-map
4006 			 * the copy buffer. (32-bit kernel only)
4007 			 */
4008 			} else {
4009 				dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
4010 			}
4011 #endif
4012 			/* go to the next page_t */
4013 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
4014 				*cur_pp = (*cur_pp)->p_next;
4015 			}
4016 		}
4017 
4018 		/* add to the copy buffer count */
4019 		*copybuf_used += MMU_PAGESIZE;
4020 
4021 	/*
4022 	 * This cookie doesn't use the copy buffer. Walk through the pages this
4023 	 * cookie occupies to reflect this.
4024 	 */
4025 	} else {
4026 		/*
4027 		 * figure out how many pages the cookie occupies. We need to
4028 		 * use the original page offset of the buffer and the cookies
4029 		 * offset in the buffer to do this.
4030 		 */
4031 		poff = (sinfo->si_buf_offset + cur_offset) & MMU_PAGEOFFSET;
4032 		pcnt = mmu_btopr(cookie->dmac_size + poff);
4033 
4034 		while (pcnt > 0) {
4035 #if !defined(__amd64)
4036 			/*
4037 			 * the 32-bit kernel doesn't have seg kpm, so we need
4038 			 * to map in the driver buffer (if it didn't come down
4039 			 * with a kernel VA) on the fly. Since this page doesn't
4040 			 * use the copy buffer, it's not, or will it ever, have
4041 			 * to be mapped in.
4042 			 */
4043 			dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
4044 #endif
4045 			dma->dp_pgmap[pidx].pm_uses_copybuf = B_FALSE;
4046 
4047 			/*
4048 			 * we need to update pidx and cur_pp or we'll loose
4049 			 * track of where we are.
4050 			 */
4051 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
4052 				*cur_pp = (*cur_pp)->p_next;
4053 			}
4054 			pidx++;
4055 			pcnt--;
4056 		}
4057 	}
4058 }
4059 
4060 
4061 /*
4062  * rootnex_sgllen_window_boundary()
4063  *    Called in the bind slow path when the next cookie causes us to exceed (in
4064  *    this case == since we start at 0 and sgllen starts at 1) the maximum sgl
4065  *    length supported by the DMA H/W.
4066  */
4067 static int
4068 rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
4069     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, ddi_dma_attr_t *attr,
4070     off_t cur_offset)
4071 {
4072 	off_t new_offset;
4073 	size_t trim_sz;
4074 	off_t coffset;
4075 
4076 
4077 	/*
4078 	 * if we know we'll never have to trim, it's pretty easy. Just move to
4079 	 * the next window and init it. We're done.
4080 	 */
4081 	if (!dma->dp_trim_required) {
4082 		(*windowp)++;
4083 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
4084 		(*windowp)->wd_cookie_cnt++;
4085 		(*windowp)->wd_size = cookie->dmac_size;
4086 		return (DDI_SUCCESS);
4087 	}
4088 
4089 	/* figure out how much we need to trim from the window */
4090 	ASSERT(attr->dma_attr_granular != 0);
4091 	if (dma->dp_granularity_power_2) {
4092 		trim_sz = (*windowp)->wd_size & (attr->dma_attr_granular - 1);
4093 	} else {
4094 		trim_sz = (*windowp)->wd_size % attr->dma_attr_granular;
4095 	}
4096 
4097 	/* The window's a whole multiple of granularity. We're done */
4098 	if (trim_sz == 0) {
4099 		(*windowp)++;
4100 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
4101 		(*windowp)->wd_cookie_cnt++;
4102 		(*windowp)->wd_size = cookie->dmac_size;
4103 		return (DDI_SUCCESS);
4104 	}
4105 
4106 	/*
4107 	 * The window's not a whole multiple of granularity, since we know this
4108 	 * is due to the sgllen, we need to go back to the last cookie and trim
4109 	 * that one, add the left over part of the old cookie into the new
4110 	 * window, and then add in the new cookie into the new window.
4111 	 */
4112 
4113 	/*
4114 	 * make sure the driver isn't making us do something bad... Trimming and
4115 	 * sgllen == 1 don't go together.
4116 	 */
4117 	if (attr->dma_attr_sgllen == 1) {
4118 		return (DDI_DMA_NOMAPPING);
4119 	}
4120 
4121 	/*
4122 	 * first, setup the current window to account for the trim. Need to go
4123 	 * back to the last cookie for this.
4124 	 */
4125 	cookie--;
4126 	(*windowp)->wd_trim.tr_trim_last = B_TRUE;
4127 	(*windowp)->wd_trim.tr_last_cookie = cookie;
4128 	(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
4129 	ASSERT(cookie->dmac_size > trim_sz);
4130 	(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
4131 	(*windowp)->wd_size -= trim_sz;
4132 
4133 	/* save the buffer offsets for the next window */
4134 	coffset = cookie->dmac_size - trim_sz;
4135 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
4136 
4137 	/*
4138 	 * set this now in case this is the first window. all other cases are
4139 	 * set in dma_win()
4140 	 */
4141 	cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
4142 
4143 	/*
4144 	 * initialize the next window using what's left over in the previous
4145 	 * cookie.
4146 	 */
4147 	(*windowp)++;
4148 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
4149 	(*windowp)->wd_cookie_cnt++;
4150 	(*windowp)->wd_trim.tr_trim_first = B_TRUE;
4151 	(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset;
4152 	(*windowp)->wd_trim.tr_first_size = trim_sz;
4153 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
4154 		(*windowp)->wd_dosync = B_TRUE;
4155 	}
4156 
4157 	/*
4158 	 * now go back to the current cookie and add it to the new window. set
4159 	 * the new window size to the what was left over from the previous
4160 	 * cookie and what's in the current cookie.
4161 	 */
4162 	cookie++;
4163 	(*windowp)->wd_cookie_cnt++;
4164 	(*windowp)->wd_size = trim_sz + cookie->dmac_size;
4165 
4166 	/*
4167 	 * trim plus the next cookie could put us over maxxfer (a cookie can be
4168 	 * a max size of maxxfer). Handle that case.
4169 	 */
4170 	if ((*windowp)->wd_size > dma->dp_maxxfer) {
4171 		/*
4172 		 * maxxfer is already a whole multiple of granularity, and this
4173 		 * trim will be <= the previous trim (since a cookie can't be
4174 		 * larger than maxxfer). Make things simple here.
4175 		 */
4176 		trim_sz = (*windowp)->wd_size - dma->dp_maxxfer;
4177 		(*windowp)->wd_trim.tr_trim_last = B_TRUE;
4178 		(*windowp)->wd_trim.tr_last_cookie = cookie;
4179 		(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
4180 		(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
4181 		(*windowp)->wd_size -= trim_sz;
4182 		ASSERT((*windowp)->wd_size == dma->dp_maxxfer);
4183 
4184 		/* save the buffer offsets for the next window */
4185 		coffset = cookie->dmac_size - trim_sz;
4186 		new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
4187 
4188 		/* setup the next window */
4189 		(*windowp)++;
4190 		rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
4191 		(*windowp)->wd_cookie_cnt++;
4192 		(*windowp)->wd_trim.tr_trim_first = B_TRUE;
4193 		(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress +
4194 		    coffset;
4195 		(*windowp)->wd_trim.tr_first_size = trim_sz;
4196 	}
4197 
4198 	return (DDI_SUCCESS);
4199 }
4200 
4201 
4202 /*
4203  * rootnex_copybuf_window_boundary()
4204  *    Called in bind slowpath when we get to a window boundary because we used
4205  *    up all the copy buffer that we have.
4206  */
4207 static int
4208 rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
4209     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, off_t cur_offset,
4210     size_t *copybuf_used)
4211 {
4212 	rootnex_sglinfo_t *sinfo;
4213 	off_t new_offset;
4214 	size_t trim_sz;
4215 	paddr_t paddr;
4216 	off_t coffset;
4217 	uint_t pidx;
4218 	off_t poff;
4219 
4220 
4221 	sinfo = &dma->dp_sglinfo;
4222 
4223 	/*
4224 	 * the copy buffer should be a whole multiple of page size. We know that
4225 	 * this cookie is <= MMU_PAGESIZE.
4226 	 */
4227 	ASSERT(cookie->dmac_size <= MMU_PAGESIZE);
4228 
4229 	/*
4230 	 * from now on, all new windows in this bind need to be re-mapped during
4231 	 * ddi_dma_getwin() (32-bit kernel only). i.e. we ran out out copybuf
4232 	 * space...
4233 	 */
4234 #if !defined(__amd64)
4235 	dma->dp_cb_remaping = B_TRUE;
4236 #endif
4237 
4238 	/* reset copybuf used */
4239 	*copybuf_used = 0;
4240 
4241 	/*
4242 	 * if we don't have to trim (since granularity is set to 1), go to the
4243 	 * next window and add the current cookie to it. We know the current
4244 	 * cookie uses the copy buffer since we're in this code path.
4245 	 */
4246 	if (!dma->dp_trim_required) {
4247 		(*windowp)++;
4248 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
4249 
4250 		/* Add this cookie to the new window */
4251 		(*windowp)->wd_cookie_cnt++;
4252 		(*windowp)->wd_size += cookie->dmac_size;
4253 		*copybuf_used += MMU_PAGESIZE;
4254 		return (DDI_SUCCESS);
4255 	}
4256 
4257 	/*
4258 	 * *** may need to trim, figure it out.
4259 	 */
4260 
4261 	/* figure out how much we need to trim from the window */
4262 	if (dma->dp_granularity_power_2) {
4263 		trim_sz = (*windowp)->wd_size &
4264 		    (hp->dmai_attr.dma_attr_granular - 1);
4265 	} else {
4266 		trim_sz = (*windowp)->wd_size % hp->dmai_attr.dma_attr_granular;
4267 	}
4268 
4269 	/*
4270 	 * if the window's a whole multiple of granularity, go to the next
4271 	 * window, init it, then add in the current cookie. We know the current
4272 	 * cookie uses the copy buffer since we're in this code path.
4273 	 */
4274 	if (trim_sz == 0) {
4275 		(*windowp)++;
4276 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
4277 
4278 		/* Add this cookie to the new window */
4279 		(*windowp)->wd_cookie_cnt++;
4280 		(*windowp)->wd_size += cookie->dmac_size;
4281 		*copybuf_used += MMU_PAGESIZE;
4282 		return (DDI_SUCCESS);
4283 	}
4284 
4285 	/*
4286 	 * *** We figured it out, we definitly need to trim
4287 	 */
4288 
4289 	/*
4290 	 * make sure the driver isn't making us do something bad...
4291 	 * Trimming and sgllen == 1 don't go together.
4292 	 */
4293 	if (hp->dmai_attr.dma_attr_sgllen == 1) {
4294 		return (DDI_DMA_NOMAPPING);
4295 	}
4296 
4297 	/*
4298 	 * first, setup the current window to account for the trim. Need to go
4299 	 * back to the last cookie for this. Some of the last cookie will be in
4300 	 * the current window, and some of the last cookie will be in the new
4301 	 * window. All of the current cookie will be in the new window.
4302 	 */
4303 	cookie--;
4304 	(*windowp)->wd_trim.tr_trim_last = B_TRUE;
4305 	(*windowp)->wd_trim.tr_last_cookie = cookie;
4306 	(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
4307 	ASSERT(cookie->dmac_size > trim_sz);
4308 	(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
4309 	(*windowp)->wd_size -= trim_sz;
4310 
4311 	/*
4312 	 * we're trimming the last cookie (not the current cookie). So that
4313 	 * last cookie may have or may not have been using the copy buffer (
4314 	 * we know the cookie passed in uses the copy buffer since we're in
4315 	 * this code path).
4316 	 *
4317 	 * If the last cookie doesn't use the copy buffer, nothing special to
4318 	 * do. However, if it does uses the copy buffer, it will be both the
4319 	 * last page in the current window and the first page in the next
4320 	 * window. Since we are reusing the copy buffer (and KVA space on the
4321 	 * 32-bit kernel), this page will use the end of the copy buffer in the
4322 	 * current window, and the start of the copy buffer in the next window.
4323 	 * Track that info... The cookie physical address was already set to
4324 	 * the copy buffer physical address in setup_cookie..
4325 	 */
4326 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
4327 		pidx = (sinfo->si_buf_offset + (*windowp)->wd_offset +
4328 		    (*windowp)->wd_size) >> MMU_PAGESHIFT;
4329 		(*windowp)->wd_trim.tr_last_copybuf_win = B_TRUE;
4330 		(*windowp)->wd_trim.tr_last_pidx = pidx;
4331 		(*windowp)->wd_trim.tr_last_cbaddr =
4332 		    dma->dp_pgmap[pidx].pm_cbaddr;
4333 #if !defined(__amd64)
4334 		(*windowp)->wd_trim.tr_last_kaddr =
4335 		    dma->dp_pgmap[pidx].pm_kaddr;
4336 #endif
4337 	}
4338 
4339 	/* save the buffer offsets for the next window */
4340 	coffset = cookie->dmac_size - trim_sz;
4341 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
4342 
4343 	/*
4344 	 * set this now in case this is the first window. all other cases are
4345 	 * set in dma_win()
4346 	 */
4347 	cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
4348 
4349 	/*
4350 	 * initialize the next window using what's left over in the previous
4351 	 * cookie.
4352 	 */
4353 	(*windowp)++;
4354 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
4355 	(*windowp)->wd_cookie_cnt++;
4356 	(*windowp)->wd_trim.tr_trim_first = B_TRUE;
4357 	(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset;
4358 	(*windowp)->wd_trim.tr_first_size = trim_sz;
4359 
4360 	/*
4361 	 * again, we're tracking if the last cookie uses the copy buffer.
4362 	 * read the comment above for more info on why we need to track
4363 	 * additional state.
4364 	 *
4365 	 * For the first cookie in the new window, we need reset the physical
4366 	 * address to DMA into to the start of the copy buffer plus any
4367 	 * initial page offset which may be present.
4368 	 */
4369 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
4370 		(*windowp)->wd_dosync = B_TRUE;
4371 		(*windowp)->wd_trim.tr_first_copybuf_win = B_TRUE;
4372 		(*windowp)->wd_trim.tr_first_pidx = pidx;
4373 		(*windowp)->wd_trim.tr_first_cbaddr = dma->dp_cbaddr;
4374 		poff = (*windowp)->wd_trim.tr_first_paddr & MMU_PAGEOFFSET;
4375 
4376 		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat, dma->dp_cbaddr)) +
4377 		    poff;
4378 		(*windowp)->wd_trim.tr_first_paddr =
4379 		    ROOTNEX_PADDR_TO_RBASE(paddr);
4380 
4381 #if !defined(__amd64)
4382 		(*windowp)->wd_trim.tr_first_kaddr = dma->dp_kva;
4383 #endif
4384 		/* account for the cookie copybuf usage in the new window */
4385 		*copybuf_used += MMU_PAGESIZE;
4386 
4387 		/*
4388 		 * every piece of code has to have a hack, and here is this
4389 		 * ones :-)
4390 		 *
4391 		 * There is a complex interaction between setup_cookie and the
4392 		 * copybuf window boundary. The complexity had to be in either
4393 		 * the maxxfer window, or the copybuf window, and I chose the
4394 		 * copybuf code.
4395 		 *
4396 		 * So in this code path, we have taken the last cookie,
4397 		 * virtually broken it in half due to the trim, and it happens
4398 		 * to use the copybuf which further complicates life. At the
4399 		 * same time, we have already setup the current cookie, which
4400 		 * is now wrong. More background info: the current cookie uses
4401 		 * the copybuf, so it is only a page long max. So we need to
4402 		 * fix the current cookies copy buffer address, physical
4403 		 * address, and kva for the 32-bit kernel. We due this by
4404 		 * bumping them by page size (of course, we can't due this on
4405 		 * the physical address since the copy buffer may not be
4406 		 * physically contiguous).
4407 		 */
4408 		cookie++;
4409 		dma->dp_pgmap[pidx + 1].pm_cbaddr += MMU_PAGESIZE;
4410 		poff = cookie->dmac_laddress & MMU_PAGEOFFSET;
4411 
4412 		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat,
4413 		    dma->dp_pgmap[pidx + 1].pm_cbaddr)) + poff;
4414 		cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(paddr);
4415 
4416 #if !defined(__amd64)
4417 		ASSERT(dma->dp_pgmap[pidx + 1].pm_mapped == B_FALSE);
4418 		dma->dp_pgmap[pidx + 1].pm_kaddr += MMU_PAGESIZE;
4419 #endif
4420 	} else {
4421 		/* go back to the current cookie */
4422 		cookie++;
4423 	}
4424 
4425 	/*
4426 	 * add the current cookie to the new window. set the new window size to
4427 	 * the what was left over from the previous cookie and what's in the
4428 	 * current cookie.
4429 	 */
4430 	(*windowp)->wd_cookie_cnt++;
4431 	(*windowp)->wd_size = trim_sz + cookie->dmac_size;
4432 	ASSERT((*windowp)->wd_size < dma->dp_maxxfer);
4433 
4434 	/*
4435 	 * we know that the cookie passed in always uses the copy buffer. We
4436 	 * wouldn't be here if it didn't.
4437 	 */
4438 	*copybuf_used += MMU_PAGESIZE;
4439 
4440 	return (DDI_SUCCESS);
4441 }
4442 
4443 
4444 /*
4445  * rootnex_maxxfer_window_boundary()
4446  *    Called in bind slowpath when we get to a window boundary because we will
4447  *    go over maxxfer.
4448  */
4449 static int
4450 rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
4451     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie)
4452 {
4453 	size_t dmac_size;
4454 	off_t new_offset;
4455 	size_t trim_sz;
4456 	off_t coffset;
4457 
4458 
4459 	/*
4460 	 * calculate how much we have to trim off of the current cookie to equal
4461 	 * maxxfer. We don't have to account for granularity here since our
4462 	 * maxxfer already takes that into account.
4463 	 */
4464 	trim_sz = ((*windowp)->wd_size + cookie->dmac_size) - dma->dp_maxxfer;
4465 	ASSERT(trim_sz <= cookie->dmac_size);
4466 	ASSERT(trim_sz <= dma->dp_maxxfer);
4467 
4468 	/* save cookie size since we need it later and we might change it */
4469 	dmac_size = cookie->dmac_size;
4470 
4471 	/*
4472 	 * if we're not trimming the entire cookie, setup the current window to
4473 	 * account for the trim.
4474 	 */
4475 	if (trim_sz < cookie->dmac_size) {
4476 		(*windowp)->wd_cookie_cnt++;
4477 		(*windowp)->wd_trim.tr_trim_last = B_TRUE;
4478 		(*windowp)->wd_trim.tr_last_cookie = cookie;
4479 		(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
4480 		(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
4481 		(*windowp)->wd_size = dma->dp_maxxfer;
4482 
4483 		/*
4484 		 * set the adjusted cookie size now in case this is the first
4485 		 * window. All other windows are taken care of in get win
4486 		 */
4487 		cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
4488 	}
4489 
4490 	/*
4491 	 * coffset is the current offset within the cookie, new_offset is the
4492 	 * current offset with the entire buffer.
4493 	 */
4494 	coffset = dmac_size - trim_sz;
4495 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
4496 
4497 	/* initialize the next window */
4498 	(*windowp)++;
4499 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
4500 	(*windowp)->wd_cookie_cnt++;
4501 	(*windowp)->wd_size = trim_sz;
4502 	if (trim_sz < dmac_size) {
4503 		(*windowp)->wd_trim.tr_trim_first = B_TRUE;
4504 		(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress +
4505 		    coffset;
4506 		(*windowp)->wd_trim.tr_first_size = trim_sz;
4507 	}
4508 
4509 	return (DDI_SUCCESS);
4510 }
4511 
4512 
4513 /*ARGSUSED*/
4514 static int
4515 rootnex_coredma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
4516     off_t off, size_t len, uint_t cache_flags)
4517 {
4518 	rootnex_sglinfo_t *sinfo;
4519 	rootnex_pgmap_t *cbpage;
4520 	rootnex_window_t *win;
4521 	ddi_dma_impl_t *hp;
4522 	rootnex_dma_t *dma;
4523 	caddr_t fromaddr;
4524 	caddr_t toaddr;
4525 	uint_t psize;
4526 	off_t offset;
4527 	uint_t pidx;
4528 	size_t size;
4529 	off_t poff;
4530 	int e;
4531 
4532 
4533 	hp = (ddi_dma_impl_t *)handle;
4534 	dma = (rootnex_dma_t *)hp->dmai_private;
4535 	sinfo = &dma->dp_sglinfo;
4536 
4537 	/*
4538 	 * if we don't have any windows, we don't need to sync. A copybuf
4539 	 * will cause us to have at least one window.
4540 	 */
4541 	if (dma->dp_window == NULL) {
4542 		return (DDI_SUCCESS);
4543 	}
4544 
4545 	/* This window may not need to be sync'd */
4546 	win = &dma->dp_window[dma->dp_current_win];
4547 	if (!win->wd_dosync) {
4548 		return (DDI_SUCCESS);
4549 	}
4550 
4551 	/* handle off and len special cases */
4552 	if ((off == 0) || (rootnex_sync_ignore_params)) {
4553 		offset = win->wd_offset;
4554 	} else {
4555 		offset = off;
4556 	}
4557 	if ((len == 0) || (rootnex_sync_ignore_params)) {
4558 		size = win->wd_size;
4559 	} else {
4560 		size = len;
4561 	}
4562 
4563 	/* check the sync args to make sure they make a little sense */
4564 	if (rootnex_sync_check_parms) {
4565 		e = rootnex_valid_sync_parms(hp, win, offset, size,
4566 		    cache_flags);
4567 		if (e != DDI_SUCCESS) {
4568 			ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_SYNC_FAIL]);
4569 			return (DDI_FAILURE);
4570 		}
4571 	}
4572 
4573 	/*
4574 	 * special case the first page to handle the offset into the page. The
4575 	 * offset to the current page for our buffer is the offset into the
4576 	 * first page of the buffer plus our current offset into the buffer
4577 	 * itself, masked of course.
4578 	 */
4579 	poff = (sinfo->si_buf_offset + offset) & MMU_PAGEOFFSET;
4580 	psize = MIN((MMU_PAGESIZE - poff), size);
4581 
4582 	/* go through all the pages that we want to sync */
4583 	while (size > 0) {
4584 		/*
4585 		 * Calculate the page index relative to the start of the buffer.
4586 		 * The index to the current page for our buffer is the offset
4587 		 * into the first page of the buffer plus our current offset
4588 		 * into the buffer itself, shifted of course...
4589 		 */
4590 		pidx = (sinfo->si_buf_offset + offset) >> MMU_PAGESHIFT;
4591 		ASSERT(pidx < sinfo->si_max_pages);
4592 
4593 		/*
4594 		 * if this page uses the copy buffer, we need to sync it,
4595 		 * otherwise, go on to the next page.
4596 		 */
4597 		cbpage = &dma->dp_pgmap[pidx];
4598 		ASSERT((cbpage->pm_uses_copybuf == B_TRUE) ||
4599 		    (cbpage->pm_uses_copybuf == B_FALSE));
4600 		if (cbpage->pm_uses_copybuf) {
4601 			/* cbaddr and kaddr should be page aligned */
4602 			ASSERT(((uintptr_t)cbpage->pm_cbaddr &
4603 			    MMU_PAGEOFFSET) == 0);
4604 			ASSERT(((uintptr_t)cbpage->pm_kaddr &
4605 			    MMU_PAGEOFFSET) == 0);
4606 
4607 			/*
4608 			 * if we're copying for the device, we are going to
4609 			 * copy from the drivers buffer and to the rootnex
4610 			 * allocated copy buffer.
4611 			 */
4612 			if (cache_flags == DDI_DMA_SYNC_FORDEV) {
4613 				fromaddr = cbpage->pm_kaddr + poff;
4614 				toaddr = cbpage->pm_cbaddr + poff;
4615 				ROOTNEX_DPROBE2(rootnex__sync__dev,
4616 				    dev_info_t *, dma->dp_dip, size_t, psize);
4617 
4618 			/*
4619 			 * if we're copying for the cpu/kernel, we are going to
4620 			 * copy from the rootnex allocated copy buffer to the
4621 			 * drivers buffer.
4622 			 */
4623 			} else {
4624 				fromaddr = cbpage->pm_cbaddr + poff;
4625 				toaddr = cbpage->pm_kaddr + poff;
4626 				ROOTNEX_DPROBE2(rootnex__sync__cpu,
4627 				    dev_info_t *, dma->dp_dip, size_t, psize);
4628 			}
4629 
4630 			bcopy(fromaddr, toaddr, psize);
4631 		}
4632 
4633 		/*
4634 		 * decrement size until we're done, update our offset into the
4635 		 * buffer, and get the next page size.
4636 		 */
4637 		size -= psize;
4638 		offset += psize;
4639 		psize = MIN(MMU_PAGESIZE, size);
4640 
4641 		/* page offset is zero for the rest of this loop */
4642 		poff = 0;
4643 	}
4644 
4645 	return (DDI_SUCCESS);
4646 }
4647 
4648 /*
4649  * rootnex_dma_sync()
4650  *    called from ddi_dma_sync() if DMP_NOSYNC is not set in hp->dmai_rflags.
4651  *    We set DMP_NOSYNC if we're not using the copy buffer. If DMP_NOSYNC
4652  *    is set, ddi_dma_sync() returns immediately passing back success.
4653  */
4654 /*ARGSUSED*/
4655 static int
4656 rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
4657     off_t off, size_t len, uint_t cache_flags)
4658 {
4659 #if defined(__amd64) && !defined(__xpv)
4660 	if (IOMMU_USED(rdip)) {
4661 		return (iommulib_nexdma_sync(dip, rdip, handle, off, len,
4662 		    cache_flags));
4663 	}
4664 #endif
4665 	return (rootnex_coredma_sync(dip, rdip, handle, off, len,
4666 	    cache_flags));
4667 }
4668 
4669 /*
4670  * rootnex_valid_sync_parms()
4671  *    checks the parameters passed to sync to verify they are correct.
4672  */
4673 static int
4674 rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win,
4675     off_t offset, size_t size, uint_t cache_flags)
4676 {
4677 	off_t woffset;
4678 
4679 
4680 	/*
4681 	 * the first part of the test to make sure the offset passed in is
4682 	 * within the window.
4683 	 */
4684 	if (offset < win->wd_offset) {
4685 		return (DDI_FAILURE);
4686 	}
4687 
4688 	/*
4689 	 * second and last part of the test to make sure the offset and length
4690 	 * passed in is within the window.
4691 	 */
4692 	woffset = offset - win->wd_offset;
4693 	if ((woffset + size) > win->wd_size) {
4694 		return (DDI_FAILURE);
4695 	}
4696 
4697 	/*
4698 	 * if we are sync'ing for the device, the DDI_DMA_WRITE flag should
4699 	 * be set too.
4700 	 */
4701 	if ((cache_flags == DDI_DMA_SYNC_FORDEV) &&
4702 	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
4703 		return (DDI_SUCCESS);
4704 	}
4705 
4706 	/*
4707 	 * at this point, either DDI_DMA_SYNC_FORCPU or DDI_DMA_SYNC_FORKERNEL
4708 	 * should be set. Also DDI_DMA_READ should be set in the flags.
4709 	 */
4710 	if (((cache_flags == DDI_DMA_SYNC_FORCPU) ||
4711 	    (cache_flags == DDI_DMA_SYNC_FORKERNEL)) &&
4712 	    (hp->dmai_rflags & DDI_DMA_READ)) {
4713 		return (DDI_SUCCESS);
4714 	}
4715 
4716 	return (DDI_FAILURE);
4717 }
4718 
4719 
4720 /*ARGSUSED*/
4721 static int
4722 rootnex_coredma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
4723     uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
4724     uint_t *ccountp)
4725 {
4726 	rootnex_window_t *window;
4727 	rootnex_trim_t *trim;
4728 	ddi_dma_impl_t *hp;
4729 	rootnex_dma_t *dma;
4730 	ddi_dma_obj_t *dmao;
4731 #if !defined(__amd64)
4732 	rootnex_sglinfo_t *sinfo;
4733 	rootnex_pgmap_t *pmap;
4734 	uint_t pidx;
4735 	uint_t pcnt;
4736 	off_t poff;
4737 	int i;
4738 #endif
4739 
4740 
4741 	hp = (ddi_dma_impl_t *)handle;
4742 	dma = (rootnex_dma_t *)hp->dmai_private;
4743 #if !defined(__amd64)
4744 	sinfo = &dma->dp_sglinfo;
4745 #endif
4746 
4747 	/* If we try and get a window which doesn't exist, return failure */
4748 	if (win >= hp->dmai_nwin) {
4749 		ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
4750 		return (DDI_FAILURE);
4751 	}
4752 
4753 	dmao = dma->dp_dvma_used ? &dma->dp_dvma : &dma->dp_dma;
4754 
4755 	/*
4756 	 * if we don't have any windows, and they're asking for the first
4757 	 * window, setup the cookie pointer to the first cookie in the bind.
4758 	 * setup our return values, then increment the cookie since we return
4759 	 * the first cookie on the stack.
4760 	 */
4761 	if (dma->dp_window == NULL) {
4762 		if (win != 0) {
4763 			ROOTNEX_DPROF_INC(
4764 			    &rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
4765 			return (DDI_FAILURE);
4766 		}
4767 		hp->dmai_cookie = dma->dp_cookies;
4768 		*offp = 0;
4769 		*lenp = dmao->dmao_size;
4770 		*ccountp = dma->dp_sglinfo.si_sgl_size;
4771 		*cookiep = hp->dmai_cookie[0];
4772 		hp->dmai_cookie++;
4773 		return (DDI_SUCCESS);
4774 	}
4775 
4776 	/* sync the old window before moving on to the new one */
4777 	window = &dma->dp_window[dma->dp_current_win];
4778 	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_READ)) {
4779 		(void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
4780 		    DDI_DMA_SYNC_FORCPU);
4781 	}
4782 
4783 #if !defined(__amd64)
4784 	/*
4785 	 * before we move to the next window, if we need to re-map, unmap all
4786 	 * the pages in this window.
4787 	 */
4788 	if (dma->dp_cb_remaping) {
4789 		/*
4790 		 * If we switch to this window again, we'll need to map in
4791 		 * on the fly next time.
4792 		 */
4793 		window->wd_remap_copybuf = B_TRUE;
4794 
4795 		/*
4796 		 * calculate the page index into the buffer where this window
4797 		 * starts, and the number of pages this window takes up.
4798 		 */
4799 		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
4800 		    MMU_PAGESHIFT;
4801 		poff = (sinfo->si_buf_offset + window->wd_offset) &
4802 		    MMU_PAGEOFFSET;
4803 		pcnt = mmu_btopr(window->wd_size + poff);
4804 		ASSERT((pidx + pcnt) <= sinfo->si_max_pages);
4805 
4806 		/* unmap pages which are currently mapped in this window */
4807 		for (i = 0; i < pcnt; i++) {
4808 			if (dma->dp_pgmap[pidx].pm_mapped) {
4809 				hat_unload(kas.a_hat,
4810 				    dma->dp_pgmap[pidx].pm_kaddr, MMU_PAGESIZE,
4811 				    HAT_UNLOAD);
4812 				dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
4813 			}
4814 			pidx++;
4815 		}
4816 	}
4817 #endif
4818 
4819 	/*
4820 	 * Move to the new window.
4821 	 * NOTE: current_win must be set for sync to work right
4822 	 */
4823 	dma->dp_current_win = win;
4824 	window = &dma->dp_window[win];
4825 
4826 	/* if needed, adjust the first and/or last cookies for trim */
4827 	trim = &window->wd_trim;
4828 	if (trim->tr_trim_first) {
4829 		window->wd_first_cookie->dmac_laddress = trim->tr_first_paddr;
4830 		window->wd_first_cookie->dmac_size = trim->tr_first_size;
4831 #if !defined(__amd64)
4832 		window->wd_first_cookie->dmac_type =
4833 		    (window->wd_first_cookie->dmac_type &
4834 		    ROOTNEX_USES_COPYBUF) + window->wd_offset;
4835 #endif
4836 		if (trim->tr_first_copybuf_win) {
4837 			dma->dp_pgmap[trim->tr_first_pidx].pm_cbaddr =
4838 			    trim->tr_first_cbaddr;
4839 #if !defined(__amd64)
4840 			dma->dp_pgmap[trim->tr_first_pidx].pm_kaddr =
4841 			    trim->tr_first_kaddr;
4842 #endif
4843 		}
4844 	}
4845 	if (trim->tr_trim_last) {
4846 		trim->tr_last_cookie->dmac_laddress = trim->tr_last_paddr;
4847 		trim->tr_last_cookie->dmac_size = trim->tr_last_size;
4848 		if (trim->tr_last_copybuf_win) {
4849 			dma->dp_pgmap[trim->tr_last_pidx].pm_cbaddr =
4850 			    trim->tr_last_cbaddr;
4851 #if !defined(__amd64)
4852 			dma->dp_pgmap[trim->tr_last_pidx].pm_kaddr =
4853 			    trim->tr_last_kaddr;
4854 #endif
4855 		}
4856 	}
4857 
4858 	/*
4859 	 * setup the cookie pointer to the first cookie in the window. setup
4860 	 * our return values, then increment the cookie since we return the
4861 	 * first cookie on the stack.
4862 	 */
4863 	hp->dmai_cookie = window->wd_first_cookie;
4864 	*offp = window->wd_offset;
4865 	*lenp = window->wd_size;
4866 	*ccountp = window->wd_cookie_cnt;
4867 	*cookiep = hp->dmai_cookie[0];
4868 	hp->dmai_cookie++;
4869 
4870 #if !defined(__amd64)
4871 	/* re-map copybuf if required for this window */
4872 	if (dma->dp_cb_remaping) {
4873 		/*
4874 		 * calculate the page index into the buffer where this
4875 		 * window starts.
4876 		 */
4877 		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
4878 		    MMU_PAGESHIFT;
4879 		ASSERT(pidx < sinfo->si_max_pages);
4880 
4881 		/*
4882 		 * the first page can get unmapped if it's shared with the
4883 		 * previous window. Even if the rest of this window is already
4884 		 * mapped in, we need to still check this one.
4885 		 */
4886 		pmap = &dma->dp_pgmap[pidx];
4887 		if ((pmap->pm_uses_copybuf) && (pmap->pm_mapped == B_FALSE)) {
4888 			if (pmap->pm_pp != NULL) {
4889 				pmap->pm_mapped = B_TRUE;
4890 				i86_pp_map(pmap->pm_pp, pmap->pm_kaddr);
4891 			} else if (pmap->pm_vaddr != NULL) {
4892 				pmap->pm_mapped = B_TRUE;
4893 				i86_va_map(pmap->pm_vaddr, sinfo->si_asp,
4894 				    pmap->pm_kaddr);
4895 			}
4896 		}
4897 		pidx++;
4898 
4899 		/* map in the rest of the pages if required */
4900 		if (window->wd_remap_copybuf) {
4901 			window->wd_remap_copybuf = B_FALSE;
4902 
4903 			/* figure out many pages this window takes up */
4904 			poff = (sinfo->si_buf_offset + window->wd_offset) &
4905 			    MMU_PAGEOFFSET;
4906 			pcnt = mmu_btopr(window->wd_size + poff);
4907 			ASSERT(((pidx - 1) + pcnt) <= sinfo->si_max_pages);
4908 
4909 			/* map pages which require it */
4910 			for (i = 1; i < pcnt; i++) {
4911 				pmap = &dma->dp_pgmap[pidx];
4912 				if (pmap->pm_uses_copybuf) {
4913 					ASSERT(pmap->pm_mapped == B_FALSE);
4914 					if (pmap->pm_pp != NULL) {
4915 						pmap->pm_mapped = B_TRUE;
4916 						i86_pp_map(pmap->pm_pp,
4917 						    pmap->pm_kaddr);
4918 					} else if (pmap->pm_vaddr != NULL) {
4919 						pmap->pm_mapped = B_TRUE;
4920 						i86_va_map(pmap->pm_vaddr,
4921 						    sinfo->si_asp,
4922 						    pmap->pm_kaddr);
4923 					}
4924 				}
4925 				pidx++;
4926 			}
4927 		}
4928 	}
4929 #endif
4930 
4931 	/* if the new window uses the copy buffer, sync it for the device */
4932 	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_WRITE)) {
4933 		(void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
4934 		    DDI_DMA_SYNC_FORDEV);
4935 	}
4936 
4937 	return (DDI_SUCCESS);
4938 }
4939 
4940 /*
4941  * rootnex_dma_win()
4942  *    called from ddi_dma_getwin()
4943  */
4944 /*ARGSUSED*/
4945 static int
4946 rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
4947     uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
4948     uint_t *ccountp)
4949 {
4950 #if defined(__amd64) && !defined(__xpv)
4951 	if (IOMMU_USED(rdip)) {
4952 		return (iommulib_nexdma_win(dip, rdip, handle, win, offp, lenp,
4953 		    cookiep, ccountp));
4954 	}
4955 #endif
4956 
4957 	return (rootnex_coredma_win(dip, rdip, handle, win, offp, lenp,
4958 	    cookiep, ccountp));
4959 }
4960 
4961 #if defined(__amd64) && !defined(__xpv)
4962 /*ARGSUSED*/
4963 static int
4964 rootnex_coredma_hdl_setprivate(dev_info_t *dip, dev_info_t *rdip,
4965     ddi_dma_handle_t handle, void *v)
4966 {
4967 	ddi_dma_impl_t *hp;
4968 	rootnex_dma_t *dma;
4969 
4970 	hp = (ddi_dma_impl_t *)handle;
4971 	dma = (rootnex_dma_t *)hp->dmai_private;
4972 	dma->dp_iommu_private = v;
4973 
4974 	return (DDI_SUCCESS);
4975 }
4976 
4977 /*ARGSUSED*/
4978 static void *
4979 rootnex_coredma_hdl_getprivate(dev_info_t *dip, dev_info_t *rdip,
4980     ddi_dma_handle_t handle)
4981 {
4982 	ddi_dma_impl_t *hp;
4983 	rootnex_dma_t *dma;
4984 
4985 	hp = (ddi_dma_impl_t *)handle;
4986 	dma = (rootnex_dma_t *)hp->dmai_private;
4987 
4988 	return (dma->dp_iommu_private);
4989 }
4990 #endif
4991 
4992 /*
4993  * ************************
4994  *  obsoleted dma routines
4995  * ************************
4996  */
4997 
4998 /*
4999  * rootnex_dma_mctl()
5000  *
5001  * We don't support this legacy interface any more on x86.
5002  */
5003 /* ARGSUSED */
5004 static int
5005 rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
5006     enum ddi_dma_ctlops request, off_t *offp, size_t *lenp, caddr_t *objpp,
5007     uint_t cache_flags)
5008 {
5009 	/*
5010 	 * The only thing dma_mctl is usef for anymore is legacy SPARC
5011 	 * dvma and sbus-specific routines.
5012 	 */
5013 	return (DDI_FAILURE);
5014 }
5015 
5016 /*
5017  * *********
5018  *  FMA Code
5019  * *********
5020  */
5021 
5022 /*
5023  * rootnex_fm_init()
5024  *    FMA init busop
5025  */
5026 /* ARGSUSED */
5027 static int
5028 rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap,
5029     ddi_iblock_cookie_t *ibc)
5030 {
5031 	*ibc = rootnex_state->r_err_ibc;
5032 
5033 	return (ddi_system_fmcap);
5034 }
5035 
5036 /*
5037  * rootnex_dma_check()
5038  *    Function called after a dma fault occurred to find out whether the
5039  *    fault address is associated with a driver that is able to handle faults
5040  *    and recover from faults.
5041  */
5042 /* ARGSUSED */
5043 static int
5044 rootnex_dma_check(dev_info_t *dip, const void *handle, const void *addr,
5045     const void *not_used)
5046 {
5047 	rootnex_window_t *window;
5048 	uint64_t start_addr;
5049 	uint64_t fault_addr;
5050 	ddi_dma_impl_t *hp;
5051 	rootnex_dma_t *dma;
5052 	uint64_t end_addr;
5053 	size_t csize;
5054 	int i;
5055 	int j;
5056 
5057 
5058 	/* The driver has to set DDI_DMA_FLAGERR to recover from dma faults */
5059 	hp = (ddi_dma_impl_t *)handle;
5060 	ASSERT(hp);
5061 
5062 	dma = (rootnex_dma_t *)hp->dmai_private;
5063 
5064 	/* Get the address that we need to search for */
5065 	fault_addr = *(uint64_t *)addr;
5066 
5067 	/*
5068 	 * if we don't have any windows, we can just walk through all the
5069 	 * cookies.
5070 	 */
5071 	if (dma->dp_window == NULL) {
5072 		/* for each cookie */
5073 		for (i = 0; i < dma->dp_sglinfo.si_sgl_size; i++) {
5074 			/*
5075 			 * if the faulted address is within the physical address
5076 			 * range of the cookie, return DDI_FM_NONFATAL.
5077 			 */
5078 			if ((fault_addr >= dma->dp_cookies[i].dmac_laddress) &&
5079 			    (fault_addr <= (dma->dp_cookies[i].dmac_laddress +
5080 			    dma->dp_cookies[i].dmac_size))) {
5081 				return (DDI_FM_NONFATAL);
5082 			}
5083 		}
5084 
5085 		/* fault_addr not within this DMA handle */
5086 		return (DDI_FM_UNKNOWN);
5087 	}
5088 
5089 	/* we have mutiple windows, walk through each window */
5090 	for (i = 0; i < hp->dmai_nwin; i++) {
5091 		window = &dma->dp_window[i];
5092 
5093 		/* Go through all the cookies in the window */
5094 		for (j = 0; j < window->wd_cookie_cnt; j++) {
5095 
5096 			start_addr = window->wd_first_cookie[j].dmac_laddress;
5097 			csize = window->wd_first_cookie[j].dmac_size;
5098 
5099 			/*
5100 			 * if we are trimming the first cookie in the window,
5101 			 * and this is the first cookie, adjust the start
5102 			 * address and size of the cookie to account for the
5103 			 * trim.
5104 			 */
5105 			if (window->wd_trim.tr_trim_first && (j == 0)) {
5106 				start_addr = window->wd_trim.tr_first_paddr;
5107 				csize = window->wd_trim.tr_first_size;
5108 			}
5109 
5110 			/*
5111 			 * if we are trimming the last cookie in the window,
5112 			 * and this is the last cookie, adjust the start
5113 			 * address and size of the cookie to account for the
5114 			 * trim.
5115 			 */
5116 			if (window->wd_trim.tr_trim_last &&
5117 			    (j == (window->wd_cookie_cnt - 1))) {
5118 				start_addr = window->wd_trim.tr_last_paddr;
5119 				csize = window->wd_trim.tr_last_size;
5120 			}
5121 
5122 			end_addr = start_addr + csize;
5123 
5124 			/*
5125 			 * if the faulted address is within the physical
5126 			 * address of the cookie, return DDI_FM_NONFATAL.
5127 			 */
5128 			if ((fault_addr >= start_addr) &&
5129 			    (fault_addr <= end_addr)) {
5130 				return (DDI_FM_NONFATAL);
5131 			}
5132 		}
5133 	}
5134 
5135 	/* fault_addr not within this DMA handle */
5136 	return (DDI_FM_UNKNOWN);
5137 }
5138 
5139 /*ARGSUSED*/
5140 static int
5141 rootnex_quiesce(dev_info_t *dip)
5142 {
5143 #if defined(__amd64) && !defined(__xpv)
5144 	return (immu_quiesce());
5145 #else
5146 	return (DDI_SUCCESS);
5147 #endif
5148 }
5149 
5150 #if defined(__xpv)
5151 void
5152 immu_init(void)
5153 {
5154 	;
5155 }
5156 
5157 void
5158 immu_startup(void)
5159 {
5160 	;
5161 }
5162 /*ARGSUSED*/
5163 void
5164 immu_physmem_update(uint64_t addr, uint64_t size)
5165 {
5166 	;
5167 }
5168 #endif
5169