xref: /titanic_52/usr/src/uts/i86pc/io/rootnex.c (revision f498645a3eecf2ddd304b4ea9c7f1b4c155ff79e)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * x86 root nexus driver
30  */
31 
32 #include <sys/sysmacros.h>
33 #include <sys/conf.h>
34 #include <sys/autoconf.h>
35 #include <sys/sysmacros.h>
36 #include <sys/debug.h>
37 #include <sys/psw.h>
38 #include <sys/ddidmareq.h>
39 #include <sys/promif.h>
40 #include <sys/devops.h>
41 #include <sys/kmem.h>
42 #include <sys/cmn_err.h>
43 #include <vm/seg.h>
44 #include <vm/seg_kmem.h>
45 #include <vm/seg_dev.h>
46 #include <sys/vmem.h>
47 #include <sys/mman.h>
48 #include <vm/hat.h>
49 #include <vm/as.h>
50 #include <vm/page.h>
51 #include <sys/avintr.h>
52 #include <sys/errno.h>
53 #include <sys/modctl.h>
54 #include <sys/ddi_impldefs.h>
55 #include <sys/sunddi.h>
56 #include <sys/sunndi.h>
57 #include <sys/mach_intr.h>
58 #include <sys/psm.h>
59 #include <sys/ontrap.h>
60 #include <sys/atomic.h>
61 #include <sys/sdt.h>
62 #include <sys/rootnex.h>
63 #include <vm/hat_i86.h>
64 #include <sys/ddifm.h>
65 
66 /*
67  * enable/disable extra checking of function parameters. Useful for debugging
68  * drivers.
69  */
70 #ifdef	DEBUG
71 int rootnex_alloc_check_parms = 1;
72 int rootnex_bind_check_parms = 1;
73 int rootnex_bind_check_inuse = 1;
74 int rootnex_unbind_verify_buffer = 0;
75 int rootnex_sync_check_parms = 1;
76 #else
77 int rootnex_alloc_check_parms = 0;
78 int rootnex_bind_check_parms = 0;
79 int rootnex_bind_check_inuse = 0;
80 int rootnex_unbind_verify_buffer = 0;
81 int rootnex_sync_check_parms = 0;
82 #endif
83 
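/*
 * A minimal illustration (not from this file): since these are plain kernel
 * globals, they can be enabled before the driver loads with an /etc/system
 * line such as
 *
 *	set rootnex:rootnex_unbind_verify_buffer = 1
 */
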
84 /* Master Abort and Target Abort panic flag */
85 int rootnex_fm_ma_ta_panic_flag = 0;
86 
87 /* Semi-temporary patchables to phase in bug fixes, test drivers, etc. */
88 int rootnex_bind_fail = 1;
89 int rootnex_bind_warn = 1;
90 uint8_t *rootnex_warn_list;
91 /* bitmasks for rootnex_warn_list. Up to 8 different warnings with uint8_t */
92 #define	ROOTNEX_BIND_WARNING	(0x1 << 0)
93 
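/*
 * Illustrative sketch only (the consuming code lives elsewhere in this file):
 * with one bit per warning, "warn once per major number" boils down to a
 * test-and-set along the lines of
 *
 *	if (!(rootnex_warn_list[maj] & ROOTNEX_BIND_WARNING)) {
 *		rootnex_warn_list[maj] |= ROOTNEX_BIND_WARNING;
 *		cmn_err(CE_WARN, "...");
 *	}
 */
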
94 /*
95  * Revert to the old broken behavior of always syncing the entire copy buffer.
96  * This is useful if we have a buggy driver which doesn't correctly pass the
97  * offset and size to ddi_dma_sync().
98  */
99 int rootnex_sync_ignore_params = 0;
100 
101 /*
102  * maximum size that we will allow for a copy buffer. Can be patched on the
103  * fly
104  */
105 size_t rootnex_max_copybuf_size = 0x100000;
106 
107 /*
108  * For the 64-bit kernel, pre-alloc enough cookies for a 256K buffer plus 1
109  * page for alignment. For the 32-bit kernel, pre-alloc enough cookies for a
110  * 64K buffer plus 1 page for alignment (we have less kernel space in a 32-bit
111  * kernel). Allocate enough windows to handle a 256K buffer w/ at least 65
112  * sgllen DMA engine, and enough copybuf buffer state pages to handle 2 pages
113  * (< 8K). We will still need to allocate the copy buffer during bind though
114  * (if we need one). These can only be modified in /etc/system before rootnex
115  * attach.
116  */
117 #if defined(__amd64)
118 int rootnex_prealloc_cookies = 65;
119 int rootnex_prealloc_windows = 4;
120 int rootnex_prealloc_copybuf = 2;
121 #else
122 int rootnex_prealloc_cookies = 33;
123 int rootnex_prealloc_windows = 4;
124 int rootnex_prealloc_copybuf = 2;
125 #endif
126 
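/*
 * Worked example for the 64-bit sizing above, assuming 4K pages
 * (MMU_PAGESIZE): a 256K buffer spans 256K / 4K = 64 pages, and a
 * misaligned start can add one more, hence the 65 pre-allocated cookies.
 */
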
127 /* driver global state */
128 static rootnex_state_t *rootnex_state;
129 
130 /* shortcut to rootnex counters */
131 static uint64_t *rootnex_cnt;
132 
133 /*
134  * XXX - does x86 even need these or are they left over from the SPARC days?
135  */
136 /* statically defined integer/boolean properties for the root node */
137 static rootnex_intprop_t rootnex_intprp[] = {
138 	{ "PAGESIZE",			PAGESIZE },
139 	{ "MMU_PAGESIZE",		MMU_PAGESIZE },
140 	{ "MMU_PAGEOFFSET",		MMU_PAGEOFFSET },
141 	{ DDI_RELATIVE_ADDRESSING,	1 },
142 };
143 #define	NROOT_INTPROPS	(sizeof (rootnex_intprp) / sizeof (rootnex_intprop_t))
144 
145 
146 static struct cb_ops rootnex_cb_ops = {
147 	nodev,		/* open */
148 	nodev,		/* close */
149 	nodev,		/* strategy */
150 	nodev,		/* print */
151 	nodev,		/* dump */
152 	nodev,		/* read */
153 	nodev,		/* write */
154 	nodev,		/* ioctl */
155 	nodev,		/* devmap */
156 	nodev,		/* mmap */
157 	nodev,		/* segmap */
158 	nochpoll,	/* chpoll */
159 	ddi_prop_op,	/* cb_prop_op */
160 	NULL,		/* struct streamtab */
161 	D_NEW | D_MP | D_HOTPLUG, /* compatibility flags */
162 	CB_REV,		/* Rev */
163 	nodev,		/* cb_aread */
164 	nodev		/* cb_awrite */
165 };
166 
167 static int rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
168     off_t offset, off_t len, caddr_t *vaddrp);
169 static int rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip,
170     struct hat *hat, struct seg *seg, caddr_t addr,
171     struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock);
172 static int rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip,
173     struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep);
174 static int rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
175     ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
176     ddi_dma_handle_t *handlep);
177 static int rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
178     ddi_dma_handle_t handle);
179 static int rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
180     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
181     ddi_dma_cookie_t *cookiep, uint_t *ccountp);
182 static int rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
183     ddi_dma_handle_t handle);
184 static int rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip,
185     ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags);
186 static int rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip,
187     ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp,
188     ddi_dma_cookie_t *cookiep, uint_t *ccountp);
189 static int rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
190     ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
191     off_t *offp, size_t *lenp, caddr_t *objp, uint_t cache_flags);
192 static int rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip,
193     ddi_ctl_enum_t ctlop, void *arg, void *result);
194 static int rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap,
195     ddi_iblock_cookie_t *ibc);
196 static int rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip,
197     ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp, void *result);
198 
199 
200 static struct bus_ops rootnex_bus_ops = {
201 	BUSO_REV,
202 	rootnex_map,
203 	NULL,
204 	NULL,
205 	NULL,
206 	rootnex_map_fault,
207 	rootnex_dma_map,
208 	rootnex_dma_allochdl,
209 	rootnex_dma_freehdl,
210 	rootnex_dma_bindhdl,
211 	rootnex_dma_unbindhdl,
212 	rootnex_dma_sync,
213 	rootnex_dma_win,
214 	rootnex_dma_mctl,
215 	rootnex_ctlops,
216 	ddi_bus_prop_op,
217 	i_ddi_rootnex_get_eventcookie,
218 	i_ddi_rootnex_add_eventcall,
219 	i_ddi_rootnex_remove_eventcall,
220 	i_ddi_rootnex_post_event,
221 	0,			/* bus_intr_ctl */
222 	0,			/* bus_config */
223 	0,			/* bus_unconfig */
224 	rootnex_fm_init,	/* bus_fm_init */
225 	NULL,			/* bus_fm_fini */
226 	NULL,			/* bus_fm_access_enter */
227 	NULL,			/* bus_fm_access_exit */
228 	NULL,			/* bus_power */
229 	rootnex_intr_ops	/* bus_intr_op */
230 };
231 
232 static int rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
233 static int rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
234 
235 static struct dev_ops rootnex_ops = {
236 	DEVO_REV,
237 	0,
238 	ddi_no_info,
239 	nulldev,
240 	nulldev,
241 	rootnex_attach,
242 	rootnex_detach,
243 	nulldev,
244 	&rootnex_cb_ops,
245 	&rootnex_bus_ops
246 };
247 
248 static struct modldrv rootnex_modldrv = {
249 	&mod_driverops,
250 	"i86pc root nexus %I%",
251 	&rootnex_ops
252 };
253 
254 static struct modlinkage rootnex_modlinkage = {
255 	MODREV_1,
256 	(void *)&rootnex_modldrv,
257 	NULL
258 };
259 
260 
261 /*
262  *  extern hacks
263  */
264 extern struct seg_ops segdev_ops;
265 extern int ignore_hardware_nodes;	/* force flag from ddi_impl.c */
266 #ifdef	DDI_MAP_DEBUG
267 extern int ddi_map_debug_flag;
268 #define	ddi_map_debug	if (ddi_map_debug_flag) prom_printf
269 #endif
270 #define	ptob64(x)	(((uint64_t)(x)) << MMU_PAGESHIFT)
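/* e.g. with 4K pages (MMU_PAGESHIFT == 12), ptob64(0x10) == 0x10000 */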
271 extern void i86_pp_map(page_t *pp, caddr_t kaddr);
272 extern void i86_va_map(caddr_t vaddr, struct as *asp, caddr_t kaddr);
273 extern int (*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *,
274     psm_intr_op_t, int *);
275 extern int impl_ddi_sunbus_initchild(dev_info_t *dip);
276 extern void impl_ddi_sunbus_removechild(dev_info_t *dip);
277 /*
278  * Use the device arena for device control register mappings.
279  * Various kernel memory walkers (debugger, dtrace) need to know
280  * to avoid this address range to prevent undesired device activity.
281  */
282 extern void *device_arena_alloc(size_t size, int vm_flag);
283 extern void device_arena_free(void * vaddr, size_t size);
284 
285 
286 /*
287  *  Internal functions
288  */
289 static int rootnex_dma_init();
290 static void rootnex_add_props(dev_info_t *);
291 static int rootnex_ctl_reportdev(dev_info_t *dip);
292 static struct intrspec *rootnex_get_ispec(dev_info_t *rdip, int inum);
293 static int rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp);
294 static int rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp);
295 static int rootnex_map_handle(ddi_map_req_t *mp);
296 static void rootnex_clean_dmahdl(ddi_dma_impl_t *hp);
297 static int rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegsize);
298 static int rootnex_valid_bind_parms(ddi_dma_req_t *dmareq,
299     ddi_dma_attr_t *attr);
300 static void rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
301     rootnex_sglinfo_t *sglinfo);
302 static int rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
303     rootnex_dma_t *dma, ddi_dma_attr_t *attr, int kmflag);
304 static int rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
305     rootnex_dma_t *dma, ddi_dma_attr_t *attr);
306 static void rootnex_teardown_copybuf(rootnex_dma_t *dma);
307 static int rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
308     ddi_dma_attr_t *attr, int kmflag);
309 static void rootnex_teardown_windows(rootnex_dma_t *dma);
310 static void rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
311     rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset);
312 static void rootnex_setup_cookie(ddi_dma_obj_t *dmar_object,
313     rootnex_dma_t *dma, ddi_dma_cookie_t *cookie, off_t cur_offset,
314     size_t *copybuf_used, page_t **cur_pp);
315 static int rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp,
316     rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie,
317     ddi_dma_attr_t *attr, off_t cur_offset);
318 static int rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp,
319     rootnex_dma_t *dma, rootnex_window_t **windowp,
320     ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used);
321 static int rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp,
322     rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie);
323 static int rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win,
324     off_t offset, size_t size, uint_t cache_flags);
325 static int rootnex_verify_buffer(rootnex_dma_t *dma);
326 static int rootnex_dma_check(dev_info_t *dip, const void *handle,
327     const void *comp_addr, const void *not_used);
328 
329 /*
330  * _init()
331  *
332  */
333 int
334 _init(void)
335 {
336 
337 	rootnex_state = NULL;
338 	return (mod_install(&rootnex_modlinkage));
339 }
340 
341 
342 /*
343  * _info()
344  *
345  */
346 int
347 _info(struct modinfo *modinfop)
348 {
349 	return (mod_info(&rootnex_modlinkage, modinfop));
350 }
351 
352 
353 /*
354  * _fini()
355  *
356  */
357 int
358 _fini(void)
359 {
360 	return (EBUSY);
361 }
362 
363 
364 /*
365  * rootnex_attach()
366  *
367  */
368 static int
369 rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
370 {
371 	int fmcap;
372 	int e;
373 
374 
375 	switch (cmd) {
376 	case DDI_ATTACH:
377 		break;
378 	case DDI_RESUME:
379 		return (DDI_SUCCESS);
380 	default:
381 		return (DDI_FAILURE);
382 	}
383 
384 	/*
385 	 * We should only have one instance of rootnex. Save it away since we
386 	 * don't have an easy way to get it back later.
387 	 */
388 	ASSERT(rootnex_state == NULL);
389 	rootnex_state = kmem_zalloc(sizeof (rootnex_state_t), KM_SLEEP);
390 
391 	rootnex_state->r_dip = dip;
392 	rootnex_state->r_err_ibc = (ddi_iblock_cookie_t)ipltospl(15);
393 	rootnex_state->r_reserved_msg_printed = B_FALSE;
394 	rootnex_cnt = &rootnex_state->r_counters[0];
395 
396 	/*
397 	 * Set minimum fm capability level for i86pc platforms and then
398 	 * initialize error handling. Since we're the rootnex, we don't
399 	 * care what's returned in the fmcap field.
400 	 */
401 	ddi_system_fmcap = DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE |
402 	    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE;
403 	fmcap = ddi_system_fmcap;
404 	ddi_fm_init(dip, &fmcap, &rootnex_state->r_err_ibc);
405 
406 	/* initialize DMA related state */
407 	e = rootnex_dma_init();
408 	if (e != DDI_SUCCESS) {
409 		kmem_free(rootnex_state, sizeof (rootnex_state_t));
410 		return (DDI_FAILURE);
411 	}
412 
413 	/* Add static root node properties */
414 	rootnex_add_props(dip);
415 
416 	/* since we can't call ddi_report_dev() */
417 	cmn_err(CE_CONT, "?root nexus = %s\n", ddi_get_name(dip));
418 
419 	/* Initialize rootnex event handle */
420 	i_ddi_rootnex_init_events(dip);
421 
422 	return (DDI_SUCCESS);
423 }
424 
425 
426 /*
427  * rootnex_detach()
428  *
429  */
430 /*ARGSUSED*/
431 static int
432 rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
433 {
434 	switch (cmd) {
435 	case DDI_SUSPEND:
436 		break;
437 	default:
438 		return (DDI_FAILURE);
439 	}
440 
441 	return (DDI_SUCCESS);
442 }
443 
444 
445 /*
446  * rootnex_dma_init()
447  *
448  */
449 /*ARGSUSED*/
450 static int
451 rootnex_dma_init()
452 {
453 	size_t bufsize;
454 
455 
456 	/*
457 	 * size of our cookie/window/copybuf state needed in dma bind that we
458 	 * pre-alloc in dma_alloc_handle
459 	 */
460 	rootnex_state->r_prealloc_cookies = rootnex_prealloc_cookies;
461 	rootnex_state->r_prealloc_size =
462 	    (rootnex_state->r_prealloc_cookies * sizeof (ddi_dma_cookie_t)) +
463 	    (rootnex_prealloc_windows * sizeof (rootnex_window_t)) +
464 	    (rootnex_prealloc_copybuf * sizeof (rootnex_pgmap_t));
465 
466 	/*
467 	 * setup DDI DMA handle kmem cache, align each handle on 64 bytes,
468 	 * allocate 16 extra bytes for struct pointer alignment
469 	 * (p->dmai_private & dma->dp_prealloc_buffer)
470 	 */
471 	bufsize = sizeof (ddi_dma_impl_t) + sizeof (rootnex_dma_t) +
472 	    rootnex_state->r_prealloc_size + 0x10;
473 	rootnex_state->r_dmahdl_cache = kmem_cache_create("rootnex_dmahdl",
474 	    bufsize, 64, NULL, NULL, NULL, NULL, NULL, 0);
475 	if (rootnex_state->r_dmahdl_cache == NULL) {
476 		return (DDI_FAILURE);
477 	}
478 
479 	/*
480 	 * allocate array to track which major numbers we have printed warnings
481 	 * for.
482 	 */
483 	rootnex_warn_list = kmem_zalloc(devcnt * sizeof (*rootnex_warn_list),
484 	    KM_SLEEP);
485 
486 	return (DDI_SUCCESS);
487 }
488 
489 
490 /*
491  * rootnex_add_props()
492  *
493  */
494 static void
495 rootnex_add_props(dev_info_t *dip)
496 {
497 	rootnex_intprop_t *rpp;
498 	int i;
499 
500 	/* Add static integer/boolean properties to the root node */
501 	rpp = rootnex_intprp;
502 	for (i = 0; i < NROOT_INTPROPS; i++) {
503 		(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip,
504 		    rpp[i].prop_name, rpp[i].prop_value);
505 	}
506 }
507 
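/*
 * Illustrative sketch (not part of this file): a child driver can read one
 * of these root-node properties through the normal property interfaces, e.g.
 *
 *	int pgsz = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "PAGESIZE", 0);
 */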
508 
509 
510 /*
511  * *************************
512  *  ctlops related routines
513  * *************************
514  */
515 
516 /*
517  * rootnex_ctlops()
518  *
519  */
520 /*ARGSUSED*/
521 static int
522 rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
523     void *arg, void *result)
524 {
525 	int n, *ptr;
526 	struct ddi_parent_private_data *pdp;
527 
528 	switch (ctlop) {
529 	case DDI_CTLOPS_DMAPMAPC:
530 		/*
531 		 * Return 'partial' to indicate that dma mapping
532 		 * has to be done in the main MMU.
533 		 */
534 		return (DDI_DMA_PARTIAL);
535 
536 	case DDI_CTLOPS_BTOP:
537 		/*
538 		 * Convert byte count input to physical page units.
539 		 * (byte counts that are not a page-size multiple
540 		 * are rounded down)
541 		 */
542 		*(ulong_t *)result = btop(*(ulong_t *)arg);
543 		return (DDI_SUCCESS);
544 
545 	case DDI_CTLOPS_PTOB:
546 		/*
547 		 * Convert size in physical pages to bytes
548 		 */
549 		*(ulong_t *)result = ptob(*(ulong_t *)arg);
550 		return (DDI_SUCCESS);
551 
552 	case DDI_CTLOPS_BTOPR:
553 		/*
554 		 * Convert byte count input to physical page units
555 		 * (byte counts that are not a page-size multiple
556 		 * are rounded up)
557 		 */
558 		*(ulong_t *)result = btopr(*(ulong_t *)arg);
559 		return (DDI_SUCCESS);
560 
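	/*
	 * Worked example for the three conversions above, assuming 4K pages:
	 * for a byte count of 0x1001, BTOP yields 1 (rounded down), BTOPR
	 * yields 2 (rounded up), and PTOB of 2 pages yields 0x2000.
	 */
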
561 	case DDI_CTLOPS_INITCHILD:
562 		return (impl_ddi_sunbus_initchild(arg));
563 
564 	case DDI_CTLOPS_UNINITCHILD:
565 		impl_ddi_sunbus_removechild(arg);
566 		return (DDI_SUCCESS);
567 
568 	case DDI_CTLOPS_REPORTDEV:
569 		return (rootnex_ctl_reportdev(rdip));
570 
571 	case DDI_CTLOPS_IOMIN:
572 		/*
573 		 * Nothing to do here but reflect back..
574 		 */
575 		return (DDI_SUCCESS);
576 
577 	case DDI_CTLOPS_REGSIZE:
578 	case DDI_CTLOPS_NREGS:
579 		break;
580 
581 	case DDI_CTLOPS_SIDDEV:
582 		if (ndi_dev_is_prom_node(rdip))
583 			return (DDI_SUCCESS);
584 		if (ndi_dev_is_persistent_node(rdip))
585 			return (DDI_SUCCESS);
586 		return (DDI_FAILURE);
587 
588 	case DDI_CTLOPS_POWER:
589 		return ((*pm_platform_power)((power_req_t *)arg));
590 
591 	case DDI_CTLOPS_RESERVED0: /* Was DDI_CTLOPS_NINTRS, obsolete */
592 	case DDI_CTLOPS_RESERVED1: /* Was DDI_CTLOPS_POKE_INIT, obsolete */
593 	case DDI_CTLOPS_RESERVED2: /* Was DDI_CTLOPS_POKE_FLUSH, obsolete */
594 	case DDI_CTLOPS_RESERVED3: /* Was DDI_CTLOPS_POKE_FINI, obsolete */
595 	case DDI_CTLOPS_RESERVED4: /* Was DDI_CTLOPS_INTR_HILEVEL, obsolete */
596 	case DDI_CTLOPS_RESERVED5: /* Was DDI_CTLOPS_XLATE_INTRS, obsolete */
597 		if (!rootnex_state->r_reserved_msg_printed) {
598 			rootnex_state->r_reserved_msg_printed = B_TRUE;
599 			cmn_err(CE_WARN, "Failing ddi_ctlops call(s) for "
600 			    "1 or more reserved/obsolete operations.");
601 		}
602 		return (DDI_FAILURE);
603 
604 	default:
605 		return (DDI_FAILURE);
606 	}
607 	/*
608 	 * The rest are for "hardware" properties
609 	 */
610 	if ((pdp = ddi_get_parent_data(rdip)) == NULL)
611 		return (DDI_FAILURE);
612 
613 	if (ctlop == DDI_CTLOPS_NREGS) {
614 		ptr = (int *)result;
615 		*ptr = pdp->par_nreg;
616 	} else {
617 		off_t *size = (off_t *)result;
618 
619 		ptr = (int *)arg;
620 		n = *ptr;
621 		if (n >= pdp->par_nreg) {
622 			return (DDI_FAILURE);
623 		}
624 		*size = (off_t)pdp->par_reg[n].regspec_size;
625 	}
626 	return (DDI_SUCCESS);
627 }
628 
629 
630 /*
631  * rootnex_ctl_reportdev()
632  *
633  */
634 static int
635 rootnex_ctl_reportdev(dev_info_t *dev)
636 {
637 	int i, n, len, f_len = 0;
638 	char *buf;
639 
640 	buf = kmem_alloc(REPORTDEV_BUFSIZE, KM_SLEEP);
641 	f_len += snprintf(buf, REPORTDEV_BUFSIZE,
642 	    "%s%d at root", ddi_driver_name(dev), ddi_get_instance(dev));
643 	len = strlen(buf);
644 
645 	for (i = 0; i < sparc_pd_getnreg(dev); i++) {
646 
647 		struct regspec *rp = sparc_pd_getreg(dev, i);
648 
649 		if (i == 0)
650 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
651 			    ": ");
652 		else
653 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
654 			    " and ");
655 		len = strlen(buf);
656 
657 		switch (rp->regspec_bustype) {
658 
659 		case BTEISA:
660 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
661 			    "%s 0x%x", DEVI_EISA_NEXNAME, rp->regspec_addr);
662 			break;
663 
664 		case BTISA:
665 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
666 			    "%s 0x%x", DEVI_ISA_NEXNAME, rp->regspec_addr);
667 			break;
668 
669 		default:
670 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
671 			    "space %x offset %x",
672 			    rp->regspec_bustype, rp->regspec_addr);
673 			break;
674 		}
675 		len = strlen(buf);
676 	}
677 	for (i = 0, n = sparc_pd_getnintr(dev); i < n; i++) {
678 		int pri;
679 
680 		if (i != 0) {
681 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
682 			    ",");
683 			len = strlen(buf);
684 		}
685 		pri = INT_IPL(sparc_pd_getintr(dev, i)->intrspec_pri);
686 		f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
687 		    " sparc ipl %d", pri);
688 		len = strlen(buf);
689 	}
690 #ifdef DEBUG
691 	if (f_len + 1 >= REPORTDEV_BUFSIZE) {
692 		cmn_err(CE_NOTE, "next message is truncated: "
693 		    "printed length 1024, real length %d", f_len);
694 	}
695 #endif /* DEBUG */
696 	cmn_err(CE_CONT, "?%s\n", buf);
697 	kmem_free(buf, REPORTDEV_BUFSIZE);
698 	return (DDI_SUCCESS);
699 }
700 
701 
702 /*
703  * ******************
704  *  map related code
705  * ******************
706  */
707 
708 /*
709  * rootnex_map()
710  *
711  */
712 static int
713 rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, off_t offset,
714     off_t len, caddr_t *vaddrp)
715 {
716 	struct regspec *rp, tmp_reg;
717 	ddi_map_req_t mr = *mp;		/* Get private copy of request */
718 	int error;
719 
720 	mp = &mr;
721 
722 	switch (mp->map_op)  {
723 	case DDI_MO_MAP_LOCKED:
724 	case DDI_MO_UNMAP:
725 	case DDI_MO_MAP_HANDLE:
726 		break;
727 	default:
728 #ifdef	DDI_MAP_DEBUG
729 		cmn_err(CE_WARN, "rootnex_map: unimplemented map op %d.",
730 		    mp->map_op);
731 #endif	/* DDI_MAP_DEBUG */
732 		return (DDI_ME_UNIMPLEMENTED);
733 	}
734 
735 	if (mp->map_flags & DDI_MF_USER_MAPPING)  {
736 #ifdef	DDI_MAP_DEBUG
737 		cmn_err(CE_WARN, "rootnex_map: unimplemented map type: user.");
738 #endif	/* DDI_MAP_DEBUG */
739 		return (DDI_ME_UNIMPLEMENTED);
740 	}
741 
742 	/*
743 	 * First, if given an rnumber, convert it to a regspec...
744 	 * (Presumably, this is on behalf of a child of the root node?)
745 	 */
746 
747 	if (mp->map_type == DDI_MT_RNUMBER)  {
748 
749 		int rnumber = mp->map_obj.rnumber;
750 #ifdef	DDI_MAP_DEBUG
751 		static char *out_of_range =
752 		    "rootnex_map: Out of range rnumber <%d>, device <%s>";
753 #endif	/* DDI_MAP_DEBUG */
754 
755 		rp = i_ddi_rnumber_to_regspec(rdip, rnumber);
756 		if (rp == NULL)  {
757 #ifdef	DDI_MAP_DEBUG
758 			cmn_err(CE_WARN, out_of_range, rnumber,
759 			    ddi_get_name(rdip));
760 #endif	/* DDI_MAP_DEBUG */
761 			return (DDI_ME_RNUMBER_RANGE);
762 		}
763 
764 		/*
765 		 * Convert the given ddi_map_req_t from rnumber to regspec...
766 		 */
767 
768 		mp->map_type = DDI_MT_REGSPEC;
769 		mp->map_obj.rp = rp;
770 	}
771 
772 	/*
773 	 * Adjust offset and length corresponding to called values...
774 	 * XXX: A non-zero length means override the one in the regspec
775 	 * XXX: (regardless of what's in the parent's range?)
776 	 */
777 
778 	tmp_reg = *(mp->map_obj.rp);		/* Preserve underlying data */
779 	rp = mp->map_obj.rp = &tmp_reg;		/* Use tmp_reg in request */
780 
781 #ifdef	DDI_MAP_DEBUG
782 	cmn_err(CE_CONT,
783 		"rootnex: <%s,%s> <0x%x, 0x%x, 0x%d>"
784 		" offset %d len %d handle 0x%x\n",
785 		ddi_get_name(dip), ddi_get_name(rdip),
786 		rp->regspec_bustype, rp->regspec_addr, rp->regspec_size,
787 		offset, len, mp->map_handlep);
788 #endif	/* DDI_MAP_DEBUG */
789 
790 	/*
791 	 * I/O or memory mapping:
792 	 *
793 	 *	<bustype=0, addr=x, len=x>: memory
794 	 *	<bustype=1, addr=x, len=x>: i/o
795 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
796 	 */
797 
798 	if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) {
799 		cmn_err(CE_WARN, "<%s,%s> invalid register spec"
800 		    " <0x%x, 0x%x, 0x%x>", ddi_get_name(dip),
801 		    ddi_get_name(rdip), rp->regspec_bustype,
802 		    rp->regspec_addr, rp->regspec_size);
803 		return (DDI_ME_INVAL);
804 	}
805 
806 	if (rp->regspec_bustype > 1 && rp->regspec_addr == 0) {
807 		/*
808 		 * compatibility i/o mapping
809 		 */
810 		rp->regspec_bustype += (uint_t)offset;
811 	} else {
812 		/*
813 		 * Normal memory or i/o mapping
814 		 */
815 		rp->regspec_addr += (uint_t)offset;
816 	}
817 
818 	if (len != 0)
819 		rp->regspec_size = (uint_t)len;
820 
821 #ifdef	DDI_MAP_DEBUG
822 	cmn_err(CE_CONT,
823 		"             <%s,%s> <0x%x, 0x%x, 0x%d>"
824 		" offset %d len %d handle 0x%x\n",
825 		ddi_get_name(dip), ddi_get_name(rdip),
826 		rp->regspec_bustype, rp->regspec_addr, rp->regspec_size,
827 		offset, len, mp->map_handlep);
828 #endif	/* DDI_MAP_DEBUG */
829 
830 	/*
831 	 * Apply any parent ranges at this level, if applicable.
832 	 * (This is where nexus specific regspec translation takes place.
833 	 * Use of this function is implicit agreement that translation is
834 	 * provided via ddi_apply_range.)
835 	 */
836 
837 #ifdef	DDI_MAP_DEBUG
838 	ddi_map_debug("applying range of parent <%s> to child <%s>...\n",
839 	    ddi_get_name(dip), ddi_get_name(rdip));
840 #endif	/* DDI_MAP_DEBUG */
841 
842 	if ((error = i_ddi_apply_range(dip, rdip, mp->map_obj.rp)) != 0)
843 		return (error);
844 
845 	switch (mp->map_op)  {
846 	case DDI_MO_MAP_LOCKED:
847 
848 		/*
849 		 * Set up the locked down kernel mapping to the regspec...
850 		 */
851 
852 		return (rootnex_map_regspec(mp, vaddrp));
853 
854 	case DDI_MO_UNMAP:
855 
856 		/*
857 		 * Release mapping...
858 		 */
859 
860 		return (rootnex_unmap_regspec(mp, vaddrp));
861 
862 	case DDI_MO_MAP_HANDLE:
863 
864 		return (rootnex_map_handle(mp));
865 
866 	default:
867 		return (DDI_ME_UNIMPLEMENTED);
868 	}
869 }
870 
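/*
 * Illustrative sketch (not part of this file) of how a leaf driver ends up
 * here: ddi_regs_map_setup() builds a DDI_MO_MAP_LOCKED request for the
 * given register number and the DDI framework walks it up to this nexus.
 * Assuming dev_attr is the driver's ddi_device_acc_attr_t:
 *
 *	ddi_acc_handle_t h;
 *	caddr_t regs;
 *	if (ddi_regs_map_setup(dip, 0, &regs, 0, 0, &dev_attr, &h) !=
 *	    DDI_SUCCESS)
 *		return (DDI_FAILURE);
 */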
871 
872 /*
873  * rootnex_map_fault()
874  *
875  *	fault in mappings for requestors
876  */
877 /*ARGSUSED*/
878 static int
879 rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip, struct hat *hat,
880     struct seg *seg, caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot,
881     uint_t lock)
882 {
883 
884 #ifdef	DDI_MAP_DEBUG
885 	ddi_map_debug("rootnex_map_fault: address <%x> pfn <%x>", addr, pfn);
886 	ddi_map_debug(" Seg <%s>\n",
887 	    seg->s_ops == &segdev_ops ? "segdev" :
888 	    seg == &kvseg ? "segkmem" : "NONE!");
889 #endif	/* DDI_MAP_DEBUG */
890 
891 	/*
892 	 * This is all terribly broken, but it is a start
893 	 *
894 	 * XXX	Note that this test means that segdev_ops
895 	 *	must be exported from seg_dev.c.
896 	 * XXX	What about devices with their own segment drivers?
897 	 */
898 	if (seg->s_ops == &segdev_ops) {
899 		struct segdev_data *sdp =
900 			(struct segdev_data *)seg->s_data;
901 
902 		if (hat == NULL) {
903 			/*
904 			 * This is one plausible interpretation of
905 			 * a null hat i.e. use the first hat on the
906 			 * address space hat list which by convention is
907 			 * the hat of the system MMU.  An alternative
908 			 * would be to panic .. this might well be better ..
909 			 */
910 			ASSERT(AS_READ_HELD(seg->s_as, &seg->s_as->a_lock));
911 			hat = seg->s_as->a_hat;
912 			cmn_err(CE_NOTE, "rootnex_map_fault: nil hat");
913 		}
914 		hat_devload(hat, addr, MMU_PAGESIZE, pfn, prot | sdp->hat_attr,
915 		    (lock ? HAT_LOAD_LOCK : HAT_LOAD));
916 	} else if (seg == &kvseg && dp == NULL) {
917 		hat_devload(kas.a_hat, addr, MMU_PAGESIZE, pfn, prot,
918 		    HAT_LOAD_LOCK);
919 	} else
920 		return (DDI_FAILURE);
921 	return (DDI_SUCCESS);
922 }
923 
924 
925 /*
926  * rootnex_map_regspec()
927  *     we don't support mapping of I/O cards above 4Gb
928  */
929 static int
930 rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp)
931 {
932 	ulong_t base;
933 	void *cvaddr;
934 	uint_t npages, pgoffset;
935 	struct regspec *rp;
936 	ddi_acc_hdl_t *hp;
937 	ddi_acc_impl_t *ap;
938 	uint_t	hat_acc_flags;
939 
940 	rp = mp->map_obj.rp;
941 	hp = mp->map_handlep;
942 
943 #ifdef	DDI_MAP_DEBUG
944 	ddi_map_debug(
945 	    "rootnex_map_regspec: <0x%x 0x%x 0x%x> handle 0x%x\n",
946 	    rp->regspec_bustype, rp->regspec_addr,
947 	    rp->regspec_size, mp->map_handlep);
948 #endif	/* DDI_MAP_DEBUG */
949 
950 	/*
951 	 * I/O or memory mapping
952 	 *
953 	 *	<bustype=0, addr=x, len=x>: memory
954 	 *	<bustype=1, addr=x, len=x>: i/o
955 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
956 	 */
957 
958 	if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) {
959 		cmn_err(CE_WARN, "rootnex: invalid register spec"
960 		    " <0x%x, 0x%x, 0x%x>", rp->regspec_bustype,
961 		    rp->regspec_addr, rp->regspec_size);
962 		return (DDI_FAILURE);
963 	}
964 
965 	if (rp->regspec_bustype != 0) {
966 		/*
967 		 * I/O space - needs a handle.
968 		 */
969 		if (hp == NULL) {
970 			return (DDI_FAILURE);
971 		}
972 		ap = (ddi_acc_impl_t *)hp->ah_platform_private;
973 		ap->ahi_acc_attr |= DDI_ACCATTR_IO_SPACE;
974 		impl_acc_hdl_init(hp);
975 
976 		if (mp->map_flags & DDI_MF_DEVICE_MAPPING) {
977 #ifdef  DDI_MAP_DEBUG
978 			ddi_map_debug("rootnex_map_regspec: mmap() \
979 to I/O space is not supported.\n");
980 #endif  /* DDI_MAP_DEBUG */
981 			return (DDI_ME_INVAL);
982 		} else {
983 			/*
984 			 * 1275-compliant vs. compatibility i/o mapping
985 			 */
986 			*vaddrp =
987 			    (rp->regspec_bustype > 1 && rp->regspec_addr == 0) ?
988 				((caddr_t)(uintptr_t)rp->regspec_bustype) :
989 				((caddr_t)(uintptr_t)rp->regspec_addr);
990 
991 			hp->ah_pfn = mmu_btop((ulong_t)rp->regspec_addr &
992 			    (~MMU_PAGEOFFSET));
993 			hp->ah_pnum = mmu_btopr(rp->regspec_size +
994 			    (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET);
995 		}
996 
997 #ifdef	DDI_MAP_DEBUG
998 		ddi_map_debug(
999 	    "rootnex_map_regspec: \"Mapping\" %d bytes I/O space at 0x%x\n",
1000 		    rp->regspec_size, *vaddrp);
1001 #endif	/* DDI_MAP_DEBUG */
1002 		return (DDI_SUCCESS);
1003 	}
1004 
1005 	/*
1006 	 * Memory space
1007 	 */
1008 
1009 	if (hp != NULL) {
1010 		/*
1011 		 * hat layer ignores
1012 		 * hp->ah_acc.devacc_attr_endian_flags.
1013 		 */
1014 		switch (hp->ah_acc.devacc_attr_dataorder) {
1015 		case DDI_STRICTORDER_ACC:
1016 			hat_acc_flags = HAT_STRICTORDER;
1017 			break;
1018 		case DDI_UNORDERED_OK_ACC:
1019 			hat_acc_flags = HAT_UNORDERED_OK;
1020 			break;
1021 		case DDI_MERGING_OK_ACC:
1022 			hat_acc_flags = HAT_MERGING_OK;
1023 			break;
1024 		case DDI_LOADCACHING_OK_ACC:
1025 			hat_acc_flags = HAT_LOADCACHING_OK;
1026 			break;
1027 		case DDI_STORECACHING_OK_ACC:
1028 			hat_acc_flags = HAT_STORECACHING_OK;
1029 			break;
1030 		}
1031 		ap = (ddi_acc_impl_t *)hp->ah_platform_private;
1032 		ap->ahi_acc_attr |= DDI_ACCATTR_CPU_VADDR;
1033 		impl_acc_hdl_init(hp);
1034 		hp->ah_hat_flags = hat_acc_flags;
1035 	} else {
1036 		hat_acc_flags = HAT_STRICTORDER;
1037 	}
1038 
1039 	base = (ulong_t)rp->regspec_addr & (~MMU_PAGEOFFSET); /* base addr */
1040 	pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET; /* offset */
1041 
1042 	if (rp->regspec_size == 0) {
1043 #ifdef  DDI_MAP_DEBUG
1044 		ddi_map_debug("rootnex_map_regspec: zero regspec_size\n");
1045 #endif  /* DDI_MAP_DEBUG */
1046 		return (DDI_ME_INVAL);
1047 	}
1048 
1049 	if (mp->map_flags & DDI_MF_DEVICE_MAPPING) {
1050 		*vaddrp = (caddr_t)mmu_btop(base);
1051 	} else {
1052 		npages = mmu_btopr(rp->regspec_size + pgoffset);
1053 
1054 #ifdef	DDI_MAP_DEBUG
1055 		ddi_map_debug("rootnex_map_regspec: Mapping %d pages \
1056 physical %x ",
1057 		    npages, base);
1058 #endif	/* DDI_MAP_DEBUG */
1059 
1060 		cvaddr = device_arena_alloc(ptob(npages), VM_NOSLEEP);
1061 		if (cvaddr == NULL)
1062 			return (DDI_ME_NORESOURCES);
1063 
1064 		/*
1065 		 * Now map in the pages we've allocated...
1066 		 */
1067 		hat_devload(kas.a_hat, cvaddr, mmu_ptob(npages), mmu_btop(base),
1068 		    mp->map_prot | hat_acc_flags, HAT_LOAD_LOCK);
1069 		*vaddrp = (caddr_t)cvaddr + pgoffset;
1070 
1071 		/* save away pfn and npages for FMA */
1072 		hp = mp->map_handlep;
1073 		if (hp) {
1074 			hp->ah_pfn = mmu_btop(base);
1075 			hp->ah_pnum = npages;
1076 		}
1077 	}
1078 
1079 #ifdef	DDI_MAP_DEBUG
1080 	ddi_map_debug("at virtual 0x%x\n", *vaddrp);
1081 #endif	/* DDI_MAP_DEBUG */
1082 	return (DDI_SUCCESS);
1083 }
1084 
1085 
1086 /*
1087  * rootnex_unmap_regspec()
1088  *
1089  */
1090 static int
1091 rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp)
1092 {
1093 	caddr_t addr = (caddr_t)*vaddrp;
1094 	uint_t npages, pgoffset;
1095 	struct regspec *rp;
1096 
1097 	if (mp->map_flags & DDI_MF_DEVICE_MAPPING)
1098 		return (0);
1099 
1100 	rp = mp->map_obj.rp;
1101 
1102 	if (rp->regspec_size == 0) {
1103 #ifdef  DDI_MAP_DEBUG
1104 		ddi_map_debug("rootnex_unmap_regspec: zero regspec_size\n");
1105 #endif  /* DDI_MAP_DEBUG */
1106 		return (DDI_ME_INVAL);
1107 	}
1108 
1109 	/*
1110 	 * I/O or memory mapping:
1111 	 *
1112 	 *	<bustype=0, addr=x, len=x>: memory
1113 	 *	<bustype=1, addr=x, len=x>: i/o
1114 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
1115 	 */
1116 	if (rp->regspec_bustype != 0) {
1117 		/*
1118 		 * This is I/O space, which requires no particular
1119 		 * processing on unmap since it isn't mapped in the
1120 		 * first place.
1121 		 */
1122 		return (DDI_SUCCESS);
1123 	}
1124 
1125 	/*
1126 	 * Memory space
1127 	 */
1128 	pgoffset = (uintptr_t)addr & MMU_PAGEOFFSET;
1129 	npages = mmu_btopr(rp->regspec_size + pgoffset);
1130 	hat_unload(kas.a_hat, addr - pgoffset, ptob(npages), HAT_UNLOAD_UNLOCK);
1131 	device_arena_free(addr - pgoffset, ptob(npages));
1132 
1133 	/*
1134 	 * Destroy the pointer - the mapping has logically gone
1135 	 */
1136 	*vaddrp = NULL;
1137 
1138 	return (DDI_SUCCESS);
1139 }
1140 
1141 
1142 /*
1143  * rootnex_map_handle()
1144  *
1145  */
1146 static int
1147 rootnex_map_handle(ddi_map_req_t *mp)
1148 {
1149 	ddi_acc_hdl_t *hp;
1150 	ulong_t base;
1151 	uint_t pgoffset;
1152 	struct regspec *rp;
1153 
1154 	rp = mp->map_obj.rp;
1155 
1156 #ifdef	DDI_MAP_DEBUG
1157 	ddi_map_debug(
1158 	    "rootnex_map_handle: <0x%x 0x%x 0x%x> handle 0x%x\n",
1159 	    rp->regspec_bustype, rp->regspec_addr,
1160 	    rp->regspec_size, mp->map_handlep);
1161 #endif	/* DDI_MAP_DEBUG */
1162 
1163 	/*
1164 	 * I/O or memory mapping:
1165 	 *
1166 	 *	<bustype=0, addr=x, len=x>: memory
1167 	 *	<bustype=1, addr=x, len=x>: i/o
1168 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
1169 	 */
1170 	if (rp->regspec_bustype != 0) {
1171 		/*
1172 		 * This refers to I/O space, and we don't support "mapping"
1173 		 * I/O space to a user.
1174 		 */
1175 		return (DDI_FAILURE);
1176 	}
1177 
1178 	/*
1179 	 * Set up the hat_flags for the mapping.
1180 	 */
1181 	hp = mp->map_handlep;
1182 
1183 	switch (hp->ah_acc.devacc_attr_endian_flags) {
1184 	case DDI_NEVERSWAP_ACC:
1185 		hp->ah_hat_flags = HAT_NEVERSWAP | HAT_STRICTORDER;
1186 		break;
1187 	case DDI_STRUCTURE_LE_ACC:
1188 		hp->ah_hat_flags = HAT_STRUCTURE_LE;
1189 		break;
1190 	case DDI_STRUCTURE_BE_ACC:
1191 		return (DDI_FAILURE);
1192 	default:
1193 		return (DDI_REGS_ACC_CONFLICT);
1194 	}
1195 
1196 	switch (hp->ah_acc.devacc_attr_dataorder) {
1197 	case DDI_STRICTORDER_ACC:
1198 		break;
1199 	case DDI_UNORDERED_OK_ACC:
1200 		hp->ah_hat_flags |= HAT_UNORDERED_OK;
1201 		break;
1202 	case DDI_MERGING_OK_ACC:
1203 		hp->ah_hat_flags |= HAT_MERGING_OK;
1204 		break;
1205 	case DDI_LOADCACHING_OK_ACC:
1206 		hp->ah_hat_flags |= HAT_LOADCACHING_OK;
1207 		break;
1208 	case DDI_STORECACHING_OK_ACC:
1209 		hp->ah_hat_flags |= HAT_STORECACHING_OK;
1210 		break;
1211 	default:
1212 		return (DDI_FAILURE);
1213 	}
1214 
1215 	base = (ulong_t)rp->regspec_addr & (~MMU_PAGEOFFSET); /* base addr */
1216 	pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET; /* offset */
1217 
1218 	if (rp->regspec_size == 0)
1219 		return (DDI_ME_INVAL);
1220 
1221 	hp->ah_pfn = mmu_btop(base);
1222 	hp->ah_pnum = mmu_btopr(rp->regspec_size + pgoffset);
1223 
1224 	return (DDI_SUCCESS);
1225 }
1226 
1227 
1228 
1229 /*
1230  * ************************
1231  *  interrupt related code
1232  * ************************
1233  */
1234 
1235 /*
1236  * rootnex_intr_ops()
1237  *	bus_intr_op() function for interrupt support
1238  */
1239 /* ARGSUSED */
1240 static int
1241 rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op,
1242     ddi_intr_handle_impl_t *hdlp, void *result)
1243 {
1244 	struct intrspec			*ispec;
1245 	struct ddi_parent_private_data	*pdp;
1246 
1247 	DDI_INTR_NEXDBG((CE_CONT,
1248 	    "rootnex_intr_ops: pdip = %p, rdip = %p, intr_op = %x, hdlp = %p\n",
1249 	    (void *)pdip, (void *)rdip, intr_op, (void *)hdlp));
1250 
1251 	/* Process the interrupt operation */
1252 	switch (intr_op) {
1253 	case DDI_INTROP_GETCAP:
1254 		/* First check with pcplusmp */
1255 		if (psm_intr_ops == NULL)
1256 			return (DDI_FAILURE);
1257 
1258 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_CAP, result)) {
1259 			*(int *)result = 0;
1260 			return (DDI_FAILURE);
1261 		}
1262 		break;
1263 	case DDI_INTROP_SETCAP:
1264 		if (psm_intr_ops == NULL)
1265 			return (DDI_FAILURE);
1266 
1267 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_CAP, result))
1268 			return (DDI_FAILURE);
1269 		break;
1270 	case DDI_INTROP_ALLOC:
1271 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
1272 			return (DDI_FAILURE);
1273 		hdlp->ih_pri = ispec->intrspec_pri;
1274 		*(int *)result = hdlp->ih_scratch1;
1275 		break;
1276 	case DDI_INTROP_FREE:
1277 		pdp = ddi_get_parent_data(rdip);
1278 		/*
1279 		 * Special case for the 'pcic' driver only.
1280 		 * If an intrspec was created for it, clean it up here
1281 		 * See detailed comments on this in the function
1282 		 * rootnex_get_ispec().
1283 		 */
1284 		if (pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) {
1285 			kmem_free(pdp->par_intr, sizeof (struct intrspec) *
1286 			    pdp->par_nintr);
1287 			/*
1288 			 * Set it to zero; so that
1289 			 * DDI framework doesn't free it again
1290 			 */
1291 			pdp->par_intr = NULL;
1292 			pdp->par_nintr = 0;
1293 		}
1294 		break;
1295 	case DDI_INTROP_GETPRI:
1296 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
1297 			return (DDI_FAILURE);
1298 		*(int *)result = ispec->intrspec_pri;
1299 		break;
1300 	case DDI_INTROP_SETPRI:
1301 		/* Validate the interrupt priority passed to us */
1302 		if (*(int *)result > LOCK_LEVEL)
1303 			return (DDI_FAILURE);
1304 
1305 		/* Ensure that PSM is all initialized and ispec is ok */
1306 		if ((psm_intr_ops == NULL) ||
1307 		    ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL))
1308 			return (DDI_FAILURE);
1309 
1310 		/* Change the priority */
1311 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_PRI, result) ==
1312 		    PSM_FAILURE)
1313 			return (DDI_FAILURE);
1314 
1315 		/* update the ispec with the new priority */
1316 		ispec->intrspec_pri =  *(int *)result;
1317 		break;
1318 	case DDI_INTROP_ADDISR:
1319 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
1320 			return (DDI_FAILURE);
1321 		ispec->intrspec_func = hdlp->ih_cb_func;
1322 		break;
1323 	case DDI_INTROP_REMISR:
1324 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
1325 			return (DDI_FAILURE);
1326 		ispec->intrspec_func = (uint_t (*)()) 0;
1327 		break;
1328 	case DDI_INTROP_ENABLE:
1329 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
1330 			return (DDI_FAILURE);
1331 
1332 		/* Call psmi to translate irq with the dip */
1333 		if (psm_intr_ops == NULL)
1334 			return (DDI_FAILURE);
1335 
1336 		((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
1337 		(void) (*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_XLATE_VECTOR,
1338 		    (int *)&hdlp->ih_vector);
1339 
1340 		/* Add the interrupt handler */
1341 		if (!add_avintr((void *)hdlp, ispec->intrspec_pri,
1342 		    hdlp->ih_cb_func, DEVI(rdip)->devi_name, hdlp->ih_vector,
1343 		    hdlp->ih_cb_arg1, hdlp->ih_cb_arg2, NULL, rdip))
1344 			return (DDI_FAILURE);
1345 		break;
1346 	case DDI_INTROP_DISABLE:
1347 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
1348 			return (DDI_FAILURE);
1349 
1350 		/* Call psm_ops() to translate irq with the dip */
1351 		if (psm_intr_ops == NULL)
1352 			return (DDI_FAILURE);
1353 
1354 		((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
1355 		(void) (*psm_intr_ops)(rdip, hdlp,
1356 		    PSM_INTR_OP_XLATE_VECTOR, (int *)&hdlp->ih_vector);
1357 
1358 		/* Remove the interrupt handler */
1359 		rem_avintr((void *)hdlp, ispec->intrspec_pri,
1360 		    hdlp->ih_cb_func, hdlp->ih_vector);
1361 		break;
1362 	case DDI_INTROP_SETMASK:
1363 		if (psm_intr_ops == NULL)
1364 			return (DDI_FAILURE);
1365 
1366 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_MASK, NULL))
1367 			return (DDI_FAILURE);
1368 		break;
1369 	case DDI_INTROP_CLRMASK:
1370 		if (psm_intr_ops == NULL)
1371 			return (DDI_FAILURE);
1372 
1373 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_CLEAR_MASK, NULL))
1374 			return (DDI_FAILURE);
1375 		break;
1376 	case DDI_INTROP_GETPENDING:
1377 		if (psm_intr_ops == NULL)
1378 			return (DDI_FAILURE);
1379 
1380 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_PENDING,
1381 		    result)) {
1382 			*(int *)result = 0;
1383 			return (DDI_FAILURE);
1384 		}
1385 		break;
1386 	case DDI_INTROP_NINTRS:
1387 		if ((pdp = ddi_get_parent_data(rdip)) == NULL)
1388 			return (DDI_FAILURE);
1389 		*(int *)result = pdp->par_nintr;
1390 		if (pdp->par_nintr == 0) {
1391 			/*
1392 			 * Special case for the 'pcic' driver only. This driver
1393 			 * is a child of the 'isa' and 'rootnex' drivers.
1394 			 *
1395 			 * See detailed comments on this in the function
1396 			 * rootnex_get_ispec().
1397 			 *
1398 			 * Children of 'pcic' send the 'NINTRS' request all the
1399 			 * way to the rootnex driver. But the 'pdp->par_nintr'
1400 			 * field may not be initialized. So, we fake it here
1401 			 * to return 1 (a la what PCMCIA nexus does).
1402 			 */
1403 			if (strcmp(ddi_get_name(rdip), "pcic") == 0)
1404 				*(int *)result = 1;
1405 		}
1406 		break;
1407 	case DDI_INTROP_SUPPORTED_TYPES:
1408 		*(int *)result = 0;
1409 		*(int *)result |= DDI_INTR_TYPE_FIXED;	/* Always ... */
1410 		break;
1411 	case DDI_INTROP_NAVAIL:
1412 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
1413 			return (DDI_FAILURE);
1414 
1415 		if (psm_intr_ops == NULL) {
1416 			*(int *)result = 1;
1417 			break;
1418 		}
1419 
1420 		/* Priority in the handle not initialized yet */
1421 		hdlp->ih_pri = ispec->intrspec_pri;
1422 		(void) (*psm_intr_ops)(rdip, hdlp,
1423 		    PSM_INTR_OP_NAVAIL_VECTORS, result);
1424 		break;
1425 	default:
1426 		return (DDI_FAILURE);
1427 	}
1428 
1429 	return (DDI_SUCCESS);
1430 }
1431 
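/*
 * Illustrative sketch (not part of this file): these introps are reached
 * through the generic interrupt framework; a leaf driver doing
 *
 *	ddi_intr_handle_t hdl;
 *	int actual;
 *	(void) ddi_intr_alloc(dip, &hdl, DDI_INTR_TYPE_FIXED, 0, 1, &actual,
 *	    DDI_INTR_ALLOC_NORMAL);
 *
 * causes DDI_INTROP_ALLOC (and friends) to be passed up to this function.
 */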
1432 
1433 /*
1434  * rootnex_get_ispec()
1435  *	convert an interrupt number to an interrupt specification.
1436  *	The interrupt number determines which interrupt spec will be
1437  *	returned if more than one exists.
1438  *
1439  *	Look into the parent private data area of the 'rdip' to find out
1440  *	the interrupt specification.  First check to make sure there is
1441  *	one that matches "inumber" and then return a pointer to it.
1442  *
1443  *	Return NULL if one could not be found.
1444  *
1445  *	NOTE: This is needed for rootnex_intr_ops()
1446  */
1447 static struct intrspec *
1448 rootnex_get_ispec(dev_info_t *rdip, int inum)
1449 {
1450 	struct ddi_parent_private_data *pdp = ddi_get_parent_data(rdip);
1451 
1452 	/*
1453 	 * Special case handling for drivers that provide their own
1454 	 * intrspec structures instead of relying on the DDI framework.
1455 	 *
1456 	 * A broken hardware driver in ON could potentially provide its
1457 	 * own intrspec structure, instead of relying on the hardware.
1458 	 * If these drivers are children of 'rootnex' then we need to
1459 	 * continue to provide backward compatibility to them here.
1460 	 *
1461 	 * The following check is a special case for the 'pcic' driver, which
1462 	 * was found to have broken hardware and thereby provides its own intrspec.
1463 	 *
1464 	 * Verbatim comments from this driver are shown here:
1465 	 * "Don't use the ddi_add_intr since we don't have a
1466 	 * default intrspec in all cases."
1467 	 *
1468 	 * Since an 'ispec' may not be always created for it,
1469 	 * check for that and create one if so.
1470 	 *
1471 	 * NOTE: Currently 'pcic' is the only driver found to do this.
1472 	 */
1473 	if (!pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) {
1474 		pdp->par_nintr = 1;
1475 		pdp->par_intr = kmem_zalloc(sizeof (struct intrspec) *
1476 		    pdp->par_nintr, KM_SLEEP);
1477 	}
1478 
1479 	/* Validate the interrupt number */
1480 	if (inum >= pdp->par_nintr)
1481 		return (NULL);
1482 
1483 	/* Get the interrupt structure pointer and return that */
1484 	return ((struct intrspec *)&pdp->par_intr[inum]);
1485 }
1486 
1487 
1488 /*
1489  * ******************
1490  *  dma related code
1491  * ******************
1492  */
1493 
1494 /*
1495  * rootnex_dma_allochdl()
1496  *    called from ddi_dma_alloc_handle().
1497  */
1498 /*ARGSUSED*/
1499 static int
1500 rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
1501     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
1502 {
1503 	uint64_t maxsegmentsize_ll;
1504 	uint_t maxsegmentsize;
1505 	ddi_dma_impl_t *hp;
1506 	rootnex_dma_t *dma;
1507 	uint64_t count_max;
1508 	uint64_t seg;
1509 	int kmflag;
1510 	int e;
1511 
1512 
1513 	/* convert our sleep flags */
1514 	if (waitfp == DDI_DMA_SLEEP) {
1515 		kmflag = KM_SLEEP;
1516 	} else {
1517 		kmflag = KM_NOSLEEP;
1518 	}
1519 
1520 	/*
1521 	 * We try to do only one memory allocation here. We'll do a little
1522 	 * pointer manipulation later. If the bind ends up taking more than
1523 	 * our prealloc's space, we'll have to allocate more memory in the
1524 	 * bind operation. Not great, but much better than before and the
1525 	 * best we can do with the current bind interfaces.
1526 	 */
1527 	hp = kmem_cache_alloc(rootnex_state->r_dmahdl_cache, kmflag);
1528 	if (hp == NULL) {
1529 		if (waitfp != DDI_DMA_DONTWAIT) {
1530 			ddi_set_callback(waitfp, arg,
1531 			    &rootnex_state->r_dvma_call_list_id);
1532 		}
1533 		return (DDI_DMA_NORESOURCES);
1534 	}
1535 
1536 	/* Do our pointer manipulation now, align the structures */
1537 	hp->dmai_private = (void *)(((uintptr_t)hp +
1538 	    (uintptr_t)sizeof (ddi_dma_impl_t) + 0x7) & ~0x7);
1539 	dma = (rootnex_dma_t *)hp->dmai_private;
1540 	dma->dp_prealloc_buffer = (uchar_t *)(((uintptr_t)dma +
1541 	    sizeof (rootnex_dma_t) + 0x7) & ~0x7);
1542 
1543 	/* setup the handle */
1544 	rootnex_clean_dmahdl(hp);
1545 	dma->dp_dip = rdip;
1546 	dma->dp_sglinfo.si_min_addr = attr->dma_attr_addr_lo;
1547 	dma->dp_sglinfo.si_max_addr = attr->dma_attr_addr_hi;
1548 	hp->dmai_minxfer = attr->dma_attr_minxfer;
1549 	hp->dmai_burstsizes = attr->dma_attr_burstsizes;
1550 	hp->dmai_rdip = rdip;
1551 	hp->dmai_attr = *attr;
1552 
1553 	/* we don't need to worry about the SPL since we do a tryenter */
1554 	mutex_init(&dma->dp_mutex, NULL, MUTEX_DRIVER, NULL);
1555 
1556 	/*
1557 	 * Figure out our maximum segment size. If the segment size is greater
1558 	 * than 4G, we will limit it to (4G - 1) since the max size of a dma
1559 	 * object (ddi_dma_obj_t.dmao_size) is 32 bits. dma_attr_seg and
1560 	 * dma_attr_count_max are size-1 type values.
1561 	 *
1562 	 * Maximum segment size is the largest physically contiguous chunk of
1563 	 * memory that we can return from a bind (i.e. the maximum size of a
1564 	 * single cookie).
1565 	 */
1566 
1567 	/* handle the rollover cases */
1568 	seg = attr->dma_attr_seg + 1;
1569 	if (seg < attr->dma_attr_seg) {
1570 		seg = attr->dma_attr_seg;
1571 	}
1572 	count_max = attr->dma_attr_count_max + 1;
1573 	if (count_max < attr->dma_attr_count_max) {
1574 		count_max = attr->dma_attr_count_max;
1575 	}
1576 
1577 	/*
1578 	 * granularity may or may not be a power of two. If it isn't, we can't
1579 	 * use a simple mask.
1580 	 */
1581 	if (attr->dma_attr_granular & (attr->dma_attr_granular - 1)) {
1582 		dma->dp_granularity_power_2 = B_FALSE;
1583 	} else {
1584 		dma->dp_granularity_power_2 = B_TRUE;
1585 	}
1586 
1587 	/*
1588 	 * maxxfer should be a whole multiple of granularity. If we're going to
1589 	 * break up a window because we're greater than maxxfer, we might as
1590 	 * well make sure maxxfer is a whole multiple so we don't have to
1591 	 * worry about trimming the window later on for this case.
1592 	 */
1593 	if (attr->dma_attr_granular > 1) {
1594 		if (dma->dp_granularity_power_2) {
1595 			dma->dp_maxxfer = attr->dma_attr_maxxfer -
1596 			    (attr->dma_attr_maxxfer &
1597 			    (attr->dma_attr_granular - 1));
1598 		} else {
1599 			dma->dp_maxxfer = attr->dma_attr_maxxfer -
1600 			    (attr->dma_attr_maxxfer % attr->dma_attr_granular);
1601 		}
1602 	} else {
1603 		dma->dp_maxxfer = attr->dma_attr_maxxfer;
1604 	}
1605 
1606 	maxsegmentsize_ll = MIN(seg, dma->dp_maxxfer);
1607 	maxsegmentsize_ll = MIN(maxsegmentsize_ll, count_max);
1608 	if (maxsegmentsize_ll == 0 || (maxsegmentsize_ll > 0xFFFFFFFF)) {
1609 		maxsegmentsize = 0xFFFFFFFF;
1610 	} else {
1611 		maxsegmentsize = maxsegmentsize_ll;
1612 	}
1613 	dma->dp_sglinfo.si_max_cookie_size = maxsegmentsize;
1614 	dma->dp_sglinfo.si_segmask = attr->dma_attr_seg;
1615 
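	/*
	 * Worked example of the limits above: with dma_attr_seg = 0xFFFF,
	 * dma_attr_count_max = 0x1FFFF, dma_attr_maxxfer = 0x10000 and a
	 * granularity of 1, seg and count_max become 0x10000 and 0x20000,
	 * so maxsegmentsize = MIN(0x10000, 0x10000, 0x20000) = 0x10000.
	 */
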
1616 	/* check the ddi_dma_attr arg to make sure it makes a little sense */
1617 	if (rootnex_alloc_check_parms) {
1618 		e = rootnex_valid_alloc_parms(attr, maxsegmentsize);
1619 		if (e != DDI_SUCCESS) {
1620 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ALLOC_FAIL]);
1621 			(void) rootnex_dma_freehdl(dip, rdip,
1622 			    (ddi_dma_handle_t)hp);
1623 			return (e);
1624 		}
1625 	}
1626 
1627 	*handlep = (ddi_dma_handle_t)hp;
1628 
1629 	ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1630 	DTRACE_PROBE1(rootnex__alloc__handle, uint64_t,
1631 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1632 
1633 	return (DDI_SUCCESS);
1634 }
1635 
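/*
 * Illustrative sketch (not part of this file): a driver reaches the
 * allochdl/freehdl pair via the public DDI calls. Assuming dma_attr is the
 * driver's ddi_dma_attr_t:
 *
 *	ddi_dma_handle_t dmah;
 *	if (ddi_dma_alloc_handle(dip, &dma_attr, DDI_DMA_SLEEP, NULL,
 *	    &dmah) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	...
 *	ddi_dma_free_handle(&dmah);
 */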
1636 
1637 /*
1638  * rootnex_dma_freehdl()
1639  *    called from ddi_dma_free_handle().
1640  */
1641 /*ARGSUSED*/
1642 static int
1643 rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
1644 {
1645 	ddi_dma_impl_t *hp;
1646 	rootnex_dma_t *dma;
1647 
1648 
1649 	hp = (ddi_dma_impl_t *)handle;
1650 	dma = (rootnex_dma_t *)hp->dmai_private;
1651 
1652 	/* unbind should have been called first */
1653 	ASSERT(!dma->dp_inuse);
1654 
1655 	mutex_destroy(&dma->dp_mutex);
1656 	kmem_cache_free(rootnex_state->r_dmahdl_cache, hp);
1657 
1658 	ROOTNEX_PROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1659 	DTRACE_PROBE1(rootnex__free__handle, uint64_t,
1660 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1661 
1662 	if (rootnex_state->r_dvma_call_list_id)
1663 		ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
1664 
1665 	return (DDI_SUCCESS);
1666 }
1667 
1668 
1669 /*
1670  * rootnex_dma_bindhdl()
1671  *    called from ddi_dma_addr_bind_handle() and ddi_dma_buf_bind_handle().
1672  */
1673 /*ARGSUSED*/
1674 static int
1675 rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
1676     struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
1677 {
1678 	rootnex_sglinfo_t *sinfo;
1679 	ddi_dma_attr_t *attr;
1680 	ddi_dma_impl_t *hp;
1681 	rootnex_dma_t *dma;
1682 	int kmflag;
1683 	int e;
1684 
1685 
1686 	hp = (ddi_dma_impl_t *)handle;
1687 	dma = (rootnex_dma_t *)hp->dmai_private;
1688 	sinfo = &dma->dp_sglinfo;
1689 	attr = &hp->dmai_attr;
1690 
1691 	hp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS;
1692 
1693 	/*
1694 	 * This is useful for debugging a driver. Not as useful in a production
1695 	 * system. The only time this will fail is if you have a driver bug.
1696 	 */
1697 	if (rootnex_bind_check_inuse) {
1698 		/*
1699 		 * No one else should ever have this lock unless someone else
1700 		 * is trying to use this handle. So contention on the lock
1701 		 * is the same as inuse being set.
1702 		 */
1703 		e = mutex_tryenter(&dma->dp_mutex);
1704 		if (e == 0) {
1705 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
1706 			return (DDI_DMA_INUSE);
1707 		}
1708 		if (dma->dp_inuse) {
1709 			mutex_exit(&dma->dp_mutex);
1710 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
1711 			return (DDI_DMA_INUSE);
1712 		}
1713 		dma->dp_inuse = B_TRUE;
1714 		mutex_exit(&dma->dp_mutex);
1715 	}
1716 
1717 	/* check the ddi_dma_attr arg to make sure it makes a little sense */
1718 	if (rootnex_bind_check_parms) {
1719 		e = rootnex_valid_bind_parms(dmareq, attr);
1720 		if (e != DDI_SUCCESS) {
1721 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
1722 			rootnex_clean_dmahdl(hp);
1723 			return (e);
1724 		}
1725 	}
1726 
1727 	/* save away the original bind info */
1728 	dma->dp_dma = dmareq->dmar_object;
1729 
1730 	/*
1731 	 * Figure out a rough estimate of what maximum number of pages this
1732 	 * buffer could use (a high estimate of course).
1733 	 */
1734 	sinfo->si_max_pages = mmu_btopr(dma->dp_dma.dmao_size) + 1;
1735 
1736 	/*
1737 	 * We'll use the pre-allocated cookies for any bind that will *always*
1738 	 * fit (more important to be consistent, we don't want to create
1739 	 * additional degenerate cases).
1740 	 */
1741 	if (sinfo->si_max_pages <= rootnex_state->r_prealloc_cookies) {
1742 		dma->dp_cookies = (ddi_dma_cookie_t *)dma->dp_prealloc_buffer;
1743 		dma->dp_need_to_free_cookie = B_FALSE;
1744 		DTRACE_PROBE2(rootnex__bind__prealloc, dev_info_t *, rdip,
1745 		    uint_t, sinfo->si_max_pages);
1746 
1747 	/*
1748 	 * For anything larger than that, we'll go ahead and allocate the
1749 	 * maximum number of pages we expect to see. Hopefully, we won't be
1750 	 * seeing this path in the fast path for high performance devices very
1751 	 * frequently.
1752 	 *
1753 	 * A DDI bind interface that allowed the driver to provide storage to
1754 	 * the bind interface would speed this case up.
1755 	 */
1756 	} else {
1757 		/* convert the sleep flags */
1758 		if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
1759 			kmflag =  KM_SLEEP;
1760 		} else {
1761 			kmflag =  KM_NOSLEEP;
1762 		}
1763 
1764 		/*
1765 		 * Save away how much memory we allocated. If we're doing a
1766 		 * nosleep, the alloc could fail...
1767 		 */
1768 		dma->dp_cookie_size = sinfo->si_max_pages *
1769 		    sizeof (ddi_dma_cookie_t);
1770 		dma->dp_cookies = kmem_alloc(dma->dp_cookie_size, kmflag);
1771 		if (dma->dp_cookies == NULL) {
1772 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
1773 			rootnex_clean_dmahdl(hp);
1774 			return (DDI_DMA_NORESOURCES);
1775 		}
1776 		dma->dp_need_to_free_cookie = B_TRUE;
1777 		DTRACE_PROBE2(rootnex__bind__alloc, dev_info_t *, rdip, uint_t,
1778 		    sinfo->si_max_pages);
1779 	}
1780 	hp->dmai_cookie = dma->dp_cookies;
1781 
1782 	/*
1783 	 * Get the real sgl. rootnex_get_sgl will fill in the cookie array while
1784 	 * looking at the constraints in the dma structure. It will then put some
1785 	 * additional state about the sgl in the dma struct (i.e. is the sgl
1786 	 * clean, or do we need to do some munging; how many pages need to be
1787 	 * copied, etc.)
1788 	 */
1789 	rootnex_get_sgl(&dmareq->dmar_object, dma->dp_cookies,
1790 	    &dma->dp_sglinfo);
1791 	ASSERT(sinfo->si_sgl_size <= sinfo->si_max_pages);
1792 
1793 	/* if we don't need a copy buffer, we don't need to sync */
1794 	if (sinfo->si_copybuf_req == 0) {
1795 		hp->dmai_rflags |= DMP_NOSYNC;
1796 	}
1797 
1798 	/*
1799 	 * If the driver supports FMA, insert the handle in the FMA DMA handle
1800 	 * cache.
1801 	 */
1802 	if (attr->dma_attr_flags & DDI_DMA_FLAGERR) {
1803 		hp->dmai_error.err_cf = rootnex_dma_check;
1804 		(void) ndi_fmc_insert(rdip, DMA_HANDLE, hp, NULL);
1805 	}
1806 
1807 	/*
1808 	 * if we don't need the copybuf and we don't need to do a partial,  we
1809 	 * hit the fast path. All the high performance devices should be trying
1810 	 * to hit this path. To hit this path, a device should be able to reach
1811 	 * all of memory, shouldn't try to bind more than it can transfer, and
1812 	 * the buffer shouldn't require more cookies than the driver/device can
1813 	 * handle [sgllen].
1814 	 */
1815 	if ((sinfo->si_copybuf_req == 0) &&
1816 	    (sinfo->si_sgl_size <= attr->dma_attr_sgllen) &&
1817 	    (dma->dp_dma.dmao_size < dma->dp_maxxfer)) {
1818 		/*
1819 		 * copy out the first cookie and ccountp, set the cookie
1820 		 * pointer to the second cookie. The first cookie is passed
1821 		 * back on the stack. Additional cookies are accessed via
1822 		 * ddi_dma_nextcookie()
1823 		 */
1824 		*cookiep = dma->dp_cookies[0];
1825 		*ccountp = sinfo->si_sgl_size;
1826 		hp->dmai_cookie++;
1827 		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
1828 		hp->dmai_nwin = 1;
1829 		ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
1830 		DTRACE_PROBE3(rootnex__bind__fast, dev_info_t *, rdip, uint64_t,
1831 		    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t,
1832 		    dma->dp_dma.dmao_size);
1833 		return (DDI_DMA_MAPPED);
1834 	}
1835 
1836 	/*
1837 	 * go to the slow path, we may need to alloc more memory, create
1838 	 * multiple windows, and munge up a sgl to make the device happy.
1839 	 */
1840 	e = rootnex_bind_slowpath(hp, dmareq, dma, attr, kmflag);
1841 	if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) {
1842 		if (dma->dp_need_to_free_cookie) {
1843 			kmem_free(dma->dp_cookies, dma->dp_cookie_size);
1844 		}
1845 		ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
1846 		rootnex_clean_dmahdl(hp); /* must be after free cookie */
1847 		return (e);
1848 	}
1849 
1850 	/* if the first window uses the copy buffer, sync it for the device */
1851 	if ((dma->dp_window[dma->dp_current_win].wd_dosync) &&
1852 	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
1853 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
1854 		    DDI_DMA_SYNC_FORDEV);
1855 	}
1856 
1857 	/*
1858 	 * copy out the first cookie and ccountp, set the cookie pointer to the
1859 	 * second cookie. Make sure the partial flag is set/cleared correctly.
1860 	 * If we have a partial map (i.e. multiple windows), the number of
1861 	 * cookies we return is the number of cookies in the first window.
1862 	 */
1863 	if (e == DDI_DMA_MAPPED) {
1864 		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
1865 		*ccountp = sinfo->si_sgl_size;
1866 	} else {
1867 		hp->dmai_rflags |= DDI_DMA_PARTIAL;
1868 		*ccountp = dma->dp_window[dma->dp_current_win].wd_cookie_cnt;
1869 		ASSERT(hp->dmai_nwin <= dma->dp_max_win);
1870 	}
1871 	*cookiep = dma->dp_cookies[0];
1872 	hp->dmai_cookie++;
1873 
1874 	ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
1875 	DTRACE_PROBE3(rootnex__bind__slow, dev_info_t *, rdip, uint64_t,
1876 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t,
1877 	    dma->dp_dma.dmao_size);
1878 	return (e);
1879 }
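
/*
 * Illustrative sketch (not part of this driver): how a hypothetical leaf
 * driver would consume the cookie and cookie count handed back by the bind
 * path above. The device programming routine xx_program_sge() is an
 * assumption used only for illustration.
 *
 *	ddi_dma_cookie_t cookie;
 *	uint_t ccount;
 *	uint_t i;
 *
 *	if (ddi_dma_addr_bind_handle(hdl, NULL, buf, len,
 *	    DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 *	    &cookie, &ccount) != DDI_DMA_MAPPED)
 *		return (EIO);
 *	for (i = 0; i < ccount; i++) {
 *		xx_program_sge(sc, cookie.dmac_laddress, cookie.dmac_size);
 *		if ((i + 1) < ccount)
 *			ddi_dma_nextcookie(hdl, &cookie);
 *	}
 */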
1880 
1881 
1882 /*
1883  * rootnex_dma_unbindhdl()
1884  *    called from ddi_dma_unbind_handle()
1885  */
1886 /*ARGSUSED*/
1887 static int
1888 rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
1889     ddi_dma_handle_t handle)
1890 {
1891 	ddi_dma_impl_t *hp;
1892 	rootnex_dma_t *dma;
1893 	int e;
1894 
1895 
1896 	hp = (ddi_dma_impl_t *)handle;
1897 	dma = (rootnex_dma_t *)hp->dmai_private;
1898 
1899 	/* make sure the buffer wasn't free'd before calling unbind */
1900 	if (rootnex_unbind_verify_buffer) {
1901 		e = rootnex_verify_buffer(dma);
1902 		if (e != DDI_SUCCESS) {
1903 			ASSERT(0);
1904 			return (DDI_FAILURE);
1905 		}
1906 	}
1907 
1908 	/* sync the current window before unbinding the buffer */
1909 	if (dma->dp_window && dma->dp_window[dma->dp_current_win].wd_dosync &&
1910 	    (hp->dmai_rflags & DDI_DMA_READ)) {
1911 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
1912 		    DDI_DMA_SYNC_FORCPU);
1913 	}
1914 
1915 	/*
1916 	 * If the driver supports FMA, remove the handle in the FMA DMA handle
1917 	 * cache.
1918 	 */
1919 	if (hp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) {
1920 		if ((DEVI(rdip)->devi_fmhdl != NULL) &&
1921 		    (DDI_FM_DMA_ERR_CAP(DEVI(rdip)->devi_fmhdl->fh_cap))) {
1922 			(void) ndi_fmc_remove(rdip, DMA_HANDLE, hp);
1923 		}
1924 	}
1925 
1926 	/*
1927 	 * cleanup and copy buffer or window state. if we didn't use the copy
1928 	 * clean up any copy buffer or window state. if we didn't use the copy
1929 	 */
1930 	rootnex_teardown_copybuf(dma);
1931 	rootnex_teardown_windows(dma);
1932 
1933 	/*
1934 	 * If we had to allocate space for the worst case sgl (it didn't
1935 	 * fit into our pre-allocated buffer), free that up now
1936 	 */
1937 	if (dma->dp_need_to_free_cookie) {
1938 		kmem_free(dma->dp_cookies, dma->dp_cookie_size);
1939 	}
1940 
1941 	/*
1942 	 * clean up the handle so it's ready for the next bind (i.e. if the
1943 	 * handle is reused).
1944 	 */
1945 	rootnex_clean_dmahdl(hp);
1946 
1947 	if (rootnex_state->r_dvma_call_list_id)
1948 		ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
1949 
1950 	ROOTNEX_PROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
1951 	DTRACE_PROBE1(rootnex__unbind, uint64_t,
1952 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
1953 
1954 	return (DDI_SUCCESS);
1955 }
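
/*
 * Illustrative sketch (not part of this driver): the matching teardown a
 * hypothetical driver would perform once the device is done with the buffer.
 * The unbind path above takes care of the implicit FORCPU sync of the last
 * window, frees the copy buffer/window state, and cleans the handle.
 *
 *	(void) ddi_dma_unbind_handle(hdl);
 *	ddi_dma_free_handle(&hdl);
 */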
1956 
1957 
1958 /*
1959  * rootnex_verify_buffer()
1960  *   verify buffer wasn't free'd
1961  */
1962 static int
1963 rootnex_verify_buffer(rootnex_dma_t *dma)
1964 {
1965 	page_t **pplist;
1966 	caddr_t vaddr;
1967 	uint_t pcnt;
1968 	uint_t poff;
1969 	page_t *pp;
1970 	char b;
1971 	int i;
1972 
1973 	/* Figure out how many pages this buffer occupies */
1974 	if (dma->dp_dma.dmao_type == DMA_OTYP_PAGES) {
1975 		poff = dma->dp_dma.dmao_obj.pp_obj.pp_offset & MMU_PAGEOFFSET;
1976 	} else {
1977 		vaddr = dma->dp_dma.dmao_obj.virt_obj.v_addr;
1978 		poff = (uintptr_t)vaddr & MMU_PAGEOFFSET;
1979 	}
1980 	pcnt = mmu_btopr(dma->dp_dma.dmao_size + poff);
1981 
1982 	switch (dma->dp_dma.dmao_type) {
1983 	case DMA_OTYP_PAGES:
1984 		/*
1985 		 * for a linked list of pp's walk through them to make sure
1986 		 * they're locked and not free.
1987 		 */
1988 		pp = dma->dp_dma.dmao_obj.pp_obj.pp_pp;
1989 		for (i = 0; i < pcnt; i++) {
1990 			if (PP_ISFREE(pp) || !PAGE_LOCKED(pp)) {
1991 				return (DDI_FAILURE);
1992 			}
1993 			pp = pp->p_next;
1994 		}
1995 		break;
1996 
1997 	case DMA_OTYP_VADDR:
1998 	case DMA_OTYP_BUFVADDR:
1999 		pplist = dma->dp_dma.dmao_obj.virt_obj.v_priv;
2000 		/*
2001 		 * for an array of pp's walk through them to make sure they're
2002 		 * not free. It's possible that they may not be locked.
2003 		 */
2004 		if (pplist) {
2005 			for (i = 0; i < pcnt; i++) {
2006 				if (PP_ISFREE(pplist[i])) {
2007 					return (DDI_FAILURE);
2008 				}
2009 			}
2010 
2011 		/* For a virtual address, try to peek at each page */
2012 		} else {
2013 			if (dma->dp_sglinfo.si_asp == &kas) {
2014 				for (i = 0; i < pcnt; i++) {
2015 					if (ddi_peek8(NULL, vaddr, &b) ==
2016 					    DDI_FAILURE)
2017 						return (DDI_FAILURE);
2018 					vaddr += MMU_PAGESIZE;
2019 				}
2020 			}
2021 		}
2022 		break;
2023 
2024 	default:
2025 		ASSERT(0);
2026 		break;
2027 	}
2028 
2029 	return (DDI_SUCCESS);
2030 }
2031 
2032 
2033 /*
2034  * rootnex_clean_dmahdl()
2035  *    Clean the dma handle. This should be called on handle alloc and on
2036  *    handle unbind. Set the handle state to the default settings.
2037  */
2038 static void
2039 rootnex_clean_dmahdl(ddi_dma_impl_t *hp)
2040 {
2041 	rootnex_dma_t *dma;
2042 
2043 
2044 	dma = (rootnex_dma_t *)hp->dmai_private;
2045 
2046 	hp->dmai_nwin = 0;
2047 	dma->dp_current_cookie = 0;
2048 	dma->dp_copybuf_size = 0;
2049 	dma->dp_window = NULL;
2050 	dma->dp_cbaddr = NULL;
2051 	dma->dp_inuse = B_FALSE;
2052 	dma->dp_need_to_free_cookie = B_FALSE;
2053 	dma->dp_need_to_free_window = B_FALSE;
2054 	dma->dp_partial_required = B_FALSE;
2055 	dma->dp_trim_required = B_FALSE;
2056 	dma->dp_sglinfo.si_copybuf_req = 0;
2057 #if !defined(__amd64)
2058 	dma->dp_cb_remaping = B_FALSE;
2059 	dma->dp_kva = NULL;
2060 #endif
2061 
2062 	/* FMA related initialization */
2063 	hp->dmai_fault = 0;
2064 	hp->dmai_fault_check = NULL;
2065 	hp->dmai_fault_notify = NULL;
2066 	hp->dmai_error.err_ena = 0;
2067 	hp->dmai_error.err_status = DDI_FM_OK;
2068 	hp->dmai_error.err_expected = DDI_FM_ERR_UNEXPECTED;
2069 	hp->dmai_error.err_ontrap = NULL;
2070 	hp->dmai_error.err_fep = NULL;
2071 	hp->dmai_error.err_cf = NULL;
2072 }
2073 
2074 
2075 /*
2076  * rootnex_valid_alloc_parms()
2077  *    Called in ddi_dma_alloc_handle path to validate its parameters.
2078  */
2079 static int
2080 rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegmentsize)
2081 {
2082 	if ((attr->dma_attr_seg < MMU_PAGEOFFSET) ||
2083 	    (attr->dma_attr_count_max < MMU_PAGEOFFSET) ||
2084 	    (attr->dma_attr_granular > MMU_PAGESIZE) ||
2085 	    (attr->dma_attr_maxxfer < MMU_PAGESIZE)) {
2086 		return (DDI_DMA_BADATTR);
2087 	}
2088 
2089 	if (attr->dma_attr_addr_hi <= attr->dma_attr_addr_lo) {
2090 		return (DDI_DMA_BADATTR);
2091 	}
2092 
2093 	if ((attr->dma_attr_seg & MMU_PAGEOFFSET) != MMU_PAGEOFFSET ||
2094 	    MMU_PAGESIZE & (attr->dma_attr_granular - 1) ||
2095 	    attr->dma_attr_sgllen <= 0) {
2096 		return (DDI_DMA_BADATTR);
2097 	}
2098 
2099 	/* We should be able to DMA into every byte offset in a page */
2100 	if (maxsegmentsize < MMU_PAGESIZE) {
2101 		return (DDI_DMA_BADATTR);
2102 	}
2103 
2104 	return (DDI_SUCCESS);
2105 }
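
/*
 * Illustrative sketch (assumed example values): a ddi_dma_attr_t that would
 * satisfy the checks above on a 4K-page system, together with a
 * maxsegmentsize of at least a page. The seg and count_max limits cover at
 * least a page, the low bits of dma_attr_seg are all ones, the granularity
 * divides the page size, maxxfer is at least a page, and sgllen is positive.
 *
 *	ddi_dma_attr_t attr;
 *
 *	attr.dma_attr_version = DMA_ATTR_V0;
 *	attr.dma_attr_addr_lo = 0x0;
 *	attr.dma_attr_addr_hi = 0xFFFFFFFFull;
 *	attr.dma_attr_count_max = 0xFFFFFFFFull;
 *	attr.dma_attr_align = MMU_PAGESIZE;
 *	attr.dma_attr_burstsizes = 0x3F;
 *	attr.dma_attr_minxfer = 1;
 *	attr.dma_attr_maxxfer = 0xFFFFFFFFull;
 *	attr.dma_attr_seg = 0xFFFFFFFFull;
 *	attr.dma_attr_sgllen = 17;
 *	attr.dma_attr_granular = 512;
 *	attr.dma_attr_flags = 0;
 */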
2106 
2107 
2108 /*
2109  * rootnex_valid_bind_parms()
2110  *    Called in ddi_dma_*_bind_handle path to validate its parameters.
2111  */
2112 /* ARGSUSED */
2113 static int
2114 rootnex_valid_bind_parms(ddi_dma_req_t *dmareq, ddi_dma_attr_t *attr)
2115 {
2116 #if !defined(__amd64)
2117 	/*
2118 	 * we only support up to a 2G-1 transfer size on 32-bit kernels so
2119 	 * we can track the offset for the obsoleted interfaces.
2120 	 */
2121 	if (dmareq->dmar_object.dmao_size > 0x7FFFFFFF) {
2122 		return (DDI_DMA_TOOBIG);
2123 	}
2124 #endif
2125 
2126 	return (DDI_SUCCESS);
2127 }
2128 
2129 
2130 /*
2131  * rootnex_get_sgl()
2132  *    Called in bind fastpath to get the sgl. Most of this will be replaced
2133  *    with a call to the vm layer when vm2.0 comes around...
2134  */
2135 static void
2136 rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
2137     rootnex_sglinfo_t *sglinfo)
2138 {
2139 	ddi_dma_atyp_t buftype;
2140 	uint64_t last_page;
2141 	uint64_t offset;
2142 	uint64_t addrhi;
2143 	uint64_t addrlo;
2144 	uint64_t maxseg;
2145 	page_t **pplist;
2146 	uint64_t paddr;
2147 	uint32_t psize;
2148 	uint32_t size;
2149 	caddr_t vaddr;
2150 	uint_t pcnt;
2151 	page_t *pp;
2152 	uint_t cnt;
2153 
2154 
2155 	/* shortcuts */
2156 	pplist = dmar_object->dmao_obj.virt_obj.v_priv;
2157 	vaddr = dmar_object->dmao_obj.virt_obj.v_addr;
2158 	maxseg = sglinfo->si_max_cookie_size;
2159 	buftype = dmar_object->dmao_type;
2160 	addrhi = sglinfo->si_max_addr;
2161 	addrlo = sglinfo->si_min_addr;
2162 	size = dmar_object->dmao_size;
2163 
2164 	pcnt = 0;
2165 	cnt = 0;
2166 
2167 	/*
2168 	 * if we were passed down a linked list of pages, i.e. pointer to
2169 	 * page_t, use this to get our physical address and buf offset.
2170 	 */
2171 	if (buftype == DMA_OTYP_PAGES) {
2172 		pp = dmar_object->dmao_obj.pp_obj.pp_pp;
2173 		ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
2174 		offset =  dmar_object->dmao_obj.pp_obj.pp_offset &
2175 		    MMU_PAGEOFFSET;
2176 		paddr = ptob64(pp->p_pagenum) + offset;
2177 		psize = MIN(size, (MMU_PAGESIZE - offset));
2178 		pp = pp->p_next;
2179 		sglinfo->si_asp = NULL;
2180 
2181 	/*
2182 	 * We weren't passed down a linked list of pages, but if we were passed
2183 	 * down an array of pages, use this to get our physical address and buf
2184 	 * offset.
2185 	 */
2186 	} else if (pplist != NULL) {
2187 		ASSERT((buftype == DMA_OTYP_VADDR) ||
2188 		    (buftype == DMA_OTYP_BUFVADDR));
2189 
2190 		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
2191 		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
2192 		if (sglinfo->si_asp == NULL) {
2193 			sglinfo->si_asp = &kas;
2194 		}
2195 
2196 		ASSERT(!PP_ISFREE(pplist[pcnt]));
2197 		paddr = ptob64(pplist[pcnt]->p_pagenum);
2198 		paddr += offset;
2199 		psize = MIN(size, (MMU_PAGESIZE - offset));
2200 		pcnt++;
2201 
2202 	/*
2203 	 * All we have is a virtual address, we'll need to call into the VM
2204 	 * to get the physical address.
2205 	 */
2206 	} else {
2207 		ASSERT((buftype == DMA_OTYP_VADDR) ||
2208 		    (buftype == DMA_OTYP_BUFVADDR));
2209 
2210 		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
2211 		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
2212 		if (sglinfo->si_asp == NULL) {
2213 			sglinfo->si_asp = &kas;
2214 		}
2215 
2216 		paddr = ptob64(hat_getpfnum(sglinfo->si_asp->a_hat, vaddr));
2217 		paddr += offset;
2218 		psize = MIN(size, (MMU_PAGESIZE - offset));
2219 		vaddr += psize;
2220 	}
2221 
2222 	/*
2223 	 * Setup the first cookie with the physical address of the page and the
2224 	 * size of the page (which takes into account the initial offset into
2225 	 * the page).
2226 	 */
2227 	sgl[cnt].dmac_laddress = paddr;
2228 	sgl[cnt].dmac_size = psize;
2229 	sgl[cnt].dmac_type = 0;
2230 
2231 	/*
2232 	 * Save away the buffer offset into the page. We'll need this later in
2233 	 * the copy buffer code to help figure out the page index within the
2234 	 * buffer and the offset into the current page.
2235 	 */
2236 	sglinfo->si_buf_offset = offset;
2237 
2238 	/*
2239 	 * If the DMA engine can't reach the physical address, increase how
2240 	 * much copy buffer we need. We always increase by pagesize so we don't
2241 	 * have to worry about converting offsets. Set a flag in the cookie's
2242 	 * dmac_type to indicate that it uses the copy buffer. If this isn't the
2243 	 * last cookie, go to the next cookie (since we separate each page which
2244 	 * uses the copy buffer, in case the copy buffer is not physically
2245 	 * contiguous).
2246 	 */
2247 	if ((paddr < addrlo) || ((paddr + psize) > addrhi)) {
2248 		sglinfo->si_copybuf_req += MMU_PAGESIZE;
2249 		sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
2250 		if ((cnt + 1) < sglinfo->si_max_pages) {
2251 			cnt++;
2252 			sgl[cnt].dmac_laddress = 0;
2253 			sgl[cnt].dmac_size = 0;
2254 			sgl[cnt].dmac_type = 0;
2255 		}
2256 	}
2257 
2258 	/*
2259 	 * save this page's physical address so we can figure out if the next
2260 	 * page is physically contiguous. Keep decrementing size until we are
2261 	 * done with the buffer.
2262 	 */
2263 	last_page = paddr & MMU_PAGEMASK;
2264 	size -= psize;
2265 
2266 	while (size > 0) {
2267 		/* Get the size for this page (i.e. partial or full page) */
2268 		psize = MIN(size, MMU_PAGESIZE);
2269 
2270 		if (buftype == DMA_OTYP_PAGES) {
2271 			/* get the paddr from the page_t */
2272 			ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
2273 			paddr = ptob64(pp->p_pagenum);
2274 			pp = pp->p_next;
2275 		} else if (pplist != NULL) {
2276 			/* index into the array of page_t's to get the paddr */
2277 			ASSERT(!PP_ISFREE(pplist[pcnt]));
2278 			paddr = ptob64(pplist[pcnt]->p_pagenum);
2279 			pcnt++;
2280 		} else {
2281 			/* call into the VM to get the paddr */
2282 			paddr =  ptob64(hat_getpfnum(sglinfo->si_asp->a_hat,
2283 			    vaddr));
2284 			vaddr += psize;
2285 		}
2286 
2287 		/* check to see if this page needs the copy buffer */
2288 		if ((paddr < addrlo) || ((paddr + psize) > addrhi)) {
2289 			sglinfo->si_copybuf_req += MMU_PAGESIZE;
2290 
2291 			/*
2292 			 * if there is something in the current cookie, go to
2293 			 * the next one. We only want one page in a cookie which
2294 			 * uses the copybuf since the copybuf doesn't have to
2295 			 * be physically contiguous.
2296 			 */
2297 			if (sgl[cnt].dmac_size != 0) {
2298 				cnt++;
2299 			}
2300 			sgl[cnt].dmac_laddress = paddr;
2301 			sgl[cnt].dmac_size = psize;
2302 #if defined(__amd64)
2303 			sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
2304 #else
2305 			/*
2306 			 * save the buf offset for 32-bit kernel. used in the
2307 			 * obsoleted interfaces.
2308 			 */
2309 			sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF |
2310 			    (dmar_object->dmao_size - size);
2311 #endif
2312 			/* if this isn't the last cookie, go to the next one */
2313 			if ((cnt + 1) < sglinfo->si_max_pages) {
2314 				cnt++;
2315 				sgl[cnt].dmac_laddress = 0;
2316 				sgl[cnt].dmac_size = 0;
2317 				sgl[cnt].dmac_type = 0;
2318 			}
2319 
2320 		/*
2321 		 * this page didn't need the copy buffer. Start a new cookie if
2322 		 * it's not physically contiguous with the last page, or it would
2323 		 * put us over a segment boundary, or it puts us over the max
2324 		 * cookie size, or the current cookie doesn't have anything in it.
2325 		 */
2326 		} else if (((last_page + MMU_PAGESIZE) != paddr) ||
2327 		    !(paddr & sglinfo->si_segmask) ||
2328 		    ((sgl[cnt].dmac_size + psize) > maxseg) ||
2329 		    (sgl[cnt].dmac_size == 0)) {
2330 			/*
2331 			 * if we're not already in a new cookie, go to the next
2332 			 * cookie.
2333 			 */
2334 			if (sgl[cnt].dmac_size != 0) {
2335 				cnt++;
2336 			}
2337 
2338 			/* save the cookie information */
2339 			sgl[cnt].dmac_laddress = paddr;
2340 			sgl[cnt].dmac_size = psize;
2341 #if defined(__amd64)
2342 			sgl[cnt].dmac_type = 0;
2343 #else
2344 			/*
2345 			 * save the buf offset for 32-bit kernel. used in the
2346 			 * obsoleted interfaces.
2347 			 */
2348 			sgl[cnt].dmac_type = dmar_object->dmao_size - size;
2349 #endif
2350 
2351 		/*
2352 		 * this page didn't need the copy buffer, it is physically
2353 		 * contiguous with the last page, and it's <= the max cookie
2354 		 * size.
2355 		 */
2356 		} else {
2357 			sgl[cnt].dmac_size += psize;
2358 
2359 			/*
2360 			 * if this exactly == the maximum cookie size, and
2361 			 * it isn't the last cookie, go to the next cookie.
2362 			 */
2363 			if (((sgl[cnt].dmac_size + psize) == maxseg) &&
2364 			    ((cnt + 1) < sglinfo->si_max_pages)) {
2365 				cnt++;
2366 				sgl[cnt].dmac_laddress = 0;
2367 				sgl[cnt].dmac_size = 0;
2368 				sgl[cnt].dmac_type = 0;
2369 			}
2370 		}
2371 
2372 		/*
2373 		 * save this page's physical address so we can figure out if the
2374 		 * next page is physically contiguous. Keep decrementing size
2375 		 * until we are done with the buffer.
2376 		 */
2377 		last_page = paddr;
2378 		size -= psize;
2379 	}
2380 
2381 	/* we're done, save away how many cookies the sgl has */
2382 	if (sgl[cnt].dmac_size == 0) {
2383 		ASSERT(cnt < sglinfo->si_max_pages);
2384 		sglinfo->si_sgl_size = cnt;
2385 	} else {
2386 		sglinfo->si_sgl_size = cnt + 1;
2387 	}
2388 }
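
/*
 * Worked example for the sgl code above (all numbers are assumptions for
 * illustration): a 0x2800 byte buffer that starts 0x800 bytes into its first
 * page and maps to physical pages 0x10000000, 0x10001000 and 0x30000000,
 * with si_max_addr (addrhi) at 0x1FFFFFFF and a large maxseg and segmask:
 *
 *	cookie[0]: dmac_laddress 0x10000800, dmac_size 0x1800
 *	           (the first two pages are physically contiguous and merge)
 *	cookie[1]: dmac_laddress 0x30000000, dmac_size 0x1000
 *	           (beyond addrhi, so ROOTNEX_USES_COPYBUF is set,
 *	           si_copybuf_req grows by MMU_PAGESIZE, and the address is
 *	           later redirected to a copy buffer page in setup_cookie)
 *
 * si_sgl_size ends up as 2 and the bind falls through to the slowpath
 * because si_copybuf_req != 0.
 */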
2389 
2390 
2391 /*
2392  * rootnex_bind_slowpath()
2393  *    Called in the bind path if the calling driver can't use the sgl without
2394  *    modifying it. We either need to use the copy buffer and/or we will end up
2395  *    with a partial bind.
2396  */
2397 static int
2398 rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
2399     rootnex_dma_t *dma, ddi_dma_attr_t *attr, int kmflag)
2400 {
2401 	rootnex_sglinfo_t *sinfo;
2402 	rootnex_window_t *window;
2403 	ddi_dma_cookie_t *cookie;
2404 	size_t copybuf_used;
2405 	size_t dmac_size;
2406 	boolean_t partial;
2407 	off_t cur_offset;
2408 	page_t *cur_pp;
2409 	major_t mnum;
2410 	int e;
2411 	int i;
2412 
2413 
2414 	sinfo = &dma->dp_sglinfo;
2415 	copybuf_used = 0;
2416 	partial = B_FALSE;
2417 
2418 	/*
2419 	 * If we're using the copybuf, set the copybuf state in dma struct.
2420 	 * Needs to be first since it sets the copy buffer size.
2421 	 */
2422 	if (sinfo->si_copybuf_req != 0) {
2423 		e = rootnex_setup_copybuf(hp, dmareq, dma, attr);
2424 		if (e != DDI_SUCCESS) {
2425 			return (e);
2426 		}
2427 	} else {
2428 		dma->dp_copybuf_size = 0;
2429 	}
2430 
2431 	/*
2432 	 * Figure out if we need to do a partial mapping. If so, figure out
2433 	 * if we need to trim the buffers when we munge the sgl.
2434 	 */
2435 	if ((dma->dp_copybuf_size < sinfo->si_copybuf_req) ||
2436 	    (dma->dp_dma.dmao_size > dma->dp_maxxfer) ||
2437 	    (attr->dma_attr_sgllen < sinfo->si_sgl_size)) {
2438 		dma->dp_partial_required = B_TRUE;
2439 		if (attr->dma_attr_granular != 1) {
2440 			dma->dp_trim_required = B_TRUE;
2441 		}
2442 	} else {
2443 		dma->dp_partial_required = B_FALSE;
2444 		dma->dp_trim_required = B_FALSE;
2445 	}
2446 
2447 	/* If we need to do a partial bind, make sure the driver supports it */
2448 	if (dma->dp_partial_required &&
2449 	    !(dmareq->dmar_flags & DDI_DMA_PARTIAL)) {
2450 
2451 		mnum = ddi_driver_major(dma->dp_dip);
2452 		/*
2453 		 * patchable which allows us to print one warning per major
2454 		 * number.
2455 		 */
2456 		if ((rootnex_bind_warn) &&
2457 		    ((rootnex_warn_list[mnum] & ROOTNEX_BIND_WARNING) == 0)) {
2458 			rootnex_warn_list[mnum] |= ROOTNEX_BIND_WARNING;
2459 			cmn_err(CE_WARN, "!%s: coding error detected, the "
2460 			    "driver is using ddi_dma_attr(9S) incorrectly. "
2461 			    "There is a small risk of data corruption in "
2462 			    "particular with large I/Os. The driver should be "
2463 			    "replaced with a corrected version for proper "
2464 			    "system operation. To disable this warning, add "
2465 			    "'set rootnex:rootnex_bind_warn=0' to "
2466 			    "/etc/system(4).", ddi_driver_name(dma->dp_dip));
2467 		}
2468 		return (DDI_DMA_TOOBIG);
2469 	}
2470 
2471 	/*
2472 	 * we might need multiple windows, setup state to handle them. In this
2473 	 * code path, we will have at least one window.
2474 	 */
2475 	e = rootnex_setup_windows(hp, dma, attr, kmflag);
2476 	if (e != DDI_SUCCESS) {
2477 		rootnex_teardown_copybuf(dma);
2478 		return (e);
2479 	}
2480 
2481 	window = &dma->dp_window[0];
2482 	cookie = &dma->dp_cookies[0];
2483 	cur_offset = 0;
2484 	rootnex_init_win(hp, dma, window, cookie, cur_offset);
2485 	if (dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) {
2486 		cur_pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp;
2487 	}
2488 
2489 	/* loop through all the cookies we got back from get_sgl() */
2490 	for (i = 0; i < sinfo->si_sgl_size; i++) {
2491 		/*
2492 		 * If we're using the copy buffer, check this cookie and setup
2493 		 * its associated copy buffer state. If this cookie uses the
2494 		 * copy buffer, make sure we sync this window during dma_sync.
2495 		 */
2496 		if (dma->dp_copybuf_size > 0) {
2497 			rootnex_setup_cookie(&dmareq->dmar_object, dma, cookie,
2498 			    cur_offset, &copybuf_used, &cur_pp);
2499 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
2500 				window->wd_dosync = B_TRUE;
2501 			}
2502 		}
2503 
2504 		/*
2505 		 * save away the cookie size, since it could be modified in
2506 		 * the windowing code.
2507 		 */
2508 		dmac_size = cookie->dmac_size;
2509 
2510 		/* if we went over max copybuf size */
2511 		if (dma->dp_copybuf_size &&
2512 		    (copybuf_used > dma->dp_copybuf_size)) {
2513 			partial = B_TRUE;
2514 			e = rootnex_copybuf_window_boundary(hp, dma, &window,
2515 			    cookie, cur_offset, &copybuf_used);
2516 			if (e != DDI_SUCCESS) {
2517 				rootnex_teardown_copybuf(dma);
2518 				rootnex_teardown_windows(dma);
2519 				return (e);
2520 			}
2521 
2522 			/*
2523 			 * if the cookie uses the copy buffer, make sure the
2524 			 * new window we just moved to is set to sync.
2525 			 */
2526 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
2527 				window->wd_dosync = B_TRUE;
2528 			}
2529 			DTRACE_PROBE1(rootnex__copybuf__window, dev_info_t *,
2530 			    dma->dp_dip);
2531 
2532 		/* if the cookie cnt == max sgllen, move to the next window */
2533 		} else if (window->wd_cookie_cnt >= attr->dma_attr_sgllen) {
2534 			partial = B_TRUE;
2535 			ASSERT(window->wd_cookie_cnt == attr->dma_attr_sgllen);
2536 			e = rootnex_sgllen_window_boundary(hp, dma, &window,
2537 			    cookie, attr, cur_offset);
2538 			if (e != DDI_SUCCESS) {
2539 				rootnex_teardown_copybuf(dma);
2540 				rootnex_teardown_windows(dma);
2541 				return (e);
2542 			}
2543 
2544 			/*
2545 			 * if the cookie uses the copy buffer, make sure the
2546 			 * new window we just moved to is set to sync.
2547 			 */
2548 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
2549 				window->wd_dosync = B_TRUE;
2550 			}
2551 			DTRACE_PROBE1(rootnex__sgllen__window, dev_info_t *,
2552 			    dma->dp_dip);
2553 
2554 		/* else if we will be over maxxfer */
2555 		} else if ((window->wd_size + dmac_size) >
2556 		    dma->dp_maxxfer) {
2557 			partial = B_TRUE;
2558 			e = rootnex_maxxfer_window_boundary(hp, dma, &window,
2559 			    cookie);
2560 			if (e != DDI_SUCCESS) {
2561 				rootnex_teardown_copybuf(dma);
2562 				rootnex_teardown_windows(dma);
2563 				return (e);
2564 			}
2565 
2566 			/*
2567 			 * if the cookie uses the copy buffer, make sure the
2568 			 * new window we just moved to is set to sync.
2569 			 */
2570 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
2571 				window->wd_dosync = B_TRUE;
2572 			}
2573 			DTRACE_PROBE1(rootnex__maxxfer__window, dev_info_t *,
2574 			    dma->dp_dip);
2575 
2576 		/* else this cookie fits in the current window */
2577 		} else {
2578 			window->wd_cookie_cnt++;
2579 			window->wd_size += dmac_size;
2580 		}
2581 
2582 		/* track our offset into the buffer, go to the next cookie */
2583 		ASSERT(dmac_size <= dma->dp_dma.dmao_size);
2584 		ASSERT(cookie->dmac_size <= dmac_size);
2585 		cur_offset += dmac_size;
2586 		cookie++;
2587 	}
2588 
2589 	/* if we ended up with a zero sized window in the end, clean it up */
2590 	if (window->wd_size == 0) {
2591 		hp->dmai_nwin--;
2592 		window--;
2593 	}
2594 
2595 	ASSERT(window->wd_trim.tr_trim_last == B_FALSE);
2596 
2597 	if (!partial) {
2598 		return (DDI_DMA_MAPPED);
2599 	}
2600 
2601 	ASSERT(dma->dp_partial_required);
2602 	return (DDI_DMA_PARTIAL_MAP);
2603 }
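
/*
 * Illustrative sketch (not part of this driver): when the slowpath returns
 * DDI_DMA_PARTIAL_MAP, a hypothetical driver that passed DDI_DMA_PARTIAL
 * walks the windows one at a time. xx_do_transfer() is an assumption used
 * only for illustration.
 *
 *	ddi_dma_cookie_t cookie;
 *	uint_t nwin, win, ccount;
 *	off_t off;
 *	size_t len;
 *
 *	(void) ddi_dma_numwin(hdl, &nwin);
 *	for (win = 0; win < nwin; win++) {
 *		if (ddi_dma_getwin(hdl, win, &off, &len, &cookie,
 *		    &ccount) != DDI_SUCCESS)
 *			break;
 *		xx_do_transfer(sc, hdl, &cookie, ccount, off, len);
 *	}
 */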
2604 
2605 
2606 /*
2607  * rootnex_setup_copybuf()
2608  *    Called in bind slowpath. Figures out if we're going to use the copy
2609  *    buffer, and if we do, sets up the basic state to handle it.
2610  */
2611 static int
2612 rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
2613     rootnex_dma_t *dma, ddi_dma_attr_t *attr)
2614 {
2615 	rootnex_sglinfo_t *sinfo;
2616 	ddi_dma_attr_t lattr;
2617 	size_t max_copybuf;
2618 	int cansleep;
2619 	int e;
2620 #if !defined(__amd64)
2621 	int vmflag;
2622 #endif
2623 
2624 
2625 	sinfo = &dma->dp_sglinfo;
2626 
2627 	/*
2628 	 * read this first so it's consistent throughout the routine, since it
2629 	 * can be patched on the fly.
2630 	 */
2631 	max_copybuf = rootnex_max_copybuf_size & MMU_PAGEMASK;
2632 
2633 	/* We need to call into the rootnex on ddi_dma_sync() */
2634 	hp->dmai_rflags &= ~DMP_NOSYNC;
2635 
2636 	/* make sure the copybuf size <= the max size */
2637 	dma->dp_copybuf_size = MIN(sinfo->si_copybuf_req, max_copybuf);
2638 	ASSERT((dma->dp_copybuf_size & MMU_PAGEOFFSET) == 0);
2639 
2640 #if !defined(__amd64)
2641 	/*
2642 	 * if we don't have kva space to copy to/from, allocate the KVA space
2643 	 * now. We only do this for the 32-bit kernel. We use seg kpm space for
2644 	 * the 64-bit kernel.
2645 	 */
2646 	if ((dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) ||
2647 	    (dmareq->dmar_object.dmao_obj.virt_obj.v_as != NULL)) {
2648 
2649 		/* convert the sleep flags */
2650 		if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
2651 			vmflag = VM_SLEEP;
2652 		} else {
2653 			vmflag = VM_NOSLEEP;
2654 		}
2655 
2656 		/* allocate Kernel VA space that we can bcopy to/from */
2657 		dma->dp_kva = vmem_alloc(heap_arena, dma->dp_copybuf_size,
2658 		    vmflag);
2659 		if (dma->dp_kva == NULL) {
2660 			return (DDI_DMA_NORESOURCES);
2661 		}
2662 	}
2663 #endif
2664 
2665 	/* convert the sleep flags */
2666 	if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
2667 		cansleep = 1;
2668 	} else {
2669 		cansleep = 0;
2670 	}
2671 
2672 	/*
2673 	 * Allocate the actual copy buffer. This needs to fit within the DMA
2674 	 * engine's limits, so we can't use kmem_alloc...
2675 	 */
2676 	lattr = *attr;
2677 	lattr.dma_attr_align = MMU_PAGESIZE;
2678 	e = i_ddi_mem_alloc(dma->dp_dip, &lattr, dma->dp_copybuf_size, cansleep,
2679 	    0, NULL, &dma->dp_cbaddr, &dma->dp_cbsize, NULL);
2680 	if (e != DDI_SUCCESS) {
2681 #if !defined(__amd64)
2682 		if (dma->dp_kva != NULL) {
2683 			vmem_free(heap_arena, dma->dp_kva,
2684 			    dma->dp_copybuf_size);
2685 		}
2686 #endif
2687 		return (DDI_DMA_NORESOURCES);
2688 	}
2689 
2690 	DTRACE_PROBE2(rootnex__alloc__copybuf, dev_info_t *, dma->dp_dip,
2691 	    size_t, dma->dp_copybuf_size);
2692 
2693 	return (DDI_SUCCESS);
2694 }
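
/*
 * Illustrative sketch (not part of this driver): once a bind uses the copy
 * buffer, DMP_NOSYNC is cleared above and a hypothetical driver must bracket
 * each transfer with ddi_dma_sync() so rootnex_dma_sync() can bcopy between
 * the driver's buffer and the copy buffer pages. xx_start_io() is an
 * assumed helper.
 *
 *	(void) ddi_dma_sync(hdl, 0, len, DDI_DMA_SYNC_FORDEV);
 *	xx_start_io(sc);
 *	...
 *	(void) ddi_dma_sync(hdl, 0, len, DDI_DMA_SYNC_FORCPU);
 */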
2695 
2696 
2697 /*
2698  * rootnex_setup_windows()
2699  *    Called in bind slowpath to setup the window state. We always have windows
2700  *    in the slowpath, even if the window count is 1.
2701  */
2702 static int
2703 rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
2704     ddi_dma_attr_t *attr, int kmflag)
2705 {
2706 	rootnex_window_t *windowp;
2707 	rootnex_sglinfo_t *sinfo;
2708 	size_t copy_state_size;
2709 	size_t win_state_size;
2710 	size_t state_available;
2711 	size_t space_needed;
2712 	uint_t copybuf_win;
2713 	uint_t maxxfer_win;
2714 	size_t space_used;
2715 	uint_t sglwin;
2716 
2717 
2718 	sinfo = &dma->dp_sglinfo;
2719 
2720 	dma->dp_current_win = 0;
2721 	hp->dmai_nwin = 0;
2722 
2723 	/* If we don't need to do a partial, we only have one window */
2724 	if (!dma->dp_partial_required) {
2725 		dma->dp_max_win = 1;
2726 
2727 	/*
2728 	 * we need multiple windows, need to figure out the worst case number
2729 	 * of windows.
2730 	 */
2731 	} else {
2732 		/*
2733 		 * if we need windows because we need more copy buffer than
2734 		 * we allow, the worst case number of windows we could need
2735 		 * here would be (copybuf space required / copybuf space that
2736 		 * we have) plus one for remainder, and plus 2 to handle the
2737 		 * extra pages on the trim for the first and last pages of the
2738 		 * buffer (a page is the minimum window size so under the right
2739 		 * attr settings, you could have a window for each page).
2740 		 * The last page will only be hit here if the size is not a
2741 		 * multiple of the granularity (which theoretically shouldn't
2742 		 * be the case but never has been enforced, so we could have
2743 		 * broken things without it).
2744 		 */
2745 		if (sinfo->si_copybuf_req > dma->dp_copybuf_size) {
2746 			ASSERT(dma->dp_copybuf_size > 0);
2747 			copybuf_win = (sinfo->si_copybuf_req /
2748 			    dma->dp_copybuf_size) + 1 + 2;
2749 		} else {
2750 			copybuf_win = 0;
2751 		}
2752 
2753 		/*
2754 		 * if we need windows because we have more cookies than the H/W
2755 		 * can handle, the number of windows we would need here would
2756 		 * be (cookie count / cookie count H/W supports) plus one for
2757 		 * remainder, and plus 2 to handle the extra pages on the trim
2758 		 * (see above comment about trim)
2759 		 */
2760 		if (attr->dma_attr_sgllen < sinfo->si_sgl_size) {
2761 			sglwin = ((sinfo->si_sgl_size / attr->dma_attr_sgllen)
2762 			    + 1) + 2;
2763 		} else {
2764 			sglwin = 0;
2765 		}
2766 
2767 		/*
2768 		 * if we need windows because we're binding more memory than the
2769 		 * H/W can transfer at once, the number of windows we would need
2770 		 * here would be (xfer count / max xfer H/W supports) plus one
2771 		 * for remainder, and plus 2 to handle the extra pages on the
2772 		 * trim (see above comment about trim)
2773 		 */
2774 		if (dma->dp_dma.dmao_size > dma->dp_maxxfer) {
2775 			maxxfer_win = (dma->dp_dma.dmao_size /
2776 			    dma->dp_maxxfer) + 1 + 2;
2777 		} else {
2778 			maxxfer_win = 0;
2779 		}
2780 		dma->dp_max_win =  copybuf_win + sglwin + maxxfer_win;
2781 		ASSERT(dma->dp_max_win > 0);
2782 	}
2783 	win_state_size = dma->dp_max_win * sizeof (rootnex_window_t);
2784 
2785 	/*
2786 	 * Get space for window and potential copy buffer state. Before we
2787 	 * go and allocate memory, see if we can get away with using what's
2788 	 * left in the pre-allocated state or the dynamically allocated sgl.
2789 	 */
2790 	space_used = (uintptr_t)(sinfo->si_sgl_size *
2791 	    sizeof (ddi_dma_cookie_t));
2792 
2793 	/* if we dynamically allocated space for the cookies */
2794 	if (dma->dp_need_to_free_cookie) {
2795 		/* if we have more space in the pre-allocated buffer, use it */
2796 		ASSERT(space_used <= dma->dp_cookie_size);
2797 		if ((dma->dp_cookie_size - space_used) <=
2798 		    rootnex_state->r_prealloc_size) {
2799 			state_available = rootnex_state->r_prealloc_size;
2800 			windowp = (rootnex_window_t *)dma->dp_prealloc_buffer;
2801 
2802 		/*
2803 		 * else, we have more free space in the dynamically allocated
2804 		 * buffer, i.e. the buffer wasn't worst case fragmented so we
2805 		 * didn't need a lot of cookies.
2806 		 */
2807 		} else {
2808 			state_available = dma->dp_cookie_size - space_used;
2809 			windowp = (rootnex_window_t *)
2810 			    &dma->dp_cookies[sinfo->si_sgl_size];
2811 		}
2812 
2813 	/* we used the pre-allocated buffer */
2814 	} else {
2815 		ASSERT(space_used <= rootnex_state->r_prealloc_size);
2816 		state_available = rootnex_state->r_prealloc_size - space_used;
2817 		windowp = (rootnex_window_t *)
2818 		    &dma->dp_cookies[sinfo->si_sgl_size];
2819 	}
2820 
2821 	/*
2822 	 * figure out how much state we need to track the copy buffer. Add an
2823 	 * additional 8 bytes for pointer alignment later.
2824 	 */
2825 	if (dma->dp_copybuf_size > 0) {
2826 		copy_state_size = sinfo->si_max_pages *
2827 		    sizeof (rootnex_pgmap_t);
2828 	} else {
2829 		copy_state_size = 0;
2830 	}
2831 	/* add an additional 8 bytes for pointer alignment */
2832 	space_needed = win_state_size + copy_state_size + 0x8;
2833 
2834 	/* if we have enough space already, use it */
2835 	if (state_available >= space_needed) {
2836 		dma->dp_window = windowp;
2837 		dma->dp_need_to_free_window = B_FALSE;
2838 
2839 	/* not enough space, need to allocate more. */
2840 	} else {
2841 		dma->dp_window = kmem_alloc(space_needed, kmflag);
2842 		if (dma->dp_window == NULL) {
2843 			return (DDI_DMA_NORESOURCES);
2844 		}
2845 		dma->dp_need_to_free_window = B_TRUE;
2846 		dma->dp_window_size = space_needed;
2847 		DTRACE_PROBE2(rootnex__bind__sp__alloc, dev_info_t *,
2848 		    dma->dp_dip, size_t, space_needed);
2849 	}
2850 
2851 	/*
2852 	 * we allocate copy buffer state and window state at the same time.
2853 	 * setup our copy buffer state pointers. Make sure it's aligned.
2854 	 */
2855 	if (dma->dp_copybuf_size > 0) {
2856 		dma->dp_pgmap = (rootnex_pgmap_t *)(((uintptr_t)
2857 		    &dma->dp_window[dma->dp_max_win] + 0x7) & ~0x7);
2858 
2859 #if !defined(__amd64)
2860 		/*
2861 		 * make sure all pm_mapped, pm_vaddr, and pm_pp are set to
2862 		 * false/NULL. Should be quicker to bzero vs loop and set.
2863 		 */
2864 		bzero(dma->dp_pgmap, copy_state_size);
2865 #endif
2866 	} else {
2867 		dma->dp_pgmap = NULL;
2868 	}
2869 
2870 	return (DDI_SUCCESS);
2871 }
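
/*
 * Worked example of the worst case window count above (assumed numbers):
 * si_copybuf_req = 0x500000 with dp_copybuf_size = 0x100000 gives
 * copybuf_win = (0x500000 / 0x100000) + 1 + 2 = 8; si_sgl_size = 300 with
 * dma_attr_sgllen = 64 gives sglwin = ((300 / 64) + 1) + 2 = 7; a 10MB bind
 * with dp_maxxfer = 4MB gives maxxfer_win = (10MB / 4MB) + 1 + 2 = 5.
 * dp_max_win is then 8 + 7 + 5 = 20 windows worth of state.
 */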
2872 
2873 
2874 /*
2875  * rootnex_teardown_copybuf()
2876  *    cleans up after rootnex_setup_copybuf()
2877  */
2878 static void
2879 rootnex_teardown_copybuf(rootnex_dma_t *dma)
2880 {
2881 #if !defined(__amd64)
2882 	int i;
2883 
2884 	/*
2885 	 * if we allocated kernel heap VMEM space, go through all the pages and
2886 	 * map out any of the ones that were mapped into the kernel heap VMEM
2887 	 * arena. Then free the VMEM space.
2888 	 */
2889 	if (dma->dp_kva != NULL) {
2890 		for (i = 0; i < dma->dp_sglinfo.si_max_pages; i++) {
2891 			if (dma->dp_pgmap[i].pm_mapped) {
2892 				hat_unload(kas.a_hat, dma->dp_pgmap[i].pm_kaddr,
2893 				    MMU_PAGESIZE, HAT_UNLOAD);
2894 				dma->dp_pgmap[i].pm_mapped = B_FALSE;
2895 			}
2896 		}
2897 
2898 		vmem_free(heap_arena, dma->dp_kva, dma->dp_copybuf_size);
2899 	}
2900 
2901 #endif
2902 
2903 	/* if we allocated a copy buffer, free it */
2904 	if (dma->dp_cbaddr != NULL) {
2905 		i_ddi_mem_free(dma->dp_cbaddr, NULL);
2906 	}
2907 }
2908 
2909 
2910 /*
2911  * rootnex_teardown_windows()
2912  *    cleans up after rootnex_setup_windows()
2913  */
2914 static void
2915 rootnex_teardown_windows(rootnex_dma_t *dma)
2916 {
2917 	/*
2918 	 * if we had to allocate window state on the last bind (because we
2919 	 * didn't have enough pre-allocated space in the handle), free it.
2920 	 */
2921 	if (dma->dp_need_to_free_window) {
2922 		kmem_free(dma->dp_window, dma->dp_window_size);
2923 	}
2924 }
2925 
2926 
2927 /*
2928  * rootnex_init_win()
2929  *    Called in bind slow path during creation of a new window. Initializes
2930  *    window state to default values.
2931  */
2932 /*ARGSUSED*/
2933 static void
2934 rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
2935     rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset)
2936 {
2937 	hp->dmai_nwin++;
2938 	window->wd_dosync = B_FALSE;
2939 	window->wd_offset = cur_offset;
2940 	window->wd_size = 0;
2941 	window->wd_first_cookie = cookie;
2942 	window->wd_cookie_cnt = 0;
2943 	window->wd_trim.tr_trim_first = B_FALSE;
2944 	window->wd_trim.tr_trim_last = B_FALSE;
2945 	window->wd_trim.tr_first_copybuf_win = B_FALSE;
2946 	window->wd_trim.tr_last_copybuf_win = B_FALSE;
2947 #if !defined(__amd64)
2948 	window->wd_remap_copybuf = dma->dp_cb_remaping;
2949 #endif
2950 }
2951 
2952 
2953 /*
2954  * rootnex_setup_cookie()
2955  *    Called in the bind slow path when the sgl uses the copy buffer. If any of
2956  *    the sgl uses the copy buffer, we need to go through each cookie, figure
2957  *    out if it uses the copy buffer, and if it does, save away everything we'll
2958  *    need during sync.
2959  */
2960 static void
2961 rootnex_setup_cookie(ddi_dma_obj_t *dmar_object, rootnex_dma_t *dma,
2962     ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used,
2963     page_t **cur_pp)
2964 {
2965 	boolean_t copybuf_sz_power_2;
2966 	rootnex_sglinfo_t *sinfo;
2967 	uint_t pidx;
2968 	uint_t pcnt;
2969 	off_t poff;
2970 #if defined(__amd64)
2971 	pfn_t pfn;
2972 #else
2973 	page_t **pplist;
2974 #endif
2975 
2976 	sinfo = &dma->dp_sglinfo;
2977 
2978 	/*
2979 	 * Calculate the page index relative to the start of the buffer. The
2980 	 * index to the current page for our buffer is the offset into the
2981 	 * first page of the buffer plus our current offset into the buffer
2982 	 * itself, shifted of course...
2983 	 */
2984 	pidx = (sinfo->si_buf_offset + cur_offset) >> MMU_PAGESHIFT;
2985 	ASSERT(pidx < sinfo->si_max_pages);
2986 
2987 	/* if this cookie uses the copy buffer */
2988 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
2989 		/*
2990 		 * NOTE: we know that since this cookie uses the copy buffer, it
2991 		 * is <= MMU_PAGESIZE.
2992 		 */
2993 
2994 		/*
2995 		 * get the offset into the page. For the 64-bit kernel, get the
2996 		 * pfn which we'll use with seg kpm.
2997 		 */
2998 		poff = cookie->_dmu._dmac_ll & MMU_PAGEOFFSET;
2999 #if defined(__amd64)
3000 		pfn = cookie->_dmu._dmac_ll >> MMU_PAGESHIFT;
3001 #endif
3002 
3003 		/* figure out if the copybuf size is a power of 2 */
3004 		if (dma->dp_copybuf_size & (dma->dp_copybuf_size - 1)) {
3005 			copybuf_sz_power_2 = B_FALSE;
3006 		} else {
3007 			copybuf_sz_power_2 = B_TRUE;
3008 		}
3009 
3010 		/* This page uses the copy buffer */
3011 		dma->dp_pgmap[pidx].pm_uses_copybuf = B_TRUE;
3012 
3013 		/*
3014 		 * save the copy buffer KVA that we'll use with this page.
3015 		 * if we still fit within the copybuf, it's a simple add.
3016 		 * otherwise, we need to wrap over using & or % accordingly.
3017 		 */
3018 		if ((*copybuf_used + MMU_PAGESIZE) <= dma->dp_copybuf_size) {
3019 			dma->dp_pgmap[pidx].pm_cbaddr = dma->dp_cbaddr +
3020 			    *copybuf_used;
3021 		} else {
3022 			if (copybuf_sz_power_2) {
3023 				dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
3024 				    (uintptr_t)dma->dp_cbaddr +
3025 				    (*copybuf_used &
3026 				    (dma->dp_copybuf_size - 1)));
3027 			} else {
3028 				dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
3029 				    (uintptr_t)dma->dp_cbaddr +
3030 				    (*copybuf_used % dma->dp_copybuf_size));
3031 			}
3032 		}
3033 
3034 		/*
3035 		 * overwrite the cookie's physical address with the physical
3036 		 * address of the copy buffer page that we will use instead of
3037 		 * the original page.
3038 		 */
3039 		cookie->_dmu._dmac_ll = ptob64(hat_getpfnum(kas.a_hat,
3040 		    dma->dp_pgmap[pidx].pm_cbaddr)) + poff;
3041 
3042 		/* if we have a kernel VA, it's easy, just save that address */
3043 		if ((dmar_object->dmao_type != DMA_OTYP_PAGES) &&
3044 		    (sinfo->si_asp == &kas)) {
3045 			/*
3046 			 * save away the page aligned virtual address of the
3047 			 * driver buffer. Offsets are handled in the sync code.
3048 			 */
3049 			dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)(((uintptr_t)
3050 			    dmar_object->dmao_obj.virt_obj.v_addr + cur_offset)
3051 			    & MMU_PAGEMASK);
3052 #if !defined(__amd64)
3053 			/*
3054 			 * we didn't need to, and will never need to, map this
3055 			 * page.
3056 			 */
3057 			dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
3058 #endif
3059 
3060 		/* we don't have a kernel VA. We need one for the bcopy. */
3061 		} else {
3062 #if defined(__amd64)
3063 			/*
3064 			 * for the 64-bit kernel, it's easy. We use seg kpm to
3065 			 * get a Kernel VA for the corresponding pfn.
3066 			 */
3067 			dma->dp_pgmap[pidx].pm_kaddr = hat_kpm_pfn2va(pfn);
3068 #else
3069 			/*
3070 			 * for the 32-bit kernel, this is a pain. First we'll
3071 			 * save away the page_t or user VA for this page. This
3072 			 * is needed in rootnex_dma_win() when we switch to a
3073 			 * new window which requires us to re-map the copy
3074 			 * buffer.
3075 			 */
3076 			pplist = dmar_object->dmao_obj.virt_obj.v_priv;
3077 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
3078 				dma->dp_pgmap[pidx].pm_pp = *cur_pp;
3079 				dma->dp_pgmap[pidx].pm_vaddr = NULL;
3080 			} else if (pplist != NULL) {
3081 				dma->dp_pgmap[pidx].pm_pp = pplist[pidx];
3082 				dma->dp_pgmap[pidx].pm_vaddr = NULL;
3083 			} else {
3084 				dma->dp_pgmap[pidx].pm_pp = NULL;
3085 				dma->dp_pgmap[pidx].pm_vaddr = (caddr_t)
3086 				    (((uintptr_t)
3087 				    dmar_object->dmao_obj.virt_obj.v_addr +
3088 				    cur_offset) & MMU_PAGEMASK);
3089 			}
3090 
3091 			/*
3092 			 * save away the page aligned virtual address which was
3093 			 * allocated from the kernel heap arena (taking into
3094 			 * account if we need more copy buffer than we allocated
3095 			 * and use multiple windows to handle this, i.e. &,%).
3096 			 * NOTE: there isn't any physical memory backing up this
3097 			 * virtual address space currently.
3098 			 */
3099 			if ((*copybuf_used + MMU_PAGESIZE) <=
3100 			    dma->dp_copybuf_size) {
3101 				dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
3102 				    (((uintptr_t)dma->dp_kva + *copybuf_used) &
3103 				    MMU_PAGEMASK);
3104 			} else {
3105 				if (copybuf_sz_power_2) {
3106 					dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
3107 					    (((uintptr_t)dma->dp_kva +
3108 					    (*copybuf_used &
3109 					    (dma->dp_copybuf_size - 1))) &
3110 					    MMU_PAGEMASK);
3111 				} else {
3112 					dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
3113 					    (((uintptr_t)dma->dp_kva +
3114 					    (*copybuf_used %
3115 					    dma->dp_copybuf_size)) &
3116 					    MMU_PAGEMASK);
3117 				}
3118 			}
3119 
3120 			/*
3121 			 * if we haven't used up the available copy buffer yet,
3122 			 * map the kva to the physical page.
3123 			 */
3124 			if (!dma->dp_cb_remaping && ((*copybuf_used +
3125 			    MMU_PAGESIZE) <= dma->dp_copybuf_size)) {
3126 				dma->dp_pgmap[pidx].pm_mapped = B_TRUE;
3127 				if (dma->dp_pgmap[pidx].pm_pp != NULL) {
3128 					i86_pp_map(dma->dp_pgmap[pidx].pm_pp,
3129 					    dma->dp_pgmap[pidx].pm_kaddr);
3130 				} else {
3131 					i86_va_map(dma->dp_pgmap[pidx].pm_vaddr,
3132 					    sinfo->si_asp,
3133 					    dma->dp_pgmap[pidx].pm_kaddr);
3134 				}
3135 
3136 			/*
3137 			 * we've used up the available copy buffer, this page
3138 			 * will have to be mapped during rootnex_dma_win() when
3139 			 * we switch to a new window which requires a re-map of
3140 			 * the copy buffer. (32-bit kernel only)
3141 			 */
3142 			} else {
3143 				dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
3144 			}
3145 #endif
3146 			/* go to the next page_t */
3147 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
3148 				*cur_pp = (*cur_pp)->p_next;
3149 			}
3150 		}
3151 
3152 		/* add to the copy buffer count */
3153 		*copybuf_used += MMU_PAGESIZE;
3154 
3155 	/*
3156 	 * This cookie doesn't use the copy buffer. Walk through the pages this
3157 	 * cookie occupies to reflect this.
3158 	 */
3159 	} else {
3160 		/*
3161 		 * figure out how many pages the cookie occupies. We need to
3162 		 * use the original page offset of the buffer and the cookie's
3163 		 * offset in the buffer to do this.
3164 		 */
3165 		poff = (sinfo->si_buf_offset + cur_offset) & MMU_PAGEOFFSET;
3166 		pcnt = mmu_btopr(cookie->dmac_size + poff);
3167 
3168 		while (pcnt > 0) {
3169 #if !defined(__amd64)
3170 			/*
3171 			 * the 32-bit kernel doesn't have seg kpm, so we need
3172 			 * to map in the driver buffer (if it didn't come down
3173 			 * with a kernel VA) on the fly. Since this page doesn't
3174 			 * use the copy buffer, it's not, nor will it ever, have
3175 			 * to be mapped in.
3176 			 */
3177 			dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
3178 #endif
3179 			dma->dp_pgmap[pidx].pm_uses_copybuf = B_FALSE;
3180 
3181 			/*
3182 			 * we need to update pidx and cur_pp or we'll lose
3183 			 * track of where we are.
3184 			 */
3185 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
3186 				*cur_pp = (*cur_pp)->p_next;
3187 			}
3188 			pidx++;
3189 			pcnt--;
3190 		}
3191 	}
3192 }
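
/*
 * Worked example for the cookie setup above (assumed numbers, 4K pages):
 * with si_buf_offset = 0x800 and cur_offset = 0x2800, the page index is
 * pidx = (0x800 + 0x2800) >> MMU_PAGESHIFT = 3. If dp_copybuf_size is
 * 0x4000 (a power of two) and *copybuf_used has already reached 0x5000,
 * the page's copy buffer KVA wraps to
 * pm_cbaddr = dp_cbaddr + (0x5000 & (0x4000 - 1)) = dp_cbaddr + 0x1000.
 */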
3193 
3194 
3195 /*
3196  * rootnex_sgllen_window_boundary()
3197  *    Called in the bind slow path when the next cookie causes us to exceed (in
3198  *    this case == since we start at 0 and sgllen starts at 1) the maximum sgl
3199  *    length supported by the DMA H/W.
3200  */
3201 static int
3202 rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
3203     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, ddi_dma_attr_t *attr,
3204     off_t cur_offset)
3205 {
3206 	off_t new_offset;
3207 	size_t trim_sz;
3208 	off_t coffset;
3209 
3210 
3211 	/*
3212 	 * if we know we'll never have to trim, it's pretty easy. Just move to
3213 	 * the next window and init it. We're done.
3214 	 */
3215 	if (!dma->dp_trim_required) {
3216 		(*windowp)++;
3217 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
3218 		(*windowp)->wd_cookie_cnt++;
3219 		(*windowp)->wd_size = cookie->dmac_size;
3220 		return (DDI_SUCCESS);
3221 	}
3222 
3223 	/* figure out how much we need to trim from the window */
3224 	ASSERT(attr->dma_attr_granular != 0);
3225 	if (dma->dp_granularity_power_2) {
3226 		trim_sz = (*windowp)->wd_size & (attr->dma_attr_granular - 1);
3227 	} else {
3228 		trim_sz = (*windowp)->wd_size % attr->dma_attr_granular;
3229 	}
3230 
3231 	/* The window's a whole multiple of granularity. We're done */
3232 	if (trim_sz == 0) {
3233 		(*windowp)++;
3234 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
3235 		(*windowp)->wd_cookie_cnt++;
3236 		(*windowp)->wd_size = cookie->dmac_size;
3237 		return (DDI_SUCCESS);
3238 	}
3239 
3240 	/*
3241 	 * The window's not a whole multiple of granularity, since we know this
3242 	 * is due to the sgllen, we need to go back to the last cookie and trim
3243 	 * that one, add the left over part of the old cookie into the new
3244 	 * window, and then add in the new cookie into the new window.
3245 	 */
3246 
3247 	/*
3248 	 * make sure the driver isn't making us do something bad... Trimming and
3249 	 * sgllen == 1 don't go together.
3250 	 */
3251 	if (attr->dma_attr_sgllen == 1) {
3252 		return (DDI_DMA_NOMAPPING);
3253 	}
3254 
3255 	/*
3256 	 * first, setup the current window to account for the trim. Need to go
3257 	 * back to the last cookie for this.
3258 	 */
3259 	cookie--;
3260 	(*windowp)->wd_trim.tr_trim_last = B_TRUE;
3261 	(*windowp)->wd_trim.tr_last_cookie = cookie;
3262 	(*windowp)->wd_trim.tr_last_paddr = cookie->_dmu._dmac_ll;
3263 	ASSERT(cookie->dmac_size > trim_sz);
3264 	(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
3265 	(*windowp)->wd_size -= trim_sz;
3266 
3267 	/* save the buffer offsets for the next window */
3268 	coffset = cookie->dmac_size - trim_sz;
3269 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
3270 
3271 	/*
3272 	 * set this now in case this is the first window. all other cases are
3273 	 * set in dma_win()
3274 	 */
3275 	cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
3276 
3277 	/*
3278 	 * initialize the next window using what's left over in the previous
3279 	 * cookie.
3280 	 */
3281 	(*windowp)++;
3282 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
3283 	(*windowp)->wd_cookie_cnt++;
3284 	(*windowp)->wd_trim.tr_trim_first = B_TRUE;
3285 	(*windowp)->wd_trim.tr_first_paddr = cookie->_dmu._dmac_ll + coffset;
3286 	(*windowp)->wd_trim.tr_first_size = trim_sz;
3287 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3288 		(*windowp)->wd_dosync = B_TRUE;
3289 	}
3290 
3291 	/*
3292 	 * now go back to the current cookie and add it to the new window. set
3293 	 * the new window size to the what was left over from the previous
3294 	 * cookie and what's in the current cookie.
3295 	 */
3296 	cookie++;
3297 	(*windowp)->wd_cookie_cnt++;
3298 	(*windowp)->wd_size = trim_sz + cookie->dmac_size;
3299 
3300 	/*
3301 	 * trim plus the next cookie could put us over maxxfer (a cookie can be
3302 	 * a max size of maxxfer). Handle that case.
3303 	 */
3304 	if ((*windowp)->wd_size > dma->dp_maxxfer) {
3305 		/*
3306 		 * maxxfer is already a whole multiple of granularity, and this
3307 		 * trim will be <= the previous trim (since a cookie can't be
3308 		 * larger than maxxfer). Make things simple here.
3309 		 */
3310 		trim_sz = (*windowp)->wd_size - dma->dp_maxxfer;
3311 		(*windowp)->wd_trim.tr_trim_last = B_TRUE;
3312 		(*windowp)->wd_trim.tr_last_cookie = cookie;
3313 		(*windowp)->wd_trim.tr_last_paddr = cookie->_dmu._dmac_ll;
3314 		(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
3315 		(*windowp)->wd_size -= trim_sz;
3316 		ASSERT((*windowp)->wd_size == dma->dp_maxxfer);
3317 
3318 		/* save the buffer offsets for the next window */
3319 		coffset = cookie->dmac_size - trim_sz;
3320 		new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
3321 
3322 		/* setup the next window */
3323 		(*windowp)++;
3324 		rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
3325 		(*windowp)->wd_cookie_cnt++;
3326 		(*windowp)->wd_trim.tr_trim_first = B_TRUE;
3327 		(*windowp)->wd_trim.tr_first_paddr = cookie->_dmu._dmac_ll +
3328 		    coffset;
3329 		(*windowp)->wd_trim.tr_first_size = trim_sz;
3330 	}
3331 
3332 	return (DDI_SUCCESS);
3333 }
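
/*
 * Worked example of the trim above (assumed numbers): with
 * dma_attr_granular = 512 and a window that has grown to wd_size = 0x1234
 * when the sgllen limit is hit, trim_sz = 0x1234 % 512 = 0x34. The previous
 * cookie and the window both shrink by 0x34 bytes, and those 0x34 bytes
 * become tr_first_size of the next window, so every window except the last
 * stays a whole multiple of the granularity.
 */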
3334 
3335 
3336 /*
3337  * rootnex_copybuf_window_boundary()
3338  *    Called in bind slowpath when we get to a window boundary because we used
3339  *    up all the copy buffer that we have.
3340  */
3341 static int
3342 rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
3343     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, off_t cur_offset,
3344     size_t *copybuf_used)
3345 {
3346 	rootnex_sglinfo_t *sinfo;
3347 	off_t new_offset;
3348 	size_t trim_sz;
3349 	off_t coffset;
3350 	uint_t pidx;
3351 	off_t poff;
3352 
3353 
3354 	sinfo = &dma->dp_sglinfo;
3355 
3356 	/*
3357 	 * the copy buffer should be a whole multiple of page size. We know that
3358 	 * this cookie is <= MMU_PAGESIZE.
3359 	 */
3360 	ASSERT(cookie->dmac_size <= MMU_PAGESIZE);
3361 
3362 	/*
3363 	 * from now on, all new windows in this bind need to be re-mapped during
3364 	 * ddi_dma_getwin() (32-bit kernel only). i.e. we ran out of copybuf
3365 	 * space...
3366 	 */
3367 #if !defined(__amd64)
3368 	dma->dp_cb_remaping = B_TRUE;
3369 #endif
3370 
3371 	/* reset copybuf used */
3372 	*copybuf_used = 0;
3373 
3374 	/*
3375 	 * if we don't have to trim (since granularity is set to 1), go to the
3376 	 * next window and add the current cookie to it. We know the current
3377 	 * cookie uses the copy buffer since we're in this code path.
3378 	 */
3379 	if (!dma->dp_trim_required) {
3380 		(*windowp)++;
3381 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
3382 
3383 		/* Add this cookie to the new window */
3384 		(*windowp)->wd_cookie_cnt++;
3385 		(*windowp)->wd_size += cookie->dmac_size;
3386 		*copybuf_used += MMU_PAGESIZE;
3387 		return (DDI_SUCCESS);
3388 	}
3389 
3390 	/*
3391 	 * *** may need to trim, figure it out.
3392 	 */
3393 
3394 	/* figure out how much we need to trim from the window */
3395 	if (dma->dp_granularity_power_2) {
3396 		trim_sz = (*windowp)->wd_size &
3397 		    (hp->dmai_attr.dma_attr_granular - 1);
3398 	} else {
3399 		trim_sz = (*windowp)->wd_size % hp->dmai_attr.dma_attr_granular;
3400 	}
3401 
3402 	/*
3403 	 * if the window's a whole multiple of granularity, go to the next
3404 	 * window, init it, then add in the current cookie. We know the current
3405 	 * cookie uses the copy buffer since we're in this code path.
3406 	 */
3407 	if (trim_sz == 0) {
3408 		(*windowp)++;
3409 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
3410 
3411 		/* Add this cookie to the new window */
3412 		(*windowp)->wd_cookie_cnt++;
3413 		(*windowp)->wd_size += cookie->dmac_size;
3414 		*copybuf_used += MMU_PAGESIZE;
3415 		return (DDI_SUCCESS);
3416 	}
3417 
3418 	/*
3419 	 * *** We figured it out, we definitely need to trim
3420 	 */
3421 
3422 	/*
3423 	 * make sure the driver isn't making us do something bad...
3424 	 * Trimming and sgllen == 1 don't go together.
3425 	 */
3426 	if (hp->dmai_attr.dma_attr_sgllen == 1) {
3427 		return (DDI_DMA_NOMAPPING);
3428 	}
3429 
3430 	/*
3431 	 * first, setup the current window to account for the trim. Need to go
3432 	 * back to the last cookie for this. Some of the last cookie will be in
3433 	 * the current window, and some of the last cookie will be in the new
3434 	 * window. All of the current cookie will be in the new window.
3435 	 */
3436 	cookie--;
3437 	(*windowp)->wd_trim.tr_trim_last = B_TRUE;
3438 	(*windowp)->wd_trim.tr_last_cookie = cookie;
3439 	(*windowp)->wd_trim.tr_last_paddr = cookie->_dmu._dmac_ll;
3440 	ASSERT(cookie->dmac_size > trim_sz);
3441 	(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
3442 	(*windowp)->wd_size -= trim_sz;
3443 
3444 	/*
3445 	 * we're trimming the last cookie (not the current cookie). So that
3446 	 * last cookie may or may not have been using the copy buffer (we
3447 	 * know the cookie passed in uses the copy buffer since we're in
3448 	 * this code path).
3449 	 *
3450 	 * If the last cookie doesn't use the copy buffer, nothing special to
3451 	 * do. However, if it does use the copy buffer, it will be both the
3452 	 * last page in the current window and the first page in the next
3453 	 * window. Since we are reusing the copy buffer (and KVA space on the
3454 	 * 32-bit kernel), this page will use the end of the copy buffer in the
3455 	 * current window, and the start of the copy buffer in the next window.
3456 	 * Track that info... The cookie physical address was already set to
3457 	 * the copy buffer physical address in setup_cookie..
3458 	 */
3459 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3460 		pidx = (sinfo->si_buf_offset + (*windowp)->wd_offset +
3461 		    (*windowp)->wd_size) >> MMU_PAGESHIFT;
3462 		(*windowp)->wd_trim.tr_last_copybuf_win = B_TRUE;
3463 		(*windowp)->wd_trim.tr_last_pidx = pidx;
3464 		(*windowp)->wd_trim.tr_last_cbaddr =
3465 		    dma->dp_pgmap[pidx].pm_cbaddr;
3466 #if !defined(__amd64)
3467 		(*windowp)->wd_trim.tr_last_kaddr =
3468 		    dma->dp_pgmap[pidx].pm_kaddr;
3469 #endif
3470 	}
3471 
3472 	/* save the buffer offsets for the next window */
3473 	coffset = cookie->dmac_size - trim_sz;
3474 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
3475 
3476 	/*
3477 	 * set this now in case this is the first window. all other cases are
3478 	 * set in dma_win()
3479 	 */
3480 	cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
3481 
3482 	/*
3483 	 * initialize the next window using what's left over in the previous
3484 	 * cookie.
3485 	 */
3486 	(*windowp)++;
3487 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
3488 	(*windowp)->wd_cookie_cnt++;
3489 	(*windowp)->wd_trim.tr_trim_first = B_TRUE;
3490 	(*windowp)->wd_trim.tr_first_paddr = cookie->_dmu._dmac_ll + coffset;
3491 	(*windowp)->wd_trim.tr_first_size = trim_sz;
3492 
3493 	/*
3494 	 * again, we're tracking if the last cookie uses the copy buffer.
3495 	 * read the comment above for more info on why we need to track
3496 	 * additional state.
3497 	 *
3498 	 * For the first cookie in the new window, we need to reset the physical
3499 	 * address to DMA into to the start of the copy buffer, plus any
3500 	 * initial page offset which may be present.
3501 	 */
3502 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3503 		(*windowp)->wd_dosync = B_TRUE;
3504 		(*windowp)->wd_trim.tr_first_copybuf_win = B_TRUE;
3505 		(*windowp)->wd_trim.tr_first_pidx = pidx;
3506 		(*windowp)->wd_trim.tr_first_cbaddr = dma->dp_cbaddr;
3507 		poff = (*windowp)->wd_trim.tr_first_paddr & MMU_PAGEOFFSET;
3508 		(*windowp)->wd_trim.tr_first_paddr = ptob64(hat_getpfnum(
3509 		    kas.a_hat, dma->dp_cbaddr)) + poff;
3510 #if !defined(__amd64)
3511 		(*windowp)->wd_trim.tr_first_kaddr = dma->dp_kva;
3512 #endif
3513 		/* account for the cookie copybuf usage in the new window */
3514 		*copybuf_used += MMU_PAGESIZE;
3515 
3516 		/*
3517 		 * every piece of code has to have a hack, and here is this
3518 		 * one's :-)
3519 		 *
3520 		 * There is a complex interaction between setup_cookie and the
3521 		 * copybuf window boundary. The complexity had to be in either
3522 		 * the maxxfer window, or the copybuf window, and I chose the
3523 		 * copybuf code.
3524 		 *
3525 		 * So in this code path, we have taken the last cookie,
3526 		 * virtually broken it in half due to the trim, and it happens
3527 		 * to use the copybuf which further complicates life. At the
3528 		 * same time, we have already setup the current cookie, which
3529 		 * is now wrong. More background info: the current cookie uses
3530 		 * the copybuf, so it is only a page long max. So we need to
3531 		 * fix the current cookie's copy buffer address, physical
3532 		 * address, and kva for the 32-bit kernel. We do this by
3533 		 * bumping them by page size (of course, we can't do this on
3534 		 * the physical address since the copy buffer may not be
3535 		 * physically contiguous).
3536 		 */
3537 		cookie++;
3538 		dma->dp_pgmap[pidx + 1].pm_cbaddr += MMU_PAGESIZE;
3539 		poff = cookie->_dmu._dmac_ll & MMU_PAGEOFFSET;
3540 		cookie->_dmu._dmac_ll = ptob64(hat_getpfnum(kas.a_hat,
3541 		    dma->dp_pgmap[pidx + 1].pm_cbaddr)) + poff;
3542 #if !defined(__amd64)
3543 		ASSERT(dma->dp_pgmap[pidx + 1].pm_mapped == B_FALSE);
3544 		dma->dp_pgmap[pidx + 1].pm_kaddr += MMU_PAGESIZE;
3545 #endif
3546 	} else {
3547 		/* go back to the current cookie */
3548 		cookie++;
3549 	}
3550 
3551 	/*
3552 	 * add the current cookie to the new window. set the new window size to
3553 	 * what was left over from the previous cookie and what's in the
3554 	 * current cookie.
3555 	 */
3556 	(*windowp)->wd_cookie_cnt++;
3557 	(*windowp)->wd_size = trim_sz + cookie->dmac_size;
3558 	ASSERT((*windowp)->wd_size < dma->dp_maxxfer);
3559 
3560 	/*
3561 	 * we know that the cookie passed in always uses the copy buffer. We
3562 	 * wouldn't be here if it didn't.
3563 	 */
3564 	*copybuf_used += MMU_PAGESIZE;
3565 
3566 	return (DDI_SUCCESS);
3567 }
3568 
3569 
3570 /*
3571  * rootnex_maxxfer_window_boundary()
3572  *    Called in bind slowpath when we get to a window boundary because we will
3573  *    go over maxxfer.
3574  */
3575 static int
3576 rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
3577     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie)
3578 {
3579 	size_t dmac_size;
3580 	off_t new_offset;
3581 	size_t trim_sz;
3582 	off_t coffset;
3583 
3584 
3585 	/*
3586 	 * calculate how much we have to trim off the current cookie so that
3587 	 * the window size equals maxxfer. We don't have to account for
3588 	 * granularity here since our maxxfer already takes that into account.
3589 	 */
3590 	trim_sz = ((*windowp)->wd_size + cookie->dmac_size) - dma->dp_maxxfer;
3591 	ASSERT(trim_sz <= cookie->dmac_size);
3592 	ASSERT(trim_sz <= dma->dp_maxxfer);
3593 
3594 	/* save cookie size since we need it later and we might change it */
3595 	dmac_size = cookie->dmac_size;
3596 
3597 	/*
3598 	 * if we're not trimming the entire cookie, setup the current window to
3599 	 * account for the trim.
3600 	 */
3601 	if (trim_sz < cookie->dmac_size) {
3602 		(*windowp)->wd_cookie_cnt++;
3603 		(*windowp)->wd_trim.tr_trim_last = B_TRUE;
3604 		(*windowp)->wd_trim.tr_last_cookie = cookie;
3605 		(*windowp)->wd_trim.tr_last_paddr = cookie->_dmu._dmac_ll;
3606 		(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
3607 		(*windowp)->wd_size = dma->dp_maxxfer;
3608 
3609 		/*
3610 		 * set the adjusted cookie size now in case this is the first
3611 		 * window. All other windows are taken care of in rootnex_dma_win().
3612 		 */
3613 		cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
3614 	}
3615 
3616 	/*
3617 	 * coffset is the current offset within the cookie, new_offset is the
3618 	 * current offset within the entire buffer.
3619 	 */
3620 	coffset = dmac_size - trim_sz;
3621 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
3622 
3623 	/* initialize the next window */
3624 	(*windowp)++;
3625 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
3626 	(*windowp)->wd_cookie_cnt++;
3627 	(*windowp)->wd_size = trim_sz;
3628 	if (trim_sz < dmac_size) {
3629 		(*windowp)->wd_trim.tr_trim_first = B_TRUE;
3630 		(*windowp)->wd_trim.tr_first_paddr = cookie->_dmu._dmac_ll +
3631 		    coffset;
3632 		(*windowp)->wd_trim.tr_first_size = trim_sz;
3633 	}
3634 
3635 	return (DDI_SUCCESS);
3636 }
3637 
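/*
 * A worked example of the trim arithmetic in
 * rootnex_maxxfer_window_boundary() above, using made-up sizes (the
 * numbers are illustrative only). Assume dp_maxxfer is 256K, the current
 * window already holds 192K, and the current cookie is 128K:
 *
 *	trim_sz			= (192K + 128K) - 256K = 64K
 *	current window:	tr_last_size	= 128K - 64K = 64K
 *			wd_size		= 256K (== dp_maxxfer)
 *	next window:	tr_first_size	= 64K
 *			tr_first_paddr	= cookie paddr + 64K
 *			wd_size		= 64K
 */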
3638 
3639 /*
3640  * rootnex_dma_sync()
3641  *    called from ddi_dma_sync() if DMP_NOSYNC is not set in hp->dmai_rflags.
3642  *    We set DMP_NOSYNC if we're not using the copy buffer. If DMP_NOSYNC
3643  *    is set, ddi_dma_sync() returns immediately passing back success.
3644  */
3645 /*ARGSUSED*/
3646 static int
3647 rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
3648     off_t off, size_t len, uint_t cache_flags)
3649 {
3650 	rootnex_sglinfo_t *sinfo;
3651 	rootnex_pgmap_t *cbpage;
3652 	rootnex_window_t *win;
3653 	ddi_dma_impl_t *hp;
3654 	rootnex_dma_t *dma;
3655 	caddr_t fromaddr;
3656 	caddr_t toaddr;
3657 	uint_t psize;
3658 	off_t offset;
3659 	uint_t pidx;
3660 	size_t size;
3661 	off_t poff;
3662 	int e;
3663 
3664 
3665 	hp = (ddi_dma_impl_t *)handle;
3666 	dma = (rootnex_dma_t *)hp->dmai_private;
3667 	sinfo = &dma->dp_sglinfo;
3668 
3669 	/*
3670 	 * if we don't have any windows, we don't need to sync. A copybuf
3671 	 * will cause us to have at least one window.
3672 	 */
3673 	if (dma->dp_window == NULL) {
3674 		return (DDI_SUCCESS);
3675 	}
3676 
3677 	/* This window may not need to be sync'd */
3678 	win = &dma->dp_window[dma->dp_current_win];
3679 	if (!win->wd_dosync) {
3680 		return (DDI_SUCCESS);
3681 	}
3682 
3683 	/* handle off and len special cases */
3684 	if ((off == 0) || (rootnex_sync_ignore_params)) {
3685 		offset = win->wd_offset;
3686 	} else {
3687 		offset = off;
3688 	}
3689 	if ((len == 0) || (rootnex_sync_ignore_params)) {
3690 		size = win->wd_size;
3691 	} else {
3692 		size = len;
3693 	}
3694 
3695 	/* check the sync args to make sure they make a little sense */
3696 	if (rootnex_sync_check_parms) {
3697 		e = rootnex_valid_sync_parms(hp, win, offset, size,
3698 		    cache_flags);
3699 		if (e != DDI_SUCCESS) {
3700 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_SYNC_FAIL]);
3701 			return (DDI_FAILURE);
3702 		}
3703 	}
3704 
3705 	/*
3706 	 * special case the first page to handle the offset into the page. The
3707 	 * offset into the current page for our buffer is the offset into the
3708 	 * first page of the buffer plus our current offset into the buffer
3709 	 * itself, masked of course.
3710 	 */
3711 	poff = (sinfo->si_buf_offset + offset) & MMU_PAGEOFFSET;
3712 	psize = MIN((MMU_PAGESIZE - poff), size);
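	/*
	 * A made-up example of the arithmetic above: with si_buf_offset of
	 * 0x800, offset of 0, and 4K (0x1000) pages, poff is 0x800 and the
	 * first psize is MIN(0x1000 - 0x800, size), i.e. at most 0x800
	 * bytes are synced out of the first page.
	 */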
3713 
3714 	/* go through all the pages that we want to sync */
3715 	while (size > 0) {
3716 		/*
3717 		 * Calculate the page index relative to the start of the buffer.
3718 		 * The index to the current page for our buffer is the offset
3719 		 * into the first page of the buffer plus our current offset
3720 		 * into the buffer itself, shifted of course...
3721 		 */
3722 		pidx = (sinfo->si_buf_offset + offset) >> MMU_PAGESHIFT;
3723 		ASSERT(pidx < sinfo->si_max_pages);
3724 
3725 		/*
3726 		 * if this page uses the copy buffer, we need to sync it,
3727 		 * otherwise, go on to the next page.
3728 		 */
3729 		cbpage = &dma->dp_pgmap[pidx];
3730 		ASSERT((cbpage->pm_uses_copybuf == B_TRUE) ||
3731 		    (cbpage->pm_uses_copybuf == B_FALSE));
3732 		if (cbpage->pm_uses_copybuf) {
3733 			/* cbaddr and kaddr should be page aligned */
3734 			ASSERT(((uintptr_t)cbpage->pm_cbaddr &
3735 			    MMU_PAGEOFFSET) == 0);
3736 			ASSERT(((uintptr_t)cbpage->pm_kaddr &
3737 			    MMU_PAGEOFFSET) == 0);
3738 
3739 			/*
3740 			 * if we're copying for the device, we are going to
3741 			 * copy from the drivers buffer and to the rootnex
3742 			 * allocated copy buffer.
3743 			 */
3744 			if (cache_flags == DDI_DMA_SYNC_FORDEV) {
3745 				fromaddr = cbpage->pm_kaddr + poff;
3746 				toaddr = cbpage->pm_cbaddr + poff;
3747 				DTRACE_PROBE2(rootnex__sync__dev,
3748 				    dev_info_t *, dma->dp_dip, size_t, psize);
3749 
3750 			/*
3751 			 * if we're copying for the cpu/kernel, we are going to
3752 			 * copy from the rootnex allocated copy buffer to the
3753 			 * driver's buffer.
3754 			 */
3755 			} else {
3756 				fromaddr = cbpage->pm_cbaddr + poff;
3757 				toaddr = cbpage->pm_kaddr + poff;
3758 				DTRACE_PROBE2(rootnex__sync__cpu,
3759 				    dev_info_t *, dma->dp_dip, size_t, psize);
3760 			}
3761 
3762 			bcopy(fromaddr, toaddr, psize);
3763 		}
3764 
3765 		/*
3766 		 * decrement size until we're done, update our offset into the
3767 		 * buffer, and get the next page size.
3768 		 */
3769 		size -= psize;
3770 		offset += psize;
3771 		psize = MIN(MMU_PAGESIZE, size);
3772 
3773 		/* page offset is zero for the rest of this loop */
3774 		poff = 0;
3775 	}
3776 
3777 	return (DDI_SUCCESS);
3778 }
3779 
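/*
 * A minimal sketch of how a leaf driver ends up in rootnex_dma_sync()
 * above. This is hypothetical driver code, not part of this file; "dmah"
 * is a handle from ddi_dma_alloc_handle() and "xferlen" is the number of
 * bytes the device transferred. After a device-to-memory transfer
 * completes, the driver syncs for the CPU before reading the data:
 *
 *	if (ddi_dma_sync(dmah, 0, xferlen, DDI_DMA_SYNC_FORCPU) !=
 *	    DDI_SUCCESS) {
 *		cmn_err(CE_WARN, "sync outside the current window");
 *	}
 *
 * If the bind never needed the copy buffer, DMP_NOSYNC is set on the
 * handle and ddi_dma_sync() succeeds without calling down to this busop.
 */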
3780 
3781 /*
3782  * rootnex_valid_sync_parms()
3783  *    checks the parameters passed to sync to verify they are correct.
3784  */
3785 static int
3786 rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win,
3787     off_t offset, size_t size, uint_t cache_flags)
3788 {
3789 	off_t woffset;
3790 
3791 
3792 	/*
3793 	 * the first part of the test is to make sure the offset passed in is
3794 	 * within the window.
3795 	 */
3796 	if (offset < win->wd_offset) {
3797 		return (DDI_FAILURE);
3798 	}
3799 
3800 	/*
3801 	 * second and last part of the test is to make sure the offset and
3802 	 * length passed in are within the window.
3803 	 */
3804 	woffset = offset - win->wd_offset;
3805 	if ((woffset + size) > win->wd_size) {
3806 		return (DDI_FAILURE);
3807 	}
3808 
3809 	/*
3810 	 * if we are sync'ing for the device, the DDI_DMA_WRITE flag should
3811 	 * be set too.
3812 	 */
3813 	if ((cache_flags == DDI_DMA_SYNC_FORDEV) &&
3814 	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
3815 		return (DDI_SUCCESS);
3816 	}
3817 
3818 	/*
3819 	 * at this point, either DDI_DMA_SYNC_FORCPU or DDI_DMA_SYNC_FORKERNEL
3820 	 * should be set. Also DDI_DMA_READ should be set in the flags.
3821 	 */
3822 	if (((cache_flags == DDI_DMA_SYNC_FORCPU) ||
3823 	    (cache_flags == DDI_DMA_SYNC_FORKERNEL)) &&
3824 	    (hp->dmai_rflags & DDI_DMA_READ)) {
3825 		return (DDI_SUCCESS);
3826 	}
3827 
3828 	return (DDI_FAILURE);
3829 }
3830 
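/*
 * Restating the checks in rootnex_valid_sync_parms() above (no additional
 * rules beyond what the code enforces): the (offset, size) pair must fall
 * entirely within the current window, and the cache flag must match the
 * direction the handle was bound with:
 *
 *	cache_flags			required bind flag
 *	-----------			------------------
 *	DDI_DMA_SYNC_FORDEV		DDI_DMA_WRITE
 *	DDI_DMA_SYNC_FORCPU		DDI_DMA_READ
 *	DDI_DMA_SYNC_FORKERNEL		DDI_DMA_READ
 *
 * Anything else returns DDI_FAILURE.
 */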
3831 
3832 /*
3833  * rootnex_dma_win()
3834  *    called from ddi_dma_getwin()
3835  */
3836 /*ARGSUSED*/
3837 static int
3838 rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
3839     uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
3840     uint_t *ccountp)
3841 {
3842 	rootnex_window_t *window;
3843 	rootnex_trim_t *trim;
3844 	ddi_dma_impl_t *hp;
3845 	rootnex_dma_t *dma;
3846 #if !defined(__amd64)
3847 	rootnex_sglinfo_t *sinfo;
3848 	rootnex_pgmap_t *pmap;
3849 	uint_t pidx;
3850 	uint_t pcnt;
3851 	off_t poff;
3852 	int i;
3853 #endif
3854 
3855 
3856 	hp = (ddi_dma_impl_t *)handle;
3857 	dma = (rootnex_dma_t *)hp->dmai_private;
3858 #if !defined(__amd64)
3859 	sinfo = &dma->dp_sglinfo;
3860 #endif
3861 
3862 	/* If we try and get a window which doesn't exist, return failure */
3863 	if (win >= hp->dmai_nwin) {
3864 		ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
3865 		return (DDI_FAILURE);
3866 	}
3867 
3868 	/*
3869 	 * if we don't have any windows, and they're asking for the first
3870 	 * window, setup the cookie pointer to the first cookie in the bind.
3871 	 * setup our return values, then increment the cookie since we return
3872 	 * the first cookie on the stack.
3873 	 */
3874 	if (dma->dp_window == NULL) {
3875 		if (win != 0) {
3876 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
3877 			return (DDI_FAILURE);
3878 		}
3879 		hp->dmai_cookie = dma->dp_cookies;
3880 		*offp = 0;
3881 		*lenp = dma->dp_dma.dmao_size;
3882 		*ccountp = dma->dp_sglinfo.si_sgl_size;
3883 		*cookiep = hp->dmai_cookie[0];
3884 		hp->dmai_cookie++;
3885 		return (DDI_SUCCESS);
3886 	}
3887 
3888 	/* sync the old window before moving on to the new one */
3889 	window = &dma->dp_window[dma->dp_current_win];
3890 	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_READ)) {
3891 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
3892 		    DDI_DMA_SYNC_FORCPU);
3893 	}
3894 
3895 #if !defined(__amd64)
3896 	/*
3897 	 * before we move to the next window, if we need to re-map, unmap all
3898 	 * the pages in this window.
3899 	 */
3900 	if (dma->dp_cb_remaping) {
3901 		/*
3902 		 * If we switch to this window again, we'll need to map it in
3903 		 * on the fly next time.
3904 		 */
3905 		window->wd_remap_copybuf = B_TRUE;
3906 
3907 		/*
3908 		 * calculate the page index into the buffer where this window
3909 		 * starts, and the number of pages this window takes up.
3910 		 */
3911 		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
3912 		    MMU_PAGESHIFT;
3913 		poff = (sinfo->si_buf_offset + window->wd_offset) &
3914 		    MMU_PAGEOFFSET;
3915 		pcnt = mmu_btopr(window->wd_size + poff);
3916 		ASSERT((pidx + pcnt) <= sinfo->si_max_pages);
3917 
3918 		/* unmap pages which are currently mapped in this window */
3919 		for (i = 0; i < pcnt; i++) {
3920 			if (dma->dp_pgmap[pidx].pm_mapped) {
3921 				hat_unload(kas.a_hat,
3922 				    dma->dp_pgmap[pidx].pm_kaddr, MMU_PAGESIZE,
3923 				    HAT_UNLOAD);
3924 				dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
3925 			}
3926 			pidx++;
3927 		}
3928 	}
3929 #endif
3930 
3931 	/*
3932 	 * Move to the new window.
3933 	 * NOTE: current_win must be set for sync to work right
3934 	 */
3935 	dma->dp_current_win = win;
3936 	window = &dma->dp_window[win];
3937 
3938 	/* if needed, adjust the first and/or last cookies for trim */
3939 	trim = &window->wd_trim;
3940 	if (trim->tr_trim_first) {
3941 		window->wd_first_cookie->_dmu._dmac_ll = trim->tr_first_paddr;
3942 		window->wd_first_cookie->dmac_size = trim->tr_first_size;
3943 #if !defined(__amd64)
3944 		window->wd_first_cookie->dmac_type =
3945 		    (window->wd_first_cookie->dmac_type &
3946 		    ROOTNEX_USES_COPYBUF) + window->wd_offset;
3947 #endif
3948 		if (trim->tr_first_copybuf_win) {
3949 			dma->dp_pgmap[trim->tr_first_pidx].pm_cbaddr =
3950 			    trim->tr_first_cbaddr;
3951 #if !defined(__amd64)
3952 			dma->dp_pgmap[trim->tr_first_pidx].pm_kaddr =
3953 			    trim->tr_first_kaddr;
3954 #endif
3955 		}
3956 	}
3957 	if (trim->tr_trim_last) {
3958 		trim->tr_last_cookie->_dmu._dmac_ll = trim->tr_last_paddr;
3959 		trim->tr_last_cookie->dmac_size = trim->tr_last_size;
3960 		if (trim->tr_last_copybuf_win) {
3961 			dma->dp_pgmap[trim->tr_last_pidx].pm_cbaddr =
3962 			    trim->tr_last_cbaddr;
3963 #if !defined(__amd64)
3964 			dma->dp_pgmap[trim->tr_last_pidx].pm_kaddr =
3965 			    trim->tr_last_kaddr;
3966 #endif
3967 		}
3968 	}
3969 
3970 	/*
3971 	 * setup the cookie pointer to the first cookie in the window. setup
3972 	 * our return values, then increment the cookie since we return the
3973 	 * first cookie on the stack.
3974 	 */
3975 	hp->dmai_cookie = window->wd_first_cookie;
3976 	*offp = window->wd_offset;
3977 	*lenp = window->wd_size;
3978 	*ccountp = window->wd_cookie_cnt;
3979 	*cookiep = hp->dmai_cookie[0];
3980 	hp->dmai_cookie++;
3981 
3982 #if !defined(__amd64)
3983 	/* re-map copybuf if required for this window */
3984 	if (dma->dp_cb_remaping) {
3985 		/*
3986 		 * calculate the page index into the buffer where this
3987 		 * window starts.
3988 		 */
3989 		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
3990 		    MMU_PAGESHIFT;
3991 		ASSERT(pidx < sinfo->si_max_pages);
3992 
3993 		/*
3994 		 * the first page can get unmapped if it's shared with the
3995 		 * previous window. Even if the rest of this window is already
3996 		 * mapped in, we still need to check this one.
3997 		 */
3998 		pmap = &dma->dp_pgmap[pidx];
3999 		if ((pmap->pm_uses_copybuf) && (pmap->pm_mapped == B_FALSE)) {
4000 			if (pmap->pm_pp != NULL) {
4001 				pmap->pm_mapped = B_TRUE;
4002 				i86_pp_map(pmap->pm_pp, pmap->pm_kaddr);
4003 			} else if (pmap->pm_vaddr != NULL) {
4004 				pmap->pm_mapped = B_TRUE;
4005 				i86_va_map(pmap->pm_vaddr, sinfo->si_asp,
4006 				    pmap->pm_kaddr);
4007 			}
4008 		}
4009 		pidx++;
4010 
4011 		/* map in the rest of the pages if required */
4012 		if (window->wd_remap_copybuf) {
4013 			window->wd_remap_copybuf = B_FALSE;
4014 
4015 			/* figure out how many pages this window takes up */
4016 			poff = (sinfo->si_buf_offset + window->wd_offset) &
4017 			    MMU_PAGEOFFSET;
4018 			pcnt = mmu_btopr(window->wd_size + poff);
4019 			ASSERT(((pidx - 1) + pcnt) <= sinfo->si_max_pages);
4020 
4021 			/* map pages which require it */
4022 			for (i = 1; i < pcnt; i++) {
4023 				pmap = &dma->dp_pgmap[pidx];
4024 				if (pmap->pm_uses_copybuf) {
4025 					ASSERT(pmap->pm_mapped == B_FALSE);
4026 					if (pmap->pm_pp != NULL) {
4027 						pmap->pm_mapped = B_TRUE;
4028 						i86_pp_map(pmap->pm_pp,
4029 						    pmap->pm_kaddr);
4030 					} else if (pmap->pm_vaddr != NULL) {
4031 						pmap->pm_mapped = B_TRUE;
4032 						i86_va_map(pmap->pm_vaddr,
4033 						    sinfo->si_asp,
4034 						    pmap->pm_kaddr);
4035 					}
4036 				}
4037 				pidx++;
4038 			}
4039 		}
4040 	}
4041 #endif
4042 
4043 	/* if the new window uses the copy buffer, sync it for the device */
4044 	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_WRITE)) {
4045 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
4046 		    DDI_DMA_SYNC_FORDEV);
4047 	}
4048 
4049 	return (DDI_SUCCESS);
4050 }
4051 
4052 
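/*
 * A minimal sketch of the partial-bind loop that drives rootnex_dma_win()
 * above through ddi_dma_getwin(). This is hypothetical driver code, not
 * part of this file; it assumes the bind returned DDI_DMA_PARTIAL_MAP and
 * that "dmah" is the bound handle.
 *
 *	ddi_dma_cookie_t cookie;
 *	uint_t nwin, win, ccnt, i;
 *	size_t len;
 *	off_t off;
 *
 *	(void) ddi_dma_numwin(dmah, &nwin);
 *	for (win = 0; win < nwin; win++) {
 *		if (ddi_dma_getwin(dmah, win, &off, &len, &cookie,
 *		    &ccnt) != DDI_SUCCESS)
 *			break;
 *		program_cookie(&cookie);
 *		for (i = 1; i < ccnt; i++) {
 *			ddi_dma_nextcookie(dmah, &cookie);
 *			program_cookie(&cookie);
 *		}
 *	}
 *
 * "program_cookie" stands in for whatever the hardware needs. Switching
 * windows also syncs the old window for the CPU and the new one for the
 * device when the copy buffer is in use, as the code above shows.
 */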
4053 
4054 /*
4055  * ************************
4056  *  obsoleted dma routines
4057  * ************************
4058  */
4059 
4060 /*
4061  * rootnex_dma_map()
4062  *    called from ddi_dma_setup()
4063  */
4064 /* ARGSUSED */
4065 static int
4066 rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip, struct ddi_dma_req *dmareq,
4067     ddi_dma_handle_t *handlep)
4068 {
4069 #if defined(__amd64)
4070 	/*
4071 	 * this interface is not supported in the 64-bit x86 kernel. See comment in
4072 	 * rootnex_dma_mctl()
4073 	 */
4074 	return (DDI_DMA_NORESOURCES);
4075 
4076 #else /* 32-bit x86 kernel */
4077 	ddi_dma_handle_t *lhandlep;
4078 	ddi_dma_handle_t lhandle;
4079 	ddi_dma_cookie_t cookie;
4080 	ddi_dma_attr_t dma_attr;
4081 	ddi_dma_lim_t *dma_lim;
4082 	uint_t ccnt;
4083 	int e;
4084 
4085 
4086 	/*
4087 	 * if the driver is just testing to see if it's possible to do the bind,
4088 	 * we'll use local state. Otherwise, use the handle pointer passed in.
4089 	 */
4090 	if (handlep == NULL) {
4091 		lhandlep = &lhandle;
4092 	} else {
4093 		lhandlep = handlep;
4094 	}
4095 
4096 	/* convert the limit structure to a dma_attr one */
4097 	dma_lim = dmareq->dmar_limits;
4098 	dma_attr.dma_attr_version = DMA_ATTR_V0;
4099 	dma_attr.dma_attr_addr_lo = dma_lim->dlim_addr_lo;
4100 	dma_attr.dma_attr_addr_hi = dma_lim->dlim_addr_hi;
4101 	dma_attr.dma_attr_minxfer = dma_lim->dlim_minxfer;
4102 	dma_attr.dma_attr_seg = dma_lim->dlim_adreg_max;
4103 	dma_attr.dma_attr_count_max = dma_lim->dlim_ctreg_max;
4104 	dma_attr.dma_attr_granular = dma_lim->dlim_granular;
4105 	dma_attr.dma_attr_sgllen = dma_lim->dlim_sgllen;
4106 	dma_attr.dma_attr_maxxfer = dma_lim->dlim_reqsize;
4107 	dma_attr.dma_attr_burstsizes = dma_lim->dlim_burstsizes;
4108 	dma_attr.dma_attr_align = MMU_PAGESIZE;
4109 	dma_attr.dma_attr_flags = 0;
4110 
4111 	e = rootnex_dma_allochdl(dip, rdip, &dma_attr, dmareq->dmar_fp,
4112 	    dmareq->dmar_arg, lhandlep);
4113 	if (e != DDI_SUCCESS) {
4114 		return (e);
4115 	}
4116 
4117 	e = rootnex_dma_bindhdl(dip, rdip, *lhandlep, dmareq, &cookie, &ccnt);
4118 	if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) {
4119 		(void) rootnex_dma_freehdl(dip, rdip, *lhandlep);
4120 		return (e);
4121 	}
4122 
4123 	/*
4124 	 * if the driver is just testing to see if it's possible to do the bind,
4125 	 * free up the local state and return the result.
4126 	 */
4127 	if (handlep == NULL) {
4128 		(void) rootnex_dma_unbindhdl(dip, rdip, *lhandlep);
4129 		(void) rootnex_dma_freehdl(dip, rdip, *lhandlep);
4130 		if (e == DDI_DMA_MAPPED) {
4131 			return (DDI_DMA_MAPOK);
4132 		} else {
4133 			return (DDI_DMA_NOMAPPING);
4134 		}
4135 	}
4136 
4137 	return (e);
4138 #endif /* defined(__amd64) */
4139 }
4140 
4141 
4142 /*
4143  * rootnex_dma_mctl()
4144  *
4145  */
4146 /* ARGSUSED */
4147 static int
4148 rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
4149     enum ddi_dma_ctlops request, off_t *offp, size_t *lenp, caddr_t *objpp,
4150     uint_t cache_flags)
4151 {
4152 #if defined(__amd64)
4153 	/*
4154 	 * DDI_DMA_SMEM_ALLOC & DDI_DMA_IOPB_ALLOC were changed to have a
4155 	 * common implementation in genunix, so they no longer have x86
4156 	 * specific functionality which called into dma_ctl.
4157 	 *
4158 	 * The rest of the obsoleted interfaces were never supported in the
4159 	 * 64-bit x86 kernel. For s10, the obsoleted DDI_DMA_SEGTOC interface
4160 	 * was not ported to the x86 64-bit kernel due to serious x86 rootnex
4161 	 * implementation issues.
4162 	 *
4163 	 * If you can't use DDI_DMA_SEGTOC; DDI_DMA_NEXTSEG, DDI_DMA_FREE, and
4164 	 * DDI_DMA_NEXTWIN are useless since you can't get to the cookie, so we
4165 	 * reflect that now too...
4166 	 *
4167 	 * Even though we fixed the pointer problem in DDI_DMA_SEGTOC, we are
4168 	 * not going to put this functionality into the 64-bit x86 kernel now.
4169 	 * It wasn't ported to the 64-bit kernel for s10, and there is no
4170 	 * reason to change that in a future release.
4171 	 */
4172 	return (DDI_FAILURE);
4173 
4174 #else /* 32-bit x86 kernel */
4175 	ddi_dma_cookie_t lcookie;
4176 	ddi_dma_cookie_t *cookie;
4177 	rootnex_window_t *window;
4178 	ddi_dma_impl_t *hp;
4179 	rootnex_dma_t *dma;
4180 	uint_t nwin;
4181 	uint_t ccnt;
4182 	size_t len;
4183 	off_t off;
4184 	int e;
4185 
4186 
4187 	/*
4188 	 * DDI_DMA_SEGTOC, DDI_DMA_NEXTSEG, and DDI_DMA_NEXTWIN are a little
4189 	 * hacky since we're optimizing for the current interfaces and so we
4190 	 * can clean up the mess in genunix. Hopefully we will remove these
4191 	 * obsoleted routines someday soon.
4192 	 */
4193 
4194 	switch (request) {
4195 
4196 	case DDI_DMA_SEGTOC: /* ddi_dma_segtocookie() */
4197 		hp = (ddi_dma_impl_t *)handle;
4198 		cookie = (ddi_dma_cookie_t *)objpp;
4199 
4200 		/*
4201 		 * convert segment to cookie. We don't distinguish between the
4202 		 * two :-)
4203 		 */
4204 		*cookie = *hp->dmai_cookie;
4205 		*lenp = cookie->dmac_size;
4206 		*offp = cookie->dmac_type & ~ROOTNEX_USES_COPYBUF;
4207 		return (DDI_SUCCESS);
4208 
4209 	case DDI_DMA_NEXTSEG: /* ddi_dma_nextseg() */
4210 		hp = (ddi_dma_impl_t *)handle;
4211 		dma = (rootnex_dma_t *)hp->dmai_private;
4212 
4213 		if ((*lenp != NULL) && ((uintptr_t)*lenp != (uintptr_t)hp)) {
4214 			return (DDI_DMA_STALE);
4215 		}
4216 
4217 		/* handle the case where we don't have any windows */
4218 		if (dma->dp_window == NULL) {
4219 			/*
4220 			 * if seg == NULL, and we don't have any windows,
4221 			 * return the first cookie in the sgl.
4222 			 */
4223 			if (*lenp == NULL) {
4224 				dma->dp_current_cookie = 0;
4225 				hp->dmai_cookie = dma->dp_cookies;
4226 				*objpp = (caddr_t)handle;
4227 				return (DDI_SUCCESS);
4228 
4229 			/* if we have more cookies, go to the next cookie */
4230 			} else {
4231 				if ((dma->dp_current_cookie + 1) >=
4232 				    dma->dp_sglinfo.si_sgl_size) {
4233 					return (DDI_DMA_DONE);
4234 				}
4235 				dma->dp_current_cookie++;
4236 				hp->dmai_cookie++;
4237 				return (DDI_SUCCESS);
4238 			}
4239 		}
4240 
4241 		/* We have one or more windows */
4242 		window = &dma->dp_window[dma->dp_current_win];
4243 
4244 		/*
4245 		 * if seg == NULL, return the first cookie in the current
4246 		 * window
4247 		 */
4248 		if (*lenp == NULL) {
4249 			dma->dp_current_cookie = 0;
4250 			hp->dmai_cookie = window->wd_first_cookie;
4251 
4252 		/*
4253 		 * go to the next cookie in the window then see if we're done with
4254 		 * this window.
4255 		 */
4256 		} else {
4257 			if ((dma->dp_current_cookie + 1) >=
4258 			    window->wd_cookie_cnt) {
4259 				return (DDI_DMA_DONE);
4260 			}
4261 			dma->dp_current_cookie++;
4262 			hp->dmai_cookie++;
4263 		}
4264 		*objpp = (caddr_t)handle;
4265 		return (DDI_SUCCESS);
4266 
4267 	case DDI_DMA_NEXTWIN: /* ddi_dma_nextwin() */
4268 		hp = (ddi_dma_impl_t *)handle;
4269 		dma = (rootnex_dma_t *)hp->dmai_private;
4270 
4271 		if ((*offp != NULL) && ((uintptr_t)*offp != (uintptr_t)hp)) {
4272 			return (DDI_DMA_STALE);
4273 		}
4274 
4275 		/* if win == NULL, return the first window in the bind */
4276 		if (*offp == NULL) {
4277 			nwin = 0;
4278 
4279 		/*
4280 		 * else, go to the next window then see if we're done with all
4281 		 * the windows.
4282 		 */
4283 		} else {
4284 			nwin = dma->dp_current_win + 1;
4285 			if (nwin >= hp->dmai_nwin) {
4286 				return (DDI_DMA_DONE);
4287 			}
4288 		}
4289 
4290 		/* switch to the next window */
4291 		e = rootnex_dma_win(dip, rdip, handle, nwin, &off, &len,
4292 		    &lcookie, &ccnt);
4293 		ASSERT(e == DDI_SUCCESS);
4294 		if (e != DDI_SUCCESS) {
4295 			return (DDI_DMA_STALE);
4296 		}
4297 
4298 		/* reset the cookie back to the first cookie in the window */
4299 		if (dma->dp_window != NULL) {
4300 			window = &dma->dp_window[dma->dp_current_win];
4301 			hp->dmai_cookie = window->wd_first_cookie;
4302 		} else {
4303 			hp->dmai_cookie = dma->dp_cookies;
4304 		}
4305 
4306 		*objpp = (caddr_t)handle;
4307 		return (DDI_SUCCESS);
4308 
4309 	case DDI_DMA_FREE: /* ddi_dma_free() */
4310 		(void) rootnex_dma_unbindhdl(dip, rdip, handle);
4311 		(void) rootnex_dma_freehdl(dip, rdip, handle);
4312 		if (rootnex_state->r_dvma_call_list_id) {
4313 			ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
4314 		}
4315 		return (DDI_SUCCESS);
4316 
4317 	case DDI_DMA_IOPB_ALLOC:	/* get contiguous DMA-able memory */
4318 	case DDI_DMA_SMEM_ALLOC:	/* get contiguous DMA-able memory */
4319 		/* should never get here, handled in genunix */
4320 		ASSERT(0);
4321 		return (DDI_FAILURE);
4322 
4323 	case DDI_DMA_KVADDR:
4324 	case DDI_DMA_GETERR:
4325 	case DDI_DMA_COFF:
4326 		return (DDI_FAILURE);
4327 	}
4328 
4329 	return (DDI_FAILURE);
4330 #endif /* defined(__amd64) */
4331 }
4332 
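/*
 * For reference, a rough sketch of the obsolete window/segment walk that
 * the DDI_DMA_NEXTWIN, DDI_DMA_NEXTSEG, and DDI_DMA_SEGTOC cases above
 * service. The function signatures are recalled from the obsolete DDI
 * interfaces and should be treated as an assumption; new drivers use
 * ddi_dma_getwin() and ddi_dma_nextcookie() instead.
 *
 *	ddi_dma_win_t win, nwin;
 *	ddi_dma_seg_t seg, nseg;
 *	ddi_dma_cookie_t cookie;
 *	off_t off, len;
 *
 *	for (win = NULL; ddi_dma_nextwin(handle, win, &nwin) !=
 *	    DDI_DMA_DONE; win = nwin) {
 *		for (seg = NULL; ddi_dma_nextseg(nwin, seg, &nseg) !=
 *		    DDI_DMA_DONE; seg = nseg) {
 *			(void) ddi_dma_segtocookie(nseg, &off, &len,
 *			    &cookie);
 *		}
 *	}
 */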
4333 
4334 /*
4335  * *********
4336  *  FMA Code
4337  * *********
4338  */
4339 
4340 /*
4341  * rootnex_fm_init()
4342  *    FMA init busop
4343  */
4344 /* ARGSUSED */
4345 static int
4346 rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap,
4347     ddi_iblock_cookie_t *ibc)
4348 {
4349 	*ibc = rootnex_state->r_err_ibc;
4350 
4351 	return (ddi_system_fmcap);
4352 }
4353 
4354 /*
4355  * rootnex_dma_check()
4356  *    Function called after a dma fault occurred to find out whether the
4357  *    fault address is associated with a driver that is able to handle
4358  *    and recover from faults.
4359  */
4360 /* ARGSUSED */
4361 static int
4362 rootnex_dma_check(dev_info_t *dip, const void *handle, const void *addr,
4363     const void *not_used)
4364 {
4365 	rootnex_window_t *window;
4366 	uint64_t start_addr;
4367 	uint64_t fault_addr;
4368 	ddi_dma_impl_t *hp;
4369 	rootnex_dma_t *dma;
4370 	uint64_t end_addr;
4371 	size_t csize;
4372 	int i;
4373 	int j;
4374 
4375 
4376 	/* The driver has to set DDI_DMA_FLAGERR to recover from dma faults */
4377 	hp = (ddi_dma_impl_t *)handle;
4378 	ASSERT(hp);
4379 
4380 	dma = (rootnex_dma_t *)hp->dmai_private;
4381 
4382 	/* Get the address that we need to search for */
4383 	fault_addr = *(uint64_t *)addr;
4384 
4385 	/*
4386 	 * if we don't have any windows, we can just walk through all the
4387 	 * cookies.
4388 	 */
4389 	if (dma->dp_window == NULL) {
4390 		/* for each cookie */
4391 		for (i = 0; i < dma->dp_sglinfo.si_sgl_size; i++) {
4392 			/*
4393 			 * if the faulted address is within the physical address
4394 			 * range of the cookie, return DDI_FM_NONFATAL.
4395 			 */
4396 			if ((fault_addr >= dma->dp_cookies[i].dmac_laddress) &&
4397 			    (fault_addr <= (dma->dp_cookies[i].dmac_laddress +
4398 			    dma->dp_cookies[i].dmac_size))) {
4399 				return (DDI_FM_NONFATAL);
4400 			}
4401 		}
4402 
4403 		/* fault_addr not within this DMA handle */
4404 		return (DDI_FM_UNKNOWN);
4405 	}
4406 
4407 	/* we have multiple windows, walk through each window */
4408 	for (i = 0; i < hp->dmai_nwin; i++) {
4409 		window = &dma->dp_window[i];
4410 
4411 		/* Go through all the cookies in the window */
4412 		for (j = 0; j < window->wd_cookie_cnt; j++) {
4413 
4414 			start_addr = window->wd_first_cookie[j].dmac_laddress;
4415 			csize = window->wd_first_cookie[j].dmac_size;
4416 
4417 			/*
4418 			 * if we are trimming the first cookie in the window,
4419 			 * and this is the first cookie, adjust the start
4420 			 * address and size of the cookie to account for the
4421 			 * trim.
4422 			 */
4423 			if (window->wd_trim.tr_trim_first && (j == 0)) {
4424 				start_addr = window->wd_trim.tr_first_paddr;
4425 				csize = window->wd_trim.tr_first_size;
4426 			}
4427 
4428 			/*
4429 			 * if we are trimming the last cookie in the window,
4430 			 * and this is the last cookie, adjust the start
4431 			 * address and size of the cookie to account for the
4432 			 * trim.
4433 			 */
4434 			if (window->wd_trim.tr_trim_last &&
4435 			    (j == (window->wd_cookie_cnt - 1))) {
4436 				start_addr = window->wd_trim.tr_last_paddr;
4437 				csize = window->wd_trim.tr_last_size;
4438 			}
4439 
4440 			end_addr = start_addr + csize;
4441 
4442 			/*
4443 			 * if the faulted address is within the physical address
4444 			 * range of the cookie, return DDI_FM_NONFATAL.
4445 			 */
4446 			if ((fault_addr >= start_addr) &&
4447 			    (fault_addr <= end_addr)) {
4448 				return (DDI_FM_NONFATAL);
4449 			}
4450 		}
4451 	}
4452 
4453 	/* fault_addr not within this DMA handle */
4454 	return (DDI_FM_UNKNOWN);
4455 }
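/*
 * A minimal sketch (hypothetical driver code) of what a child driver does
 * so that a DMA fault can be walked back to its handle by
 * rootnex_dma_check(): declare DMA-check capability in attach(9E) and set
 * DDI_DMA_FLAGERR in the DMA attributes, as the comment in
 * rootnex_dma_check() requires.
 *
 *	int fmcap = DDI_FM_DMACHK_CAPABLE | DDI_FM_EREPORT_CAPABLE;
 *	ddi_iblock_cookie_t fm_ibc;
 *
 *	ddi_fm_init(dip, &fmcap, &fm_ibc);
 *	dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
 *
 * For a child of the root nexus, the ddi_fm_init() call ends up in
 * rootnex_fm_init() above, which hands back the rootnex error iblock
 * cookie and the system's FM capability level.
 */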
4456