xref: /titanic_50/usr/src/uts/i86pc/io/rootnex.c (revision 14d56903b5e712ea59206c206e57d3731107fcf1)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * x86 root nexus driver
31  */
32 
33 #include <sys/sysmacros.h>
34 #include <sys/conf.h>
35 #include <sys/autoconf.h>
36 #include <sys/sysmacros.h>
37 #include <sys/debug.h>
38 #include <sys/psw.h>
39 #include <sys/ddidmareq.h>
40 #include <sys/promif.h>
41 #include <sys/devops.h>
42 #include <sys/kmem.h>
43 #include <sys/cmn_err.h>
44 #include <vm/seg.h>
45 #include <vm/seg_kmem.h>
46 #include <vm/seg_dev.h>
47 #include <sys/vmem.h>
48 #include <sys/mman.h>
49 #include <vm/hat.h>
50 #include <vm/as.h>
51 #include <vm/page.h>
52 #include <sys/avintr.h>
53 #include <sys/errno.h>
54 #include <sys/modctl.h>
55 #include <sys/ddi_impldefs.h>
56 #include <sys/sunddi.h>
57 #include <sys/sunndi.h>
58 #include <sys/mach_intr.h>
59 #include <sys/psm.h>
60 #include <sys/ontrap.h>
61 #include <sys/atomic.h>
62 #include <sys/sdt.h>
63 #include <sys/rootnex.h>
64 #include <vm/hat_i86.h>
65 
66 /*
67  * enable/disable extra checking of function parameters. Useful for debugging
68  * drivers.
69  */
70 #ifdef	DEBUG
71 int rootnex_alloc_check_parms = 1;
72 int rootnex_bind_check_parms = 1;
73 int rootnex_bind_check_inuse = 1;
74 int rootnex_unbind_verify_buffer = 0;
75 int rootnex_sync_check_parms = 1;
76 #else
77 int rootnex_alloc_check_parms = 0;
78 int rootnex_bind_check_parms = 0;
79 int rootnex_bind_check_inuse = 0;
80 int rootnex_unbind_verify_buffer = 0;
81 int rootnex_sync_check_parms = 0;
82 #endif
83 
84 /* Master Abort and Target Abort panic flag */
85 int rootnex_fm_ma_ta_panic_flag = 0;
86 
87 /* Semi-temporary patchables to phase in bug fixes, test drivers, etc. */
88 int rootnex_bind_fail = 1;
89 int rootnex_bind_warn = 1;
90 uint8_t *rootnex_warn_list;
91 /* bitmasks for rootnex_warn_list. Up to 8 different warnings with uint8_t */
92 #define	ROOTNEX_BIND_WARNING	(0x1 << 0)
93 
94 /*
95  * revert back to old broken behavior of always sync'ing entire copy buffer.
96  * This is useful if be have a buggy driver which doesn't correctly pass in
97  * the offset and size into ddi_dma_sync().
98  */
99 int rootnex_sync_ignore_params = 0;
100 
101 /*
102  * maximum size that we will allow for a copy buffer. Can be patched on the
103  * fly
104  */
105 size_t rootnex_max_copybuf_size = 0x100000;
106 
107 /*
108  * For the 64-bit kernel, pre-alloc enough cookies for a 256K buffer plus 1
109  * page for alignment. For the 32-bit kernel, pre-alloc enough cookies for a
110  * 64K buffer plus 1 page for alignment (we have less kernel space in a 32-bit
111  * kernel). Allocate enough windows to handle a 256K buffer w/ at least 65
112  * sgllen DMA engine, and enough copybuf buffer state pages to handle 2 pages
113  * (< 8K). We will still need to allocate the copy buffer during bind though
114  * (if we need one). These can only be modified in /etc/system before rootnex
115  * attach.
116  */
117 #if defined(__amd64)
118 int rootnex_prealloc_cookies = 65;
119 int rootnex_prealloc_windows = 4;
120 int rootnex_prealloc_copybuf = 2;
121 #else
122 int rootnex_prealloc_cookies = 33;
123 int rootnex_prealloc_windows = 4;
124 int rootnex_prealloc_copybuf = 2;
125 #endif
126 
127 /* driver global state */
128 static rootnex_state_t *rootnex_state;
129 
130 /* shortcut to rootnex counters */
131 static uint64_t *rootnex_cnt;
132 
133 /*
134  * XXX - does x86 even need these or are they left over from the SPARC days?
135  */
136 /* statically defined integer/boolean properties for the root node */
137 static rootnex_intprop_t rootnex_intprp[] = {
138 	{ "PAGESIZE",			PAGESIZE },
139 	{ "MMU_PAGESIZE",		MMU_PAGESIZE },
140 	{ "MMU_PAGEOFFSET",		MMU_PAGEOFFSET },
141 	{ DDI_RELATIVE_ADDRESSING,	1 },
142 };
143 #define	NROOT_INTPROPS	(sizeof (rootnex_intprp) / sizeof (rootnex_intprop_t))
144 
145 
146 static struct cb_ops rootnex_cb_ops = {
147 	nodev,		/* open */
148 	nodev,		/* close */
149 	nodev,		/* strategy */
150 	nodev,		/* print */
151 	nodev,		/* dump */
152 	nodev,		/* read */
153 	nodev,		/* write */
154 	nodev,		/* ioctl */
155 	nodev,		/* devmap */
156 	nodev,		/* mmap */
157 	nodev,		/* segmap */
158 	nochpoll,	/* chpoll */
159 	ddi_prop_op,	/* cb_prop_op */
160 	NULL,		/* struct streamtab */
161 	D_NEW | D_MP | D_HOTPLUG, /* compatibility flags */
162 	CB_REV,		/* Rev */
163 	nodev,		/* cb_aread */
164 	nodev		/* cb_awrite */
165 };
166 
167 static int rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
168     off_t offset, off_t len, caddr_t *vaddrp);
169 static int rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip,
170     struct hat *hat, struct seg *seg, caddr_t addr,
171     struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock);
172 static int rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip,
173     struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep);
174 static int rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
175     ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
176     ddi_dma_handle_t *handlep);
177 static int rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
178     ddi_dma_handle_t handle);
179 static int rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
180     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
181     ddi_dma_cookie_t *cookiep, uint_t *ccountp);
182 static int rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
183     ddi_dma_handle_t handle);
184 static int rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip,
185     ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags);
186 static int rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip,
187     ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp,
188     ddi_dma_cookie_t *cookiep, uint_t *ccountp);
189 static int rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
190     ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
191     off_t *offp, size_t *lenp, caddr_t *objp, uint_t cache_flags);
192 static int rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip,
193     ddi_ctl_enum_t ctlop, void *arg, void *result);
194 static int rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip,
195     ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp, void *result);
196 
197 
198 static struct bus_ops rootnex_bus_ops = {
199 	BUSO_REV,
200 	rootnex_map,
201 	NULL,
202 	NULL,
203 	NULL,
204 	rootnex_map_fault,
205 	rootnex_dma_map,
206 	rootnex_dma_allochdl,
207 	rootnex_dma_freehdl,
208 	rootnex_dma_bindhdl,
209 	rootnex_dma_unbindhdl,
210 	rootnex_dma_sync,
211 	rootnex_dma_win,
212 	rootnex_dma_mctl,
213 	rootnex_ctlops,
214 	ddi_bus_prop_op,
215 	i_ddi_rootnex_get_eventcookie,
216 	i_ddi_rootnex_add_eventcall,
217 	i_ddi_rootnex_remove_eventcall,
218 	i_ddi_rootnex_post_event,
219 	0,			/* bus_intr_ctl */
220 	0,			/* bus_config */
221 	0,			/* bus_unconfig */
222 	NULL,			/* bus_fm_init */
223 	NULL,			/* bus_fm_fini */
224 	NULL,			/* bus_fm_access_enter */
225 	NULL,			/* bus_fm_access_exit */
226 	NULL,			/* bus_powr */
227 	rootnex_intr_ops	/* bus_intr_op */
228 };
229 
230 static int rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
231 static int rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
232 
233 static struct dev_ops rootnex_ops = {
234 	DEVO_REV,
235 	0,
236 	ddi_no_info,
237 	nulldev,
238 	nulldev,
239 	rootnex_attach,
240 	rootnex_detach,
241 	nulldev,
242 	&rootnex_cb_ops,
243 	&rootnex_bus_ops
244 };
245 
246 static struct modldrv rootnex_modldrv = {
247 	&mod_driverops,
248 	"i86pc root nexus %I%",
249 	&rootnex_ops
250 };
251 
252 static struct modlinkage rootnex_modlinkage = {
253 	MODREV_1,
254 	(void *)&rootnex_modldrv,
255 	NULL
256 };
257 
258 
259 /*
260  *  extern hacks
261  */
262 extern struct seg_ops segdev_ops;
263 extern int ignore_hardware_nodes;	/* force flag from ddi_impl.c */
264 #ifdef	DDI_MAP_DEBUG
265 extern int ddi_map_debug_flag;
266 #define	ddi_map_debug	if (ddi_map_debug_flag) prom_printf
267 #endif
268 #define	ptob64(x)	(((uint64_t)(x)) << MMU_PAGESHIFT)
269 extern void i86_pp_map(page_t *pp, caddr_t kaddr);
270 extern void i86_va_map(caddr_t vaddr, struct as *asp, caddr_t kaddr);
271 extern int (*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *,
272     psm_intr_op_t, int *);
273 extern int impl_ddi_sunbus_initchild(dev_info_t *dip);
274 extern void impl_ddi_sunbus_removechild(dev_info_t *dip);
275 /*
276  * Use device arena to use for device control register mappings.
277  * Various kernel memory walkers (debugger, dtrace) need to know
278  * to avoid this address range to prevent undesired device activity.
279  */
280 extern void *device_arena_alloc(size_t size, int vm_flag);
281 extern void device_arena_free(void * vaddr, size_t size);
282 
283 
284 /*
285  *  Internal functions
286  */
287 static int rootnex_dma_init();
288 static void rootnex_add_props(dev_info_t *);
289 static int rootnex_ctl_reportdev(dev_info_t *dip);
290 static struct intrspec *rootnex_get_ispec(dev_info_t *rdip, int inum);
291 static int rootnex_ctlops_poke(peekpoke_ctlops_t *in_args);
292 static int rootnex_ctlops_peek(peekpoke_ctlops_t *in_args, void *result);
293 static int rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp);
294 static int rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp);
295 static int rootnex_map_handle(ddi_map_req_t *mp);
296 static void rootnex_clean_dmahdl(ddi_dma_impl_t *hp);
297 static int rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegsize);
298 static int rootnex_valid_bind_parms(ddi_dma_req_t *dmareq,
299     ddi_dma_attr_t *attr);
300 static void rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
301     rootnex_sglinfo_t *sglinfo);
302 static int rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
303     rootnex_dma_t *dma, ddi_dma_attr_t *attr, int kmflag);
304 static int rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
305     rootnex_dma_t *dma, ddi_dma_attr_t *attr);
306 static void rootnex_teardown_copybuf(rootnex_dma_t *dma);
307 static int rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
308     ddi_dma_attr_t *attr, int kmflag);
309 static void rootnex_teardown_windows(rootnex_dma_t *dma);
310 static void rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
311     rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset);
312 static void rootnex_setup_cookie(ddi_dma_obj_t *dmar_object,
313     rootnex_dma_t *dma, ddi_dma_cookie_t *cookie, off_t cur_offset,
314     size_t *copybuf_used, page_t **cur_pp);
315 static int rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp,
316     rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie,
317     ddi_dma_attr_t *attr, off_t cur_offset);
318 static int rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp,
319     rootnex_dma_t *dma, rootnex_window_t **windowp,
320     ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used);
321 static int rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp,
322     rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie);
323 static int rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win,
324     off_t offset, size_t size, uint_t cache_flags);
325 static int rootnex_verify_buffer(rootnex_dma_t *dma);
326 static int rootnex_fm_callback(dev_info_t *dip, ddi_fm_error_t *derr,
327     const void *no_used);
328 
329 
330 /*
331  * _init()
332  *
333  */
334 int
335 _init(void)
336 {
337 
338 	rootnex_state = NULL;
339 	return (mod_install(&rootnex_modlinkage));
340 }
341 
342 
343 /*
344  * _info()
345  *
346  */
347 int
348 _info(struct modinfo *modinfop)
349 {
350 	return (mod_info(&rootnex_modlinkage, modinfop));
351 }
352 
353 
354 /*
355  * _fini()
356  *
357  */
358 int
359 _fini(void)
360 {
361 	return (EBUSY);
362 }
363 
364 
365 /*
366  * rootnex_attach()
367  *
368  */
369 static int
370 rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
371 {
372 	int fmcap;
373 	int e;
374 
375 
376 	switch (cmd) {
377 	case DDI_ATTACH:
378 		break;
379 	case DDI_RESUME:
380 		return (DDI_SUCCESS);
381 	default:
382 		return (DDI_FAILURE);
383 	}
384 
385 	/*
386 	 * We should only have one instance of rootnex. Save it away since we
387 	 * don't have an easy way to get it back later.
388 	 */
389 	ASSERT(rootnex_state == NULL);
390 	rootnex_state = kmem_zalloc(sizeof (rootnex_state_t), KM_SLEEP);
391 
392 	rootnex_state->r_dip = dip;
393 	rootnex_state->r_err_ibc = (ddi_iblock_cookie_t)ipltospl(15);
394 	rootnex_state->r_reserved_msg_printed = B_FALSE;
395 	rootnex_cnt = &rootnex_state->r_counters[0];
396 
397 	mutex_init(&rootnex_state->r_peekpoke_mutex, NULL, MUTEX_SPIN,
398 	    (void *)ipltospl(15));
399 
400 	/*
401 	 * Set minimum fm capability level for i86pc platforms and then
402 	 * initialize error handling. Since we're the rootnex, we don't
403 	 * care what's returned in the fmcap field.
404 	 */
405 	ddi_system_fmcap = DDI_FM_ERRCB_CAPABLE;
406 	fmcap = ddi_system_fmcap;
407 	ddi_fm_init(dip, &fmcap, &rootnex_state->r_err_ibc);
408 	if (fmcap & DDI_FM_ERRCB_CAPABLE)
409 		ddi_fm_handler_register(dip, rootnex_fm_callback, NULL);
410 
411 	/* initialize DMA related state */
412 	e = rootnex_dma_init();
413 	if (e != DDI_SUCCESS) {
414 		mutex_destroy(&rootnex_state->r_peekpoke_mutex);
415 		kmem_free(rootnex_state, sizeof (rootnex_state_t));
416 		return (DDI_FAILURE);
417 	}
418 
419 	/* Add static root node properties */
420 	rootnex_add_props(dip);
421 
422 	/* since we can't call ddi_report_dev() */
423 	cmn_err(CE_CONT, "?root nexus = %s\n", ddi_get_name(dip));
424 
425 	/* Initialize rootnex event handle */
426 	i_ddi_rootnex_init_events(dip);
427 
428 	return (DDI_SUCCESS);
429 }
430 
431 
432 /*
433  * rootnex_detach()
434  *
435  */
436 /*ARGSUSED*/
437 static int
438 rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
439 {
440 	switch (cmd) {
441 	case DDI_SUSPEND:
442 		break;
443 	default:
444 		return (DDI_FAILURE);
445 	}
446 
447 	return (DDI_SUCCESS);
448 }
449 
450 
451 /*
452  * rootnex_dma_init()
453  *
454  */
455 /*ARGSUSED*/
456 static int
457 rootnex_dma_init()
458 {
459 	size_t bufsize;
460 
461 
462 	/*
463 	 * size of our cookie/window/copybuf state needed in dma bind that we
464 	 * pre-alloc in dma_alloc_handle
465 	 */
466 	rootnex_state->r_prealloc_cookies = rootnex_prealloc_cookies;
467 	rootnex_state->r_prealloc_size =
468 	    (rootnex_state->r_prealloc_cookies * sizeof (ddi_dma_cookie_t)) +
469 	    (rootnex_prealloc_windows * sizeof (rootnex_window_t)) +
470 	    (rootnex_prealloc_copybuf * sizeof (rootnex_pgmap_t));
471 
472 	/*
473 	 * setup DDI DMA handle kmem cache, align each handle on 64 bytes,
474 	 * allocate 16 extra bytes for struct pointer alignment
475 	 * (p->dmai_private & dma->dp_prealloc_buffer)
476 	 */
477 	bufsize = sizeof (ddi_dma_impl_t) + sizeof (rootnex_dma_t) +
478 	    rootnex_state->r_prealloc_size + 0x10;
479 	rootnex_state->r_dmahdl_cache = kmem_cache_create("rootnex_dmahdl",
480 	    bufsize, 64, NULL, NULL, NULL, NULL, NULL, 0);
481 	if (rootnex_state->r_dmahdl_cache == NULL) {
482 		return (DDI_FAILURE);
483 	}
484 
485 	/*
486 	 * allocate array to track which major numbers we have printed warnings
487 	 * for.
488 	 */
489 	rootnex_warn_list = kmem_zalloc(devcnt * sizeof (*rootnex_warn_list),
490 	    KM_SLEEP);
491 
492 	return (DDI_SUCCESS);
493 }
494 
495 
496 /*
497  * rootnex_add_props()
498  *
499  */
500 static void
501 rootnex_add_props(dev_info_t *dip)
502 {
503 	rootnex_intprop_t *rpp;
504 	int i;
505 
506 	/* Add static integer/boolean properties to the root node */
507 	rpp = rootnex_intprp;
508 	for (i = 0; i < NROOT_INTPROPS; i++) {
509 		(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip,
510 		    rpp[i].prop_name, rpp[i].prop_value);
511 	}
512 }
513 
514 
515 
516 /*
517  * *************************
518  *  ctlops related routines
519  * *************************
520  */
521 
522 /*
523  * rootnex_ctlops()
524  *
525  */
526 /*ARGSUSED*/
527 static int
528 rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
529     void *arg, void *result)
530 {
531 	int n, *ptr;
532 	struct ddi_parent_private_data *pdp;
533 
534 	switch (ctlop) {
535 	case DDI_CTLOPS_DMAPMAPC:
536 		/*
537 		 * Return 'partial' to indicate that dma mapping
538 		 * has to be done in the main MMU.
539 		 */
540 		return (DDI_DMA_PARTIAL);
541 
542 	case DDI_CTLOPS_BTOP:
543 		/*
544 		 * Convert byte count input to physical page units.
545 		 * (byte counts that are not a page-size multiple
546 		 * are rounded down)
547 		 */
548 		*(ulong_t *)result = btop(*(ulong_t *)arg);
549 		return (DDI_SUCCESS);
550 
551 	case DDI_CTLOPS_PTOB:
552 		/*
553 		 * Convert size in physical pages to bytes
554 		 */
555 		*(ulong_t *)result = ptob(*(ulong_t *)arg);
556 		return (DDI_SUCCESS);
557 
558 	case DDI_CTLOPS_BTOPR:
559 		/*
560 		 * Convert byte count input to physical page units
561 		 * (byte counts that are not a page-size multiple
562 		 * are rounded up)
563 		 */
564 		*(ulong_t *)result = btopr(*(ulong_t *)arg);
565 		return (DDI_SUCCESS);
566 
567 	case DDI_CTLOPS_POKE:
568 		return (rootnex_ctlops_poke((peekpoke_ctlops_t *)arg));
569 
570 	case DDI_CTLOPS_PEEK:
571 		return (rootnex_ctlops_peek((peekpoke_ctlops_t *)arg, result));
572 
573 	case DDI_CTLOPS_INITCHILD:
574 		return (impl_ddi_sunbus_initchild(arg));
575 
576 	case DDI_CTLOPS_UNINITCHILD:
577 		impl_ddi_sunbus_removechild(arg);
578 		return (DDI_SUCCESS);
579 
580 	case DDI_CTLOPS_REPORTDEV:
581 		return (rootnex_ctl_reportdev(rdip));
582 
583 	case DDI_CTLOPS_IOMIN:
584 		/*
585 		 * Nothing to do here but reflect back..
586 		 */
587 		return (DDI_SUCCESS);
588 
589 	case DDI_CTLOPS_REGSIZE:
590 	case DDI_CTLOPS_NREGS:
591 		break;
592 
593 	case DDI_CTLOPS_SIDDEV:
594 		if (ndi_dev_is_prom_node(rdip))
595 			return (DDI_SUCCESS);
596 		if (ndi_dev_is_persistent_node(rdip))
597 			return (DDI_SUCCESS);
598 		return (DDI_FAILURE);
599 
600 	case DDI_CTLOPS_POWER:
601 		return ((*pm_platform_power)((power_req_t *)arg));
602 
603 	case DDI_CTLOPS_RESERVED0: /* Was DDI_CTLOPS_NINTRS, obsolete */
604 	case DDI_CTLOPS_RESERVED1: /* Was DDI_CTLOPS_POKE_INIT, obsolete */
605 	case DDI_CTLOPS_RESERVED2: /* Was DDI_CTLOPS_POKE_FLUSH, obsolete */
606 	case DDI_CTLOPS_RESERVED3: /* Was DDI_CTLOPS_POKE_FINI, obsolete */
607 	case DDI_CTLOPS_RESERVED4: /* Was DDI_CTLOPS_INTR_HILEVEL, obsolete */
608 	case DDI_CTLOPS_RESERVED5: /* Was DDI_CTLOPS_XLATE_INTRS, obsolete */
609 		if (!rootnex_state->r_reserved_msg_printed) {
610 			rootnex_state->r_reserved_msg_printed = B_TRUE;
611 			cmn_err(CE_WARN, "Failing ddi_ctlops call(s) for "
612 			    "1 or more reserved/obsolete operations.");
613 		}
614 		return (DDI_FAILURE);
615 
616 	default:
617 		return (DDI_FAILURE);
618 	}
619 	/*
620 	 * The rest are for "hardware" properties
621 	 */
622 	if ((pdp = ddi_get_parent_data(rdip)) == NULL)
623 		return (DDI_FAILURE);
624 
625 	if (ctlop == DDI_CTLOPS_NREGS) {
626 		ptr = (int *)result;
627 		*ptr = pdp->par_nreg;
628 	} else {
629 		off_t *size = (off_t *)result;
630 
631 		ptr = (int *)arg;
632 		n = *ptr;
633 		if (n >= pdp->par_nreg) {
634 			return (DDI_FAILURE);
635 		}
636 		*size = (off_t)pdp->par_reg[n].regspec_size;
637 	}
638 	return (DDI_SUCCESS);
639 }
640 
641 
642 /*
643  * rootnex_ctl_reportdev()
644  *
645  */
646 static int
647 rootnex_ctl_reportdev(dev_info_t *dev)
648 {
649 	int i, n, len, f_len = 0;
650 	char *buf;
651 
652 	buf = kmem_alloc(REPORTDEV_BUFSIZE, KM_SLEEP);
653 	f_len += snprintf(buf, REPORTDEV_BUFSIZE,
654 	    "%s%d at root", ddi_driver_name(dev), ddi_get_instance(dev));
655 	len = strlen(buf);
656 
657 	for (i = 0; i < sparc_pd_getnreg(dev); i++) {
658 
659 		struct regspec *rp = sparc_pd_getreg(dev, i);
660 
661 		if (i == 0)
662 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
663 			    ": ");
664 		else
665 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
666 			    " and ");
667 		len = strlen(buf);
668 
669 		switch (rp->regspec_bustype) {
670 
671 		case BTEISA:
672 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
673 			    "%s 0x%x", DEVI_EISA_NEXNAME, rp->regspec_addr);
674 			break;
675 
676 		case BTISA:
677 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
678 			    "%s 0x%x", DEVI_ISA_NEXNAME, rp->regspec_addr);
679 			break;
680 
681 		default:
682 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
683 			    "space %x offset %x",
684 			    rp->regspec_bustype, rp->regspec_addr);
685 			break;
686 		}
687 		len = strlen(buf);
688 	}
689 	for (i = 0, n = sparc_pd_getnintr(dev); i < n; i++) {
690 		int pri;
691 
692 		if (i != 0) {
693 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
694 			    ",");
695 			len = strlen(buf);
696 		}
697 		pri = INT_IPL(sparc_pd_getintr(dev, i)->intrspec_pri);
698 		f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
699 		    " sparc ipl %d", pri);
700 		len = strlen(buf);
701 	}
702 #ifdef DEBUG
703 	if (f_len + 1 >= REPORTDEV_BUFSIZE) {
704 		cmn_err(CE_NOTE, "next message is truncated: "
705 		    "printed length 1024, real length %d", f_len);
706 	}
707 #endif /* DEBUG */
708 	cmn_err(CE_CONT, "?%s\n", buf);
709 	kmem_free(buf, REPORTDEV_BUFSIZE);
710 	return (DDI_SUCCESS);
711 }
712 
713 
714 /*
715  * rootnex_ctlops_poke()
716  *
717  */
718 static int
719 rootnex_ctlops_poke(peekpoke_ctlops_t *in_args)
720 {
721 	int err = DDI_SUCCESS;
722 	on_trap_data_t otd;
723 
724 	/* Cautious access not supported. */
725 	if (in_args->handle != NULL)
726 		return (DDI_FAILURE);
727 
728 	mutex_enter(&rootnex_state->r_peekpoke_mutex);
729 
730 	/* Set up protected environment. */
731 	if (!on_trap(&otd, OT_DATA_ACCESS)) {
732 		switch (in_args->size) {
733 		case sizeof (uint8_t):
734 			*(uint8_t *)in_args->dev_addr = *(uint8_t *)
735 			    in_args->host_addr;
736 			break;
737 
738 		case sizeof (uint16_t):
739 			*(uint16_t *)in_args->dev_addr =
740 			    *(uint16_t *)in_args->host_addr;
741 			break;
742 
743 		case sizeof (uint32_t):
744 			*(uint32_t *)in_args->dev_addr =
745 			    *(uint32_t *)in_args->host_addr;
746 			break;
747 
748 		case sizeof (uint64_t):
749 			*(uint64_t *)in_args->dev_addr =
750 			    *(uint64_t *)in_args->host_addr;
751 			break;
752 
753 		default:
754 			err = DDI_FAILURE;
755 			break;
756 		}
757 	} else
758 		err = DDI_FAILURE;
759 
760 	/* Take down protected environment. */
761 	no_trap();
762 	mutex_exit(&rootnex_state->r_peekpoke_mutex);
763 
764 	return (err);
765 }
766 
767 
768 /*
769  * rootnex_ctlops_peek()
770  *
771  */
772 static int
773 rootnex_ctlops_peek(peekpoke_ctlops_t *in_args, void *result)
774 {
775 	int err = DDI_SUCCESS;
776 	on_trap_data_t otd;
777 
778 	/* Cautious access not supported. */
779 	if (in_args->handle != NULL)
780 		return (DDI_FAILURE);
781 
782 	mutex_enter(&rootnex_state->r_peekpoke_mutex);
783 
784 	if (!on_trap(&otd, OT_DATA_ACCESS)) {
785 		switch (in_args->size) {
786 		case sizeof (uint8_t):
787 			*(uint8_t *)in_args->host_addr =
788 			    *(uint8_t *)in_args->dev_addr;
789 			break;
790 
791 		case sizeof (uint16_t):
792 			*(uint16_t *)in_args->host_addr =
793 			    *(uint16_t *)in_args->dev_addr;
794 			break;
795 
796 		case sizeof (uint32_t):
797 			*(uint32_t *)in_args->host_addr =
798 			    *(uint32_t *)in_args->dev_addr;
799 			break;
800 
801 		case sizeof (uint64_t):
802 			*(uint64_t *)in_args->host_addr =
803 			    *(uint64_t *)in_args->dev_addr;
804 			break;
805 
806 		default:
807 			err = DDI_FAILURE;
808 			break;
809 		}
810 		result = (void *)in_args->host_addr;
811 	} else
812 		err = DDI_FAILURE;
813 
814 	no_trap();
815 	mutex_exit(&rootnex_state->r_peekpoke_mutex);
816 
817 	return (err);
818 }
819 
820 
821 
822 /*
823  * ******************
824  *  map related code
825  * ******************
826  */
827 
828 /*
829  * rootnex_map()
830  *
831  */
832 static int
833 rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, off_t offset,
834     off_t len, caddr_t *vaddrp)
835 {
836 	struct regspec *rp, tmp_reg;
837 	ddi_map_req_t mr = *mp;		/* Get private copy of request */
838 	int error;
839 
840 	mp = &mr;
841 
842 	switch (mp->map_op)  {
843 	case DDI_MO_MAP_LOCKED:
844 	case DDI_MO_UNMAP:
845 	case DDI_MO_MAP_HANDLE:
846 		break;
847 	default:
848 #ifdef	DDI_MAP_DEBUG
849 		cmn_err(CE_WARN, "rootnex_map: unimplemented map op %d.",
850 		    mp->map_op);
851 #endif	/* DDI_MAP_DEBUG */
852 		return (DDI_ME_UNIMPLEMENTED);
853 	}
854 
855 	if (mp->map_flags & DDI_MF_USER_MAPPING)  {
856 #ifdef	DDI_MAP_DEBUG
857 		cmn_err(CE_WARN, "rootnex_map: unimplemented map type: user.");
858 #endif	/* DDI_MAP_DEBUG */
859 		return (DDI_ME_UNIMPLEMENTED);
860 	}
861 
862 	/*
863 	 * First, if given an rnumber, convert it to a regspec...
864 	 * (Presumably, this is on behalf of a child of the root node?)
865 	 */
866 
867 	if (mp->map_type == DDI_MT_RNUMBER)  {
868 
869 		int rnumber = mp->map_obj.rnumber;
870 #ifdef	DDI_MAP_DEBUG
871 		static char *out_of_range =
872 		    "rootnex_map: Out of range rnumber <%d>, device <%s>";
873 #endif	/* DDI_MAP_DEBUG */
874 
875 		rp = i_ddi_rnumber_to_regspec(rdip, rnumber);
876 		if (rp == NULL)  {
877 #ifdef	DDI_MAP_DEBUG
878 			cmn_err(CE_WARN, out_of_range, rnumber,
879 			    ddi_get_name(rdip));
880 #endif	/* DDI_MAP_DEBUG */
881 			return (DDI_ME_RNUMBER_RANGE);
882 		}
883 
884 		/*
885 		 * Convert the given ddi_map_req_t from rnumber to regspec...
886 		 */
887 
888 		mp->map_type = DDI_MT_REGSPEC;
889 		mp->map_obj.rp = rp;
890 	}
891 
892 	/*
893 	 * Adjust offset and length correspnding to called values...
894 	 * XXX: A non-zero length means override the one in the regspec
895 	 * XXX: (regardless of what's in the parent's range?)
896 	 */
897 
898 	tmp_reg = *(mp->map_obj.rp);		/* Preserve underlying data */
899 	rp = mp->map_obj.rp = &tmp_reg;		/* Use tmp_reg in request */
900 
901 #ifdef	DDI_MAP_DEBUG
902 	cmn_err(CE_CONT,
903 		"rootnex: <%s,%s> <0x%x, 0x%x, 0x%d>"
904 		" offset %d len %d handle 0x%x\n",
905 		ddi_get_name(dip), ddi_get_name(rdip),
906 		rp->regspec_bustype, rp->regspec_addr, rp->regspec_size,
907 		offset, len, mp->map_handlep);
908 #endif	/* DDI_MAP_DEBUG */
909 
910 	/*
911 	 * I/O or memory mapping:
912 	 *
913 	 *	<bustype=0, addr=x, len=x>: memory
914 	 *	<bustype=1, addr=x, len=x>: i/o
915 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
916 	 */
917 
918 	if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) {
919 		cmn_err(CE_WARN, "<%s,%s> invalid register spec"
920 		    " <0x%x, 0x%x, 0x%x>", ddi_get_name(dip),
921 		    ddi_get_name(rdip), rp->regspec_bustype,
922 		    rp->regspec_addr, rp->regspec_size);
923 		return (DDI_ME_INVAL);
924 	}
925 
926 	if (rp->regspec_bustype > 1 && rp->regspec_addr == 0) {
927 		/*
928 		 * compatibility i/o mapping
929 		 */
930 		rp->regspec_bustype += (uint_t)offset;
931 	} else {
932 		/*
933 		 * Normal memory or i/o mapping
934 		 */
935 		rp->regspec_addr += (uint_t)offset;
936 	}
937 
938 	if (len != 0)
939 		rp->regspec_size = (uint_t)len;
940 
941 #ifdef	DDI_MAP_DEBUG
942 	cmn_err(CE_CONT,
943 		"             <%s,%s> <0x%x, 0x%x, 0x%d>"
944 		" offset %d len %d handle 0x%x\n",
945 		ddi_get_name(dip), ddi_get_name(rdip),
946 		rp->regspec_bustype, rp->regspec_addr, rp->regspec_size,
947 		offset, len, mp->map_handlep);
948 #endif	/* DDI_MAP_DEBUG */
949 
950 	/*
951 	 * Apply any parent ranges at this level, if applicable.
952 	 * (This is where nexus specific regspec translation takes place.
953 	 * Use of this function is implicit agreement that translation is
954 	 * provided via ddi_apply_range.)
955 	 */
956 
957 #ifdef	DDI_MAP_DEBUG
958 	ddi_map_debug("applying range of parent <%s> to child <%s>...\n",
959 	    ddi_get_name(dip), ddi_get_name(rdip));
960 #endif	/* DDI_MAP_DEBUG */
961 
962 	if ((error = i_ddi_apply_range(dip, rdip, mp->map_obj.rp)) != 0)
963 		return (error);
964 
965 	switch (mp->map_op)  {
966 	case DDI_MO_MAP_LOCKED:
967 
968 		/*
969 		 * Set up the locked down kernel mapping to the regspec...
970 		 */
971 
972 		return (rootnex_map_regspec(mp, vaddrp));
973 
974 	case DDI_MO_UNMAP:
975 
976 		/*
977 		 * Release mapping...
978 		 */
979 
980 		return (rootnex_unmap_regspec(mp, vaddrp));
981 
982 	case DDI_MO_MAP_HANDLE:
983 
984 		return (rootnex_map_handle(mp));
985 
986 	default:
987 		return (DDI_ME_UNIMPLEMENTED);
988 	}
989 }
990 
991 
992 /*
993  * rootnex_map_fault()
994  *
995  *	fault in mappings for requestors
996  */
997 /*ARGSUSED*/
998 static int
999 rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip, struct hat *hat,
1000     struct seg *seg, caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot,
1001     uint_t lock)
1002 {
1003 
1004 #ifdef	DDI_MAP_DEBUG
1005 	ddi_map_debug("rootnex_map_fault: address <%x> pfn <%x>", addr, pfn);
1006 	ddi_map_debug(" Seg <%s>\n",
1007 	    seg->s_ops == &segdev_ops ? "segdev" :
1008 	    seg == &kvseg ? "segkmem" : "NONE!");
1009 #endif	/* DDI_MAP_DEBUG */
1010 
1011 	/*
1012 	 * This is all terribly broken, but it is a start
1013 	 *
1014 	 * XXX	Note that this test means that segdev_ops
1015 	 *	must be exported from seg_dev.c.
1016 	 * XXX	What about devices with their own segment drivers?
1017 	 */
1018 	if (seg->s_ops == &segdev_ops) {
1019 		struct segdev_data *sdp =
1020 			(struct segdev_data *)seg->s_data;
1021 
1022 		if (hat == NULL) {
1023 			/*
1024 			 * This is one plausible interpretation of
1025 			 * a null hat i.e. use the first hat on the
1026 			 * address space hat list which by convention is
1027 			 * the hat of the system MMU.  At alternative
1028 			 * would be to panic .. this might well be better ..
1029 			 */
1030 			ASSERT(AS_READ_HELD(seg->s_as, &seg->s_as->a_lock));
1031 			hat = seg->s_as->a_hat;
1032 			cmn_err(CE_NOTE, "rootnex_map_fault: nil hat");
1033 		}
1034 		hat_devload(hat, addr, MMU_PAGESIZE, pfn, prot | sdp->hat_attr,
1035 		    (lock ? HAT_LOAD_LOCK : HAT_LOAD));
1036 	} else if (seg == &kvseg && dp == NULL) {
1037 		hat_devload(kas.a_hat, addr, MMU_PAGESIZE, pfn, prot,
1038 		    HAT_LOAD_LOCK);
1039 	} else
1040 		return (DDI_FAILURE);
1041 	return (DDI_SUCCESS);
1042 }
1043 
1044 
1045 /*
1046  * rootnex_map_regspec()
1047  *     we don't support mapping of I/O cards above 4Gb
1048  */
1049 static int
1050 rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp)
1051 {
1052 	ulong_t base;
1053 	void *cvaddr;
1054 	uint_t npages, pgoffset;
1055 	struct regspec *rp;
1056 	ddi_acc_hdl_t *hp;
1057 	ddi_acc_impl_t *ap;
1058 	uint_t	hat_acc_flags;
1059 
1060 	rp = mp->map_obj.rp;
1061 	hp = mp->map_handlep;
1062 
1063 #ifdef	DDI_MAP_DEBUG
1064 	ddi_map_debug(
1065 	    "rootnex_map_regspec: <0x%x 0x%x 0x%x> handle 0x%x\n",
1066 	    rp->regspec_bustype, rp->regspec_addr,
1067 	    rp->regspec_size, mp->map_handlep);
1068 #endif	/* DDI_MAP_DEBUG */
1069 
1070 	/*
1071 	 * I/O or memory mapping
1072 	 *
1073 	 *	<bustype=0, addr=x, len=x>: memory
1074 	 *	<bustype=1, addr=x, len=x>: i/o
1075 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
1076 	 */
1077 
1078 	if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) {
1079 		cmn_err(CE_WARN, "rootnex: invalid register spec"
1080 		    " <0x%x, 0x%x, 0x%x>", rp->regspec_bustype,
1081 		    rp->regspec_addr, rp->regspec_size);
1082 		return (DDI_FAILURE);
1083 	}
1084 
1085 	if (rp->regspec_bustype != 0) {
1086 		/*
1087 		 * I/O space - needs a handle.
1088 		 */
1089 		if (hp == NULL) {
1090 			return (DDI_FAILURE);
1091 		}
1092 		ap = (ddi_acc_impl_t *)hp->ah_platform_private;
1093 		ap->ahi_acc_attr |= DDI_ACCATTR_IO_SPACE;
1094 		impl_acc_hdl_init(hp);
1095 
1096 		if (mp->map_flags & DDI_MF_DEVICE_MAPPING) {
1097 #ifdef  DDI_MAP_DEBUG
1098 			ddi_map_debug("rootnex_map_regspec: mmap() \
1099 to I/O space is not supported.\n");
1100 #endif  /* DDI_MAP_DEBUG */
1101 			return (DDI_ME_INVAL);
1102 		} else {
1103 			/*
1104 			 * 1275-compliant vs. compatibility i/o mapping
1105 			 */
1106 			*vaddrp =
1107 			    (rp->regspec_bustype > 1 && rp->regspec_addr == 0) ?
1108 				((caddr_t)(uintptr_t)rp->regspec_bustype) :
1109 				((caddr_t)(uintptr_t)rp->regspec_addr);
1110 		}
1111 
1112 #ifdef	DDI_MAP_DEBUG
1113 		ddi_map_debug(
1114 	    "rootnex_map_regspec: \"Mapping\" %d bytes I/O space at 0x%x\n",
1115 		    rp->regspec_size, *vaddrp);
1116 #endif	/* DDI_MAP_DEBUG */
1117 		return (DDI_SUCCESS);
1118 	}
1119 
1120 	/*
1121 	 * Memory space
1122 	 */
1123 
1124 	if (hp != NULL) {
1125 		/*
1126 		 * hat layer ignores
1127 		 * hp->ah_acc.devacc_attr_endian_flags.
1128 		 */
1129 		switch (hp->ah_acc.devacc_attr_dataorder) {
1130 		case DDI_STRICTORDER_ACC:
1131 			hat_acc_flags = HAT_STRICTORDER;
1132 			break;
1133 		case DDI_UNORDERED_OK_ACC:
1134 			hat_acc_flags = HAT_UNORDERED_OK;
1135 			break;
1136 		case DDI_MERGING_OK_ACC:
1137 			hat_acc_flags = HAT_MERGING_OK;
1138 			break;
1139 		case DDI_LOADCACHING_OK_ACC:
1140 			hat_acc_flags = HAT_LOADCACHING_OK;
1141 			break;
1142 		case DDI_STORECACHING_OK_ACC:
1143 			hat_acc_flags = HAT_STORECACHING_OK;
1144 			break;
1145 		}
1146 		ap = (ddi_acc_impl_t *)hp->ah_platform_private;
1147 		ap->ahi_acc_attr |= DDI_ACCATTR_CPU_VADDR;
1148 		impl_acc_hdl_init(hp);
1149 		hp->ah_hat_flags = hat_acc_flags;
1150 	} else {
1151 		hat_acc_flags = HAT_STRICTORDER;
1152 	}
1153 
1154 	base = (ulong_t)rp->regspec_addr & (~MMU_PAGEOFFSET); /* base addr */
1155 	pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET; /* offset */
1156 
1157 	if (rp->regspec_size == 0) {
1158 #ifdef  DDI_MAP_DEBUG
1159 		ddi_map_debug("rootnex_map_regspec: zero regspec_size\n");
1160 #endif  /* DDI_MAP_DEBUG */
1161 		return (DDI_ME_INVAL);
1162 	}
1163 
1164 	if (mp->map_flags & DDI_MF_DEVICE_MAPPING) {
1165 		*vaddrp = (caddr_t)mmu_btop(base);
1166 	} else {
1167 		npages = mmu_btopr(rp->regspec_size + pgoffset);
1168 
1169 #ifdef	DDI_MAP_DEBUG
1170 		ddi_map_debug("rootnex_map_regspec: Mapping %d pages \
1171 physical %x ",
1172 		    npages, base);
1173 #endif	/* DDI_MAP_DEBUG */
1174 
1175 		cvaddr = device_arena_alloc(ptob(npages), VM_NOSLEEP);
1176 		if (cvaddr == NULL)
1177 			return (DDI_ME_NORESOURCES);
1178 
1179 		/*
1180 		 * Now map in the pages we've allocated...
1181 		 */
1182 		hat_devload(kas.a_hat, cvaddr, mmu_ptob(npages), mmu_btop(base),
1183 		    mp->map_prot | hat_acc_flags, HAT_LOAD_LOCK);
1184 		*vaddrp = (caddr_t)cvaddr + pgoffset;
1185 	}
1186 
1187 #ifdef	DDI_MAP_DEBUG
1188 	ddi_map_debug("at virtual 0x%x\n", *vaddrp);
1189 #endif	/* DDI_MAP_DEBUG */
1190 	return (DDI_SUCCESS);
1191 }
1192 
1193 
1194 /*
1195  * rootnex_unmap_regspec()
1196  *
1197  */
1198 static int
1199 rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp)
1200 {
1201 	caddr_t addr = (caddr_t)*vaddrp;
1202 	uint_t npages, pgoffset;
1203 	struct regspec *rp;
1204 
1205 	if (mp->map_flags & DDI_MF_DEVICE_MAPPING)
1206 		return (0);
1207 
1208 	rp = mp->map_obj.rp;
1209 
1210 	if (rp->regspec_size == 0) {
1211 #ifdef  DDI_MAP_DEBUG
1212 		ddi_map_debug("rootnex_unmap_regspec: zero regspec_size\n");
1213 #endif  /* DDI_MAP_DEBUG */
1214 		return (DDI_ME_INVAL);
1215 	}
1216 
1217 	/*
1218 	 * I/O or memory mapping:
1219 	 *
1220 	 *	<bustype=0, addr=x, len=x>: memory
1221 	 *	<bustype=1, addr=x, len=x>: i/o
1222 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
1223 	 */
1224 	if (rp->regspec_bustype != 0) {
1225 		/*
1226 		 * This is I/O space, which requires no particular
1227 		 * processing on unmap since it isn't mapped in the
1228 		 * first place.
1229 		 */
1230 		return (DDI_SUCCESS);
1231 	}
1232 
1233 	/*
1234 	 * Memory space
1235 	 */
1236 	pgoffset = (uintptr_t)addr & MMU_PAGEOFFSET;
1237 	npages = mmu_btopr(rp->regspec_size + pgoffset);
1238 	hat_unload(kas.a_hat, addr - pgoffset, ptob(npages), HAT_UNLOAD_UNLOCK);
1239 	device_arena_free(addr - pgoffset, ptob(npages));
1240 
1241 	/*
1242 	 * Destroy the pointer - the mapping has logically gone
1243 	 */
1244 	*vaddrp = NULL;
1245 
1246 	return (DDI_SUCCESS);
1247 }
1248 
1249 
1250 /*
1251  * rootnex_map_handle()
1252  *
1253  */
1254 static int
1255 rootnex_map_handle(ddi_map_req_t *mp)
1256 {
1257 	ddi_acc_hdl_t *hp;
1258 	ulong_t base;
1259 	uint_t pgoffset;
1260 	struct regspec *rp;
1261 
1262 	rp = mp->map_obj.rp;
1263 
1264 #ifdef	DDI_MAP_DEBUG
1265 	ddi_map_debug(
1266 	    "rootnex_map_handle: <0x%x 0x%x 0x%x> handle 0x%x\n",
1267 	    rp->regspec_bustype, rp->regspec_addr,
1268 	    rp->regspec_size, mp->map_handlep);
1269 #endif	/* DDI_MAP_DEBUG */
1270 
1271 	/*
1272 	 * I/O or memory mapping:
1273 	 *
1274 	 *	<bustype=0, addr=x, len=x>: memory
1275 	 *	<bustype=1, addr=x, len=x>: i/o
1276 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
1277 	 */
1278 	if (rp->regspec_bustype != 0) {
1279 		/*
1280 		 * This refers to I/O space, and we don't support "mapping"
1281 		 * I/O space to a user.
1282 		 */
1283 		return (DDI_FAILURE);
1284 	}
1285 
1286 	/*
1287 	 * Set up the hat_flags for the mapping.
1288 	 */
1289 	hp = mp->map_handlep;
1290 
1291 	switch (hp->ah_acc.devacc_attr_endian_flags) {
1292 	case DDI_NEVERSWAP_ACC:
1293 		hp->ah_hat_flags = HAT_NEVERSWAP | HAT_STRICTORDER;
1294 		break;
1295 	case DDI_STRUCTURE_LE_ACC:
1296 		hp->ah_hat_flags = HAT_STRUCTURE_LE;
1297 		break;
1298 	case DDI_STRUCTURE_BE_ACC:
1299 		return (DDI_FAILURE);
1300 	default:
1301 		return (DDI_REGS_ACC_CONFLICT);
1302 	}
1303 
1304 	switch (hp->ah_acc.devacc_attr_dataorder) {
1305 	case DDI_STRICTORDER_ACC:
1306 		break;
1307 	case DDI_UNORDERED_OK_ACC:
1308 		hp->ah_hat_flags |= HAT_UNORDERED_OK;
1309 		break;
1310 	case DDI_MERGING_OK_ACC:
1311 		hp->ah_hat_flags |= HAT_MERGING_OK;
1312 		break;
1313 	case DDI_LOADCACHING_OK_ACC:
1314 		hp->ah_hat_flags |= HAT_LOADCACHING_OK;
1315 		break;
1316 	case DDI_STORECACHING_OK_ACC:
1317 		hp->ah_hat_flags |= HAT_STORECACHING_OK;
1318 		break;
1319 	default:
1320 		return (DDI_FAILURE);
1321 	}
1322 
1323 	base = (ulong_t)rp->regspec_addr & (~MMU_PAGEOFFSET); /* base addr */
1324 	pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET; /* offset */
1325 
1326 	if (rp->regspec_size == 0)
1327 		return (DDI_ME_INVAL);
1328 
1329 	hp->ah_pfn = mmu_btop(base);
1330 	hp->ah_pnum = mmu_btopr(rp->regspec_size + pgoffset);
1331 
1332 	return (DDI_SUCCESS);
1333 }
1334 
1335 
1336 
1337 /*
1338  * ************************
1339  *  interrupt related code
1340  * ************************
1341  */
1342 
1343 /*
1344  * rootnex_intr_ops()
1345  *	bus_intr_op() function for interrupt support
1346  */
1347 /* ARGSUSED */
1348 static int
1349 rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op,
1350     ddi_intr_handle_impl_t *hdlp, void *result)
1351 {
1352 	struct intrspec			*ispec;
1353 	struct ddi_parent_private_data	*pdp;
1354 
1355 	DDI_INTR_NEXDBG((CE_CONT,
1356 	    "rootnex_intr_ops: pdip = %p, rdip = %p, intr_op = %x, hdlp = %p\n",
1357 	    (void *)pdip, (void *)rdip, intr_op, (void *)hdlp));
1358 
1359 	/* Process the interrupt operation */
1360 	switch (intr_op) {
1361 	case DDI_INTROP_GETCAP:
1362 		/* First check with pcplusmp */
1363 		if (psm_intr_ops == NULL)
1364 			return (DDI_FAILURE);
1365 
1366 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_CAP, result)) {
1367 			*(int *)result = 0;
1368 			return (DDI_FAILURE);
1369 		}
1370 		break;
1371 	case DDI_INTROP_SETCAP:
1372 		if (psm_intr_ops == NULL)
1373 			return (DDI_FAILURE);
1374 
1375 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_CAP, result))
1376 			return (DDI_FAILURE);
1377 		break;
1378 	case DDI_INTROP_ALLOC:
1379 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
1380 			return (DDI_FAILURE);
1381 		hdlp->ih_pri = ispec->intrspec_pri;
1382 		*(int *)result = hdlp->ih_scratch1;
1383 		break;
1384 	case DDI_INTROP_FREE:
1385 		pdp = ddi_get_parent_data(rdip);
1386 		/*
1387 		 * Special case for 'pcic' driver' only.
1388 		 * If an intrspec was created for it, clean it up here
1389 		 * See detailed comments on this in the function
1390 		 * rootnex_get_ispec().
1391 		 */
1392 		if (pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) {
1393 			kmem_free(pdp->par_intr, sizeof (struct intrspec) *
1394 			    pdp->par_nintr);
1395 			/*
1396 			 * Set it to zero; so that
1397 			 * DDI framework doesn't free it again
1398 			 */
1399 			pdp->par_intr = NULL;
1400 			pdp->par_nintr = 0;
1401 		}
1402 		break;
1403 	case DDI_INTROP_GETPRI:
1404 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
1405 			return (DDI_FAILURE);
1406 		*(int *)result = ispec->intrspec_pri;
1407 		break;
1408 	case DDI_INTROP_SETPRI:
1409 		/* Validate the interrupt priority passed to us */
1410 		if (*(int *)result > LOCK_LEVEL)
1411 			return (DDI_FAILURE);
1412 
1413 		/* Ensure that PSM is all initialized and ispec is ok */
1414 		if ((psm_intr_ops == NULL) ||
1415 		    ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL))
1416 			return (DDI_FAILURE);
1417 
1418 		/* Change the priority */
1419 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_PRI, result) ==
1420 		    PSM_FAILURE)
1421 			return (DDI_FAILURE);
1422 
1423 		/* update the ispec with the new priority */
1424 		ispec->intrspec_pri =  *(int *)result;
1425 		break;
1426 	case DDI_INTROP_ADDISR:
1427 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
1428 			return (DDI_FAILURE);
1429 		ispec->intrspec_func = hdlp->ih_cb_func;
1430 		break;
1431 	case DDI_INTROP_REMISR:
1432 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
1433 			return (DDI_FAILURE);
1434 		ispec->intrspec_func = (uint_t (*)()) 0;
1435 		break;
1436 	case DDI_INTROP_ENABLE:
1437 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
1438 			return (DDI_FAILURE);
1439 
1440 		/* Call psmi to translate irq with the dip */
1441 		if (psm_intr_ops == NULL)
1442 			return (DDI_FAILURE);
1443 
1444 		((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
1445 		(void) (*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_XLATE_VECTOR,
1446 		    (int *)&hdlp->ih_vector);
1447 
1448 		/* Add the interrupt handler */
1449 		if (!add_avintr((void *)hdlp, ispec->intrspec_pri,
1450 		    hdlp->ih_cb_func, DEVI(rdip)->devi_name, hdlp->ih_vector,
1451 		    hdlp->ih_cb_arg1, hdlp->ih_cb_arg2, NULL, rdip))
1452 			return (DDI_FAILURE);
1453 		break;
1454 	case DDI_INTROP_DISABLE:
1455 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
1456 			return (DDI_FAILURE);
1457 
1458 		/* Call psm_ops() to translate irq with the dip */
1459 		if (psm_intr_ops == NULL)
1460 			return (DDI_FAILURE);
1461 
1462 		((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
1463 		(void) (*psm_intr_ops)(rdip, hdlp,
1464 		    PSM_INTR_OP_XLATE_VECTOR, (int *)&hdlp->ih_vector);
1465 
1466 		/* Remove the interrupt handler */
1467 		rem_avintr((void *)hdlp, ispec->intrspec_pri,
1468 		    hdlp->ih_cb_func, hdlp->ih_vector);
1469 		break;
1470 	case DDI_INTROP_SETMASK:
1471 		if (psm_intr_ops == NULL)
1472 			return (DDI_FAILURE);
1473 
1474 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_MASK, NULL))
1475 			return (DDI_FAILURE);
1476 		break;
1477 	case DDI_INTROP_CLRMASK:
1478 		if (psm_intr_ops == NULL)
1479 			return (DDI_FAILURE);
1480 
1481 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_CLEAR_MASK, NULL))
1482 			return (DDI_FAILURE);
1483 		break;
1484 	case DDI_INTROP_GETPENDING:
1485 		if (psm_intr_ops == NULL)
1486 			return (DDI_FAILURE);
1487 
1488 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_PENDING,
1489 		    result)) {
1490 			*(int *)result = 0;
1491 			return (DDI_FAILURE);
1492 		}
1493 		break;
1494 	case DDI_INTROP_NINTRS:
1495 		if ((pdp = ddi_get_parent_data(rdip)) == NULL)
1496 			return (DDI_FAILURE);
1497 		*(int *)result = pdp->par_nintr;
1498 		if (pdp->par_nintr == 0) {
1499 			/*
1500 			 * Special case for 'pcic' driver' only. This driver
1501 			 * driver is a child of 'isa' and 'rootnex' drivers.
1502 			 *
1503 			 * See detailed comments on this in the function
1504 			 * rootnex_get_ispec().
1505 			 *
1506 			 * Children of 'pcic' send 'NINITR' request all the
1507 			 * way to rootnex driver. But, the 'pdp->par_nintr'
1508 			 * field may not initialized. So, we fake it here
1509 			 * to return 1 (a la what PCMCIA nexus does).
1510 			 */
1511 			if (strcmp(ddi_get_name(rdip), "pcic") == 0)
1512 				*(int *)result = 1;
1513 		}
1514 		break;
1515 	case DDI_INTROP_SUPPORTED_TYPES:
1516 		*(int *)result = 0;
1517 		*(int *)result |= DDI_INTR_TYPE_FIXED;	/* Always ... */
1518 		break;
1519 	case DDI_INTROP_NAVAIL:
1520 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
1521 			return (DDI_FAILURE);
1522 
1523 		if (psm_intr_ops == NULL) {
1524 			*(int *)result = 1;
1525 			break;
1526 		}
1527 
1528 		/* Priority in the handle not initialized yet */
1529 		hdlp->ih_pri = ispec->intrspec_pri;
1530 		(void) (*psm_intr_ops)(rdip, hdlp,
1531 		    PSM_INTR_OP_NAVAIL_VECTORS, result);
1532 		break;
1533 	default:
1534 		return (DDI_FAILURE);
1535 	}
1536 
1537 	return (DDI_SUCCESS);
1538 }
1539 
1540 
1541 /*
1542  * rootnex_get_ispec()
1543  *	convert an interrupt number to an interrupt specification.
1544  *	The interrupt number determines which interrupt spec will be
1545  *	returned if more than one exists.
1546  *
1547  *	Look into the parent private data area of the 'rdip' to find out
1548  *	the interrupt specification.  First check to make sure there is
1549  *	one that matchs "inumber" and then return a pointer to it.
1550  *
1551  *	Return NULL if one could not be found.
1552  *
1553  *	NOTE: This is needed for rootnex_intr_ops()
1554  */
1555 static struct intrspec *
1556 rootnex_get_ispec(dev_info_t *rdip, int inum)
1557 {
1558 	struct ddi_parent_private_data *pdp = ddi_get_parent_data(rdip);
1559 
1560 	/*
1561 	 * Special case handling for drivers that provide their own
1562 	 * intrspec structures instead of relying on the DDI framework.
1563 	 *
1564 	 * A broken hardware driver in ON could potentially provide its
1565 	 * own intrspec structure, instead of relying on the hardware.
1566 	 * If these drivers are children of 'rootnex' then we need to
1567 	 * continue to provide backward compatibility to them here.
1568 	 *
1569 	 * Following check is a special case for 'pcic' driver which
1570 	 * was found to have broken hardwre andby provides its own intrspec.
1571 	 *
1572 	 * Verbatim comments from this driver are shown here:
1573 	 * "Don't use the ddi_add_intr since we don't have a
1574 	 * default intrspec in all cases."
1575 	 *
1576 	 * Since an 'ispec' may not be always created for it,
1577 	 * check for that and create one if so.
1578 	 *
1579 	 * NOTE: Currently 'pcic' is the only driver found to do this.
1580 	 */
1581 	if (!pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) {
1582 		pdp->par_nintr = 1;
1583 		pdp->par_intr = kmem_zalloc(sizeof (struct intrspec) *
1584 		    pdp->par_nintr, KM_SLEEP);
1585 	}
1586 
1587 	/* Validate the interrupt number */
1588 	if (inum >= pdp->par_nintr)
1589 		return (NULL);
1590 
1591 	/* Get the interrupt structure pointer and return that */
1592 	return ((struct intrspec *)&pdp->par_intr[inum]);
1593 }
1594 
1595 
1596 /*
1597  * ******************
1598  *  dma related code
1599  * ******************
1600  */
1601 
1602 /*
1603  * rootnex_dma_allochdl()
1604  *    called from ddi_dma_alloc_handle().
1605  */
1606 /*ARGSUSED*/
1607 static int
1608 rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
1609     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
1610 {
1611 	uint64_t maxsegmentsize_ll;
1612 	uint_t maxsegmentsize;
1613 	ddi_dma_impl_t *hp;
1614 	rootnex_dma_t *dma;
1615 	uint64_t count_max;
1616 	uint64_t seg;
1617 	int kmflag;
1618 	int e;
1619 
1620 
1621 	/* convert our sleep flags */
1622 	if (waitfp == DDI_DMA_SLEEP) {
1623 		kmflag = KM_SLEEP;
1624 	} else {
1625 		kmflag = KM_NOSLEEP;
1626 	}
1627 
1628 	/*
1629 	 * We try to do only one memory allocation here. We'll do a little
1630 	 * pointer manipulation later. If the bind ends up taking more than
1631 	 * our prealloc's space, we'll have to allocate more memory in the
1632 	 * bind operation. Not great, but much better than before and the
1633 	 * best we can do with the current bind interfaces.
1634 	 */
1635 	hp = kmem_cache_alloc(rootnex_state->r_dmahdl_cache, kmflag);
1636 	if (hp == NULL) {
1637 		if (waitfp != DDI_DMA_DONTWAIT) {
1638 			ddi_set_callback(waitfp, arg,
1639 			    &rootnex_state->r_dvma_call_list_id);
1640 		}
1641 		return (DDI_DMA_NORESOURCES);
1642 	}
1643 
1644 	/* Do our pointer manipulation now, align the structures */
1645 	hp->dmai_private = (void *)(((uintptr_t)hp +
1646 	    (uintptr_t)sizeof (ddi_dma_impl_t) + 0x7) & ~0x7);
1647 	dma = (rootnex_dma_t *)hp->dmai_private;
1648 	dma->dp_prealloc_buffer = (uchar_t *)(((uintptr_t)dma +
1649 	    sizeof (rootnex_dma_t) + 0x7) & ~0x7);
1650 
1651 	/* setup the handle */
1652 	rootnex_clean_dmahdl(hp);
1653 	dma->dp_dip = rdip;
1654 	dma->dp_sglinfo.si_min_addr = attr->dma_attr_addr_lo;
1655 	dma->dp_sglinfo.si_max_addr = attr->dma_attr_addr_hi;
1656 	hp->dmai_minxfer = attr->dma_attr_minxfer;
1657 	hp->dmai_burstsizes = attr->dma_attr_burstsizes;
1658 	hp->dmai_rdip = rdip;
1659 	hp->dmai_attr = *attr;
1660 
1661 	/* we don't need to worry about the SPL since we do a tryenter */
1662 	mutex_init(&dma->dp_mutex, NULL, MUTEX_DRIVER, NULL);
1663 
1664 	/*
1665 	 * Figure out our maximum segment size. If the segment size is greater
1666 	 * than 4G, we will limit it to (4G - 1) since the max size of a dma
1667 	 * object (ddi_dma_obj_t.dmao_size) is 32 bits. dma_attr_seg and
1668 	 * dma_attr_count_max are size-1 type values.
1669 	 *
1670 	 * Maximum segment size is the largest physically contiguous chunk of
1671 	 * memory that we can return from a bind (i.e. the maximum size of a
1672 	 * single cookie).
1673 	 */
1674 
1675 	/* handle the rollover cases */
1676 	seg = attr->dma_attr_seg + 1;
1677 	if (seg < attr->dma_attr_seg) {
1678 		seg = attr->dma_attr_seg;
1679 	}
1680 	count_max = attr->dma_attr_count_max + 1;
1681 	if (count_max < attr->dma_attr_count_max) {
1682 		count_max = attr->dma_attr_count_max;
1683 	}
1684 
1685 	/*
1686 	 * granularity may or may not be a power of two. If it isn't, we can't
1687 	 * use a simple mask.
1688 	 */
1689 	if (attr->dma_attr_granular & (attr->dma_attr_granular - 1)) {
1690 		dma->dp_granularity_power_2 = B_FALSE;
1691 	} else {
1692 		dma->dp_granularity_power_2 = B_TRUE;
1693 	}
1694 
1695 	/*
1696 	 * maxxfer should be a whole multiple of granularity. If we're going to
1697 	 * break up a window because we're greater than maxxfer, we might as
1698 	 * well make sure it's maxxfer is a whole multiple so we don't have to
1699 	 * worry about triming the window later on for this case.
1700 	 */
1701 	if (attr->dma_attr_granular > 1) {
1702 		if (dma->dp_granularity_power_2) {
1703 			dma->dp_maxxfer = attr->dma_attr_maxxfer -
1704 			    (attr->dma_attr_maxxfer &
1705 			    (attr->dma_attr_granular - 1));
1706 		} else {
1707 			dma->dp_maxxfer = attr->dma_attr_maxxfer -
1708 			    (attr->dma_attr_maxxfer % attr->dma_attr_granular);
1709 		}
1710 	} else {
1711 		dma->dp_maxxfer = attr->dma_attr_maxxfer;
1712 	}
1713 
1714 	maxsegmentsize_ll = MIN(seg, dma->dp_maxxfer);
1715 	maxsegmentsize_ll = MIN(maxsegmentsize_ll, count_max);
1716 	if (maxsegmentsize_ll == 0 || (maxsegmentsize_ll > 0xFFFFFFFF)) {
1717 		maxsegmentsize = 0xFFFFFFFF;
1718 	} else {
1719 		maxsegmentsize = maxsegmentsize_ll;
1720 	}
1721 	dma->dp_sglinfo.si_max_cookie_size = maxsegmentsize;
1722 	dma->dp_sglinfo.si_segmask = attr->dma_attr_seg;
1723 
1724 	/* check the ddi_dma_attr arg to make sure it makes a little sense */
1725 	if (rootnex_alloc_check_parms) {
1726 		e = rootnex_valid_alloc_parms(attr, maxsegmentsize);
1727 		if (e != DDI_SUCCESS) {
1728 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ALLOC_FAIL]);
1729 			(void) rootnex_dma_freehdl(dip, rdip,
1730 			    (ddi_dma_handle_t)hp);
1731 			return (e);
1732 		}
1733 	}
1734 
1735 	*handlep = (ddi_dma_handle_t)hp;
1736 
1737 	ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1738 	DTRACE_PROBE1(rootnex__alloc__handle, uint64_t,
1739 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1740 
1741 	return (DDI_SUCCESS);
1742 }
1743 
1744 
1745 /*
1746  * rootnex_dma_freehdl()
1747  *    called from ddi_dma_free_handle().
1748  */
1749 /*ARGSUSED*/
1750 static int
1751 rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
1752 {
1753 	ddi_dma_impl_t *hp;
1754 	rootnex_dma_t *dma;
1755 
1756 
1757 	hp = (ddi_dma_impl_t *)handle;
1758 	dma = (rootnex_dma_t *)hp->dmai_private;
1759 
1760 	/* unbind should have been called first */
1761 	ASSERT(!dma->dp_inuse);
1762 
1763 	mutex_destroy(&dma->dp_mutex);
1764 	kmem_cache_free(rootnex_state->r_dmahdl_cache, hp);
1765 
1766 	ROOTNEX_PROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1767 	DTRACE_PROBE1(rootnex__free__handle, uint64_t,
1768 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1769 
1770 	if (rootnex_state->r_dvma_call_list_id)
1771 		ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
1772 
1773 	return (DDI_SUCCESS);
1774 }
1775 
1776 
1777 /*
1778  * rootnex_dma_bindhdl()
1779  *    called from ddi_dma_addr_bind_handle() and ddi_dma_buf_bind_handle().
1780  */
1781 /*ARGSUSED*/
1782 static int
1783 rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
1784     struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
1785 {
1786 	rootnex_sglinfo_t *sinfo;
1787 	ddi_dma_attr_t *attr;
1788 	ddi_dma_impl_t *hp;
1789 	rootnex_dma_t *dma;
1790 	int kmflag;
1791 	int e;
1792 
1793 
1794 	hp = (ddi_dma_impl_t *)handle;
1795 	dma = (rootnex_dma_t *)hp->dmai_private;
1796 	sinfo = &dma->dp_sglinfo;
1797 	attr = &hp->dmai_attr;
1798 
1799 	hp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS;
1800 
1801 	/*
1802 	 * This is useful for debugging a driver. Not as useful in a production
1803 	 * system. The only time this will fail is if you have a driver bug.
1804 	 */
1805 	if (rootnex_bind_check_inuse) {
1806 		/*
1807 		 * No one else should ever have this lock unless someone else
1808 		 * is trying to use this handle. So contention on the lock
1809 		 * is the same as inuse being set.
1810 		 */
1811 		e = mutex_tryenter(&dma->dp_mutex);
1812 		if (e == 0) {
1813 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
1814 			return (DDI_DMA_INUSE);
1815 		}
1816 		if (dma->dp_inuse) {
1817 			mutex_exit(&dma->dp_mutex);
1818 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
1819 			return (DDI_DMA_INUSE);
1820 		}
1821 		dma->dp_inuse = B_TRUE;
1822 		mutex_exit(&dma->dp_mutex);
1823 	}
1824 
1825 	/* check the ddi_dma_attr arg to make sure it makes a little sense */
1826 	if (rootnex_bind_check_parms) {
1827 		e = rootnex_valid_bind_parms(dmareq, attr);
1828 		if (e != DDI_SUCCESS) {
1829 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
1830 			rootnex_clean_dmahdl(hp);
1831 			return (e);
1832 		}
1833 	}
1834 
1835 	/* save away the original bind info */
1836 	dma->dp_dma = dmareq->dmar_object;
1837 
1838 	/*
1839 	 * Figure out a rough estimate of what maximum number of pages this
1840 	 * buffer could use (a high estimate of course).
1841 	 */
1842 	sinfo->si_max_pages = mmu_btopr(dma->dp_dma.dmao_size) + 1;
1843 
1844 	/*
1845 	 * We'll use the pre-allocated cookies for any bind that will *always*
1846 	 * fit (more important to be consistent, we don't want to create
1847 	 * additional degenerate cases).
1848 	 */
1849 	if (sinfo->si_max_pages <= rootnex_state->r_prealloc_cookies) {
1850 		dma->dp_cookies = (ddi_dma_cookie_t *)dma->dp_prealloc_buffer;
1851 		dma->dp_need_to_free_cookie = B_FALSE;
1852 		DTRACE_PROBE2(rootnex__bind__prealloc, dev_info_t *, rdip,
1853 		    uint_t, sinfo->si_max_pages);
1854 
1855 	/*
1856 	 * For anything larger than that, we'll go ahead and allocate the
1857 	 * maximum number of pages we expect to see. Hopefuly, we won't be
1858 	 * seeing this path in the fast path for high performance devices very
1859 	 * frequently.
1860 	 *
1861 	 * a ddi bind interface that allowed the driver to provide storage to
1862 	 * the bind interface would speed this case up.
1863 	 */
1864 	} else {
1865 		/* convert the sleep flags */
1866 		if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
1867 			kmflag =  KM_SLEEP;
1868 		} else {
1869 			kmflag =  KM_NOSLEEP;
1870 		}
1871 
1872 		/*
1873 		 * Save away how much memory we allocated. If we're doing a
1874 		 * nosleep, the alloc could fail...
1875 		 */
1876 		dma->dp_cookie_size = sinfo->si_max_pages *
1877 		    sizeof (ddi_dma_cookie_t);
1878 		dma->dp_cookies = kmem_alloc(dma->dp_cookie_size, kmflag);
1879 		if (dma->dp_cookies == NULL) {
1880 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
1881 			rootnex_clean_dmahdl(hp);
1882 			return (DDI_DMA_NORESOURCES);
1883 		}
1884 		dma->dp_need_to_free_cookie = B_TRUE;
1885 		DTRACE_PROBE2(rootnex__bind__alloc, dev_info_t *, rdip, uint_t,
1886 		    sinfo->si_max_pages);
1887 	}
1888 	hp->dmai_cookie = dma->dp_cookies;
1889 
1890 	/*
1891 	 * Get the real sgl. rootnex_get_sgl will fill in cookie array while
1892 	 * looking at the contraints in the dma structure. It will then put some
1893 	 * additional state about the sgl in the dma struct (i.e. is the sgl
1894 	 * clean, or do we need to do some munging; how many pages need to be
1895 	 * copied, etc.)
1896 	 */
1897 	rootnex_get_sgl(&dmareq->dmar_object, dma->dp_cookies,
1898 	    &dma->dp_sglinfo);
1899 	ASSERT(sinfo->si_sgl_size <= sinfo->si_max_pages);
1900 
1901 	/* if we don't need a copy buffer, we don't need to sync */
1902 	if (sinfo->si_copybuf_req == 0) {
1903 		hp->dmai_rflags |= DMP_NOSYNC;
1904 	}
1905 
1906 	/*
1907 	 * if we don't need the copybuf and we don't need to do a partial,  we
1908 	 * hit the fast path. All the high performance devices should be trying
1909 	 * to hit this path. To hit this path, a device should be able to reach
1910 	 * all of memory, shouldn't try to bind more than it can transfer, and
1911 	 * the buffer shouldn't require more cookies than the driver/device can
1912 	 * handle [sgllen]).
1913 	 */
1914 	if ((sinfo->si_copybuf_req == 0) &&
1915 	    (sinfo->si_sgl_size <= attr->dma_attr_sgllen) &&
1916 	    (dma->dp_dma.dmao_size < dma->dp_maxxfer)) {
1917 		/*
1918 		 * copy out the first cookie and ccountp, set the cookie
1919 		 * pointer to the second cookie. The first cookie is passed
1920 		 * back on the stack. Additional cookies are accessed via
1921 		 * ddi_dma_nextcookie()
1922 		 */
1923 		*cookiep = dma->dp_cookies[0];
1924 		*ccountp = sinfo->si_sgl_size;
1925 		hp->dmai_cookie++;
1926 		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
1927 		hp->dmai_nwin = 1;
1928 		ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
1929 		DTRACE_PROBE3(rootnex__bind__fast, dev_info_t *, rdip, uint64_t,
1930 		    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t,
1931 		    dma->dp_dma.dmao_size);
1932 		return (DDI_DMA_MAPPED);
1933 	}
1934 
1935 	/*
1936 	 * go to the slow path, we may need to alloc more memory, create
1937 	 * multiple windows, and munge up a sgl to make the device happy.
1938 	 */
1939 	e = rootnex_bind_slowpath(hp, dmareq, dma, attr, kmflag);
1940 	if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) {
1941 		if (dma->dp_need_to_free_cookie) {
1942 			kmem_free(dma->dp_cookies, dma->dp_cookie_size);
1943 		}
1944 		ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
1945 		rootnex_clean_dmahdl(hp); /* must be after free cookie */
1946 		return (e);
1947 	}
1948 
1949 	/* if the first window uses the copy buffer, sync it for the device */
1950 	if ((dma->dp_window[dma->dp_current_win].wd_dosync) &&
1951 	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
1952 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
1953 		    DDI_DMA_SYNC_FORDEV);
1954 	}
1955 
1956 	/*
1957 	 * copy out the first cookie and ccountp, set the cookie pointer to the
1958 	 * second cookie. Make sure the partial flag is set/cleared correctly.
1959 	 * If we have a partial map (i.e. multiple windows), the number of
1960 	 * cookies we return is the number of cookies in the first window.
1961 	 */
1962 	if (e == DDI_DMA_MAPPED) {
1963 		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
1964 		*ccountp = sinfo->si_sgl_size;
1965 	} else {
1966 		hp->dmai_rflags |= DDI_DMA_PARTIAL;
1967 		*ccountp = dma->dp_window[dma->dp_current_win].wd_cookie_cnt;
1968 		ASSERT(hp->dmai_nwin <= dma->dp_max_win);
1969 	}
1970 	*cookiep = dma->dp_cookies[0];
1971 	hp->dmai_cookie++;
1972 
1973 	ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
1974 	DTRACE_PROBE3(rootnex__bind__slow, dev_info_t *, rdip, uint64_t,
1975 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t,
1976 	    dma->dp_dma.dmao_size);
1977 	return (e);
1978 }
1979 
1980 
1981 /*
1982  * rootnex_dma_unbindhdl()
1983  *    called from ddi_dma_unbind_handle()
1984  */
1985 /*ARGSUSED*/
1986 static int
1987 rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
1988     ddi_dma_handle_t handle)
1989 {
1990 	ddi_dma_impl_t *hp;
1991 	rootnex_dma_t *dma;
1992 	int e;
1993 
1994 
1995 	hp = (ddi_dma_impl_t *)handle;
1996 	dma = (rootnex_dma_t *)hp->dmai_private;
1997 
1998 	/* make sure the buffer wasn't free'd before calling unbind */
1999 	if (rootnex_unbind_verify_buffer) {
2000 		e = rootnex_verify_buffer(dma);
2001 		if (e != DDI_SUCCESS) {
2002 			ASSERT(0);
2003 			return (DDI_FAILURE);
2004 		}
2005 	}
2006 
2007 	/* sync the current window before unbinding the buffer */
2008 	if (dma->dp_window && dma->dp_window[dma->dp_current_win].wd_dosync &&
2009 	    (hp->dmai_rflags & DDI_DMA_READ)) {
2010 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
2011 		    DDI_DMA_SYNC_FORCPU);
2012 	}
2013 
2014 	/*
2015 	 * cleanup and copy buffer or window state. if we didn't use the copy
2016 	 * buffer or windows, there won't be much to do :-)
2017 	 */
2018 	rootnex_teardown_copybuf(dma);
2019 	rootnex_teardown_windows(dma);
2020 
2021 	/*
2022 	 * If we had to allocate space to for the worse case sgl (it didn't
2023 	 * fit into our pre-allocate buffer), free that up now
2024 	 */
2025 	if (dma->dp_need_to_free_cookie) {
2026 		kmem_free(dma->dp_cookies, dma->dp_cookie_size);
2027 	}
2028 
2029 	/*
2030 	 * clean up the handle so it's ready for the next bind (i.e. if the
2031 	 * handle is reused).
2032 	 */
2033 	rootnex_clean_dmahdl(hp);
2034 
2035 	if (rootnex_state->r_dvma_call_list_id)
2036 		ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
2037 
2038 	ROOTNEX_PROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
2039 	DTRACE_PROBE1(rootnex__unbind, uint64_t,
2040 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
2041 
2042 	return (DDI_SUCCESS);
2043 }
2044 
2045 
2046 /*
2047  * rootnex_verify_buffer()
2048  *   verify buffer wasn't free'd
2049  */
2050 static int
2051 rootnex_verify_buffer(rootnex_dma_t *dma)
2052 {
2053 	peekpoke_ctlops_t peek;
2054 	page_t **pplist;
2055 	caddr_t vaddr;
2056 	uint_t pcnt;
2057 	uint_t poff;
2058 	page_t *pp;
2059 	uint8_t b;
2060 	int i;
2061 	int e;
2062 
2063 
2064 	/* Figure out how many pages this buffer occupies */
2065 	if (dma->dp_dma.dmao_type == DMA_OTYP_PAGES) {
2066 		poff = dma->dp_dma.dmao_obj.pp_obj.pp_offset & MMU_PAGEOFFSET;
2067 	} else {
2068 		vaddr = dma->dp_dma.dmao_obj.virt_obj.v_addr;
2069 		poff = (uintptr_t)vaddr & MMU_PAGEOFFSET;
2070 	}
2071 	pcnt = mmu_btopr(dma->dp_dma.dmao_size + poff);
2072 
2073 	switch (dma->dp_dma.dmao_type) {
2074 	case DMA_OTYP_PAGES:
2075 		/*
2076 		 * for a linked list of pp's walk through them to make sure
2077 		 * they're locked and not free.
2078 		 */
2079 		pp = dma->dp_dma.dmao_obj.pp_obj.pp_pp;
2080 		for (i = 0; i < pcnt; i++) {
2081 			if (PP_ISFREE(pp) || !PAGE_LOCKED(pp)) {
2082 				return (DDI_FAILURE);
2083 			}
2084 			pp = pp->p_next;
2085 		}
2086 		break;
2087 
2088 	case DMA_OTYP_VADDR:
2089 	case DMA_OTYP_BUFVADDR:
2090 		pplist = dma->dp_dma.dmao_obj.virt_obj.v_priv;
2091 		/*
2092 		 * for an array of pp's walk through them to make sure they're
2093 		 * not free. It's possible that they may not be locked.
2094 		 */
2095 		if (pplist) {
2096 			for (i = 0; i < pcnt; i++) {
2097 				if (PP_ISFREE(pplist[i])) {
2098 					return (DDI_FAILURE);
2099 				}
2100 			}
2101 
2102 		/* For a virtual address, try to peek at each page */
2103 		} else {
2104 			if (dma->dp_sglinfo.si_asp == &kas) {
2105 				bzero(&peek, sizeof (peekpoke_ctlops_t));
2106 				peek.host_addr = (uintptr_t)&b;
2107 				peek.size = sizeof (uint8_t);
2108 				peek.dev_addr = (uintptr_t)vaddr;
2109 				for (i = 0; i < pcnt; i++) {
2110 					e = rootnex_ctlops_peek(&peek, &b);
2111 					if (e != DDI_SUCCESS) {
2112 						return (DDI_FAILURE);
2113 					}
2114 					peek.dev_addr += MMU_PAGESIZE;
2115 				}
2116 			}
2117 		}
2118 		break;
2119 
2120 	default:
2121 		ASSERT(0);
2122 		break;
2123 	}
2124 
2125 	return (DDI_SUCCESS);
2126 }
2127 
2128 
2129 /*
2130  * rootnex_clean_dmahdl()
2131  *    Clean the dma handle. This should be called on a handle alloc and an
2132  *    unbind handle. Set the handle state to the default settings.
2133  */
2134 static void
2135 rootnex_clean_dmahdl(ddi_dma_impl_t *hp)
2136 {
2137 	rootnex_dma_t *dma;
2138 
2139 
2140 	dma = (rootnex_dma_t *)hp->dmai_private;
2141 
2142 	hp->dmai_nwin = 0;
2143 	dma->dp_current_cookie = 0;
2144 	dma->dp_copybuf_size = 0;
2145 	dma->dp_window = NULL;
2146 	dma->dp_cbaddr = NULL;
2147 	dma->dp_inuse = B_FALSE;
2148 	dma->dp_need_to_free_cookie = B_FALSE;
2149 	dma->dp_need_to_free_window = B_FALSE;
2150 	dma->dp_partial_required = B_FALSE;
2151 	dma->dp_trim_required = B_FALSE;
2152 	dma->dp_sglinfo.si_copybuf_req = 0;
2153 #if !defined(__amd64)
2154 	dma->dp_cb_remaping = B_FALSE;
2155 	dma->dp_kva = NULL;
2156 #endif
2157 
2158 	/* FMA related initialization */
2159 	hp->dmai_fault = 0;
2160 	hp->dmai_fault_check = NULL;
2161 	hp->dmai_fault_notify = NULL;
2162 	hp->dmai_error.err_ena = 0;
2163 	hp->dmai_error.err_status = DDI_FM_OK;
2164 	hp->dmai_error.err_expected = DDI_FM_ERR_UNEXPECTED;
2165 	hp->dmai_error.err_ontrap = NULL;
2166 	hp->dmai_error.err_fep = NULL;
2167 }
2168 
2169 
2170 /*
2171  * rootnex_valid_alloc_parms()
2172  *    Called in ddi_dma_alloc_handle path to validate its parameters.
2173  */
2174 static int
2175 rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegmentsize)
2176 {
2177 	if ((attr->dma_attr_seg < MMU_PAGEOFFSET) ||
2178 	    (attr->dma_attr_count_max < MMU_PAGEOFFSET) ||
2179 	    (attr->dma_attr_granular > MMU_PAGESIZE) ||
2180 	    (attr->dma_attr_maxxfer < MMU_PAGESIZE)) {
2181 		return (DDI_DMA_BADATTR);
2182 	}
2183 
2184 	if (attr->dma_attr_addr_hi <= attr->dma_attr_addr_lo) {
2185 		return (DDI_DMA_BADATTR);
2186 	}
2187 
2188 	if ((attr->dma_attr_seg & MMU_PAGEOFFSET) != MMU_PAGEOFFSET ||
2189 	    MMU_PAGESIZE & (attr->dma_attr_granular - 1) ||
2190 	    attr->dma_attr_sgllen <= 0) {
2191 		return (DDI_DMA_BADATTR);
2192 	}
2193 
2194 	/* We should be able to DMA into every byte offset in a page */
2195 	if (maxsegmentsize < MMU_PAGESIZE) {
2196 		return (DDI_DMA_BADATTR);
2197 	}
2198 
2199 	return (DDI_SUCCESS);
2200 }
2201 
2202 
2203 /*
2204  * rootnex_valid_bind_parms()
2205  *    Called in ddi_dma_*_bind_handle path to validate its parameters.
2206  */
2207 /* ARGSUSED */
2208 static int
2209 rootnex_valid_bind_parms(ddi_dma_req_t *dmareq, ddi_dma_attr_t *attr)
2210 {
2211 #if !defined(__amd64)
2212 	/*
2213 	 * we only support up to a 2G-1 transfer size on 32-bit kernels so
2214 	 * we can track the offset for the obsoleted interfaces.
2215 	 */
2216 	if (dmareq->dmar_object.dmao_size > 0x7FFFFFFF) {
2217 		return (DDI_DMA_TOOBIG);
2218 	}
2219 #endif
2220 
2221 	return (DDI_SUCCESS);
2222 }
2223 
2224 
2225 /*
2226  * rootnex_get_sgl()
2227  *    Called in bind fastpath to get the sgl. Most of this will be replaced
2228  *    with a call to the vm layer when vm2.0 comes around...
2229  */
2230 static void
2231 rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
2232     rootnex_sglinfo_t *sglinfo)
2233 {
2234 	ddi_dma_atyp_t buftype;
2235 	uint64_t last_page;
2236 	uint64_t offset;
2237 	uint64_t addrhi;
2238 	uint64_t addrlo;
2239 	uint64_t maxseg;
2240 	page_t **pplist;
2241 	uint64_t paddr;
2242 	uint32_t psize;
2243 	uint32_t size;
2244 	caddr_t vaddr;
2245 	uint_t pcnt;
2246 	page_t *pp;
2247 	uint_t cnt;
2248 
2249 
2250 	/* shortcuts */
2251 	pplist = dmar_object->dmao_obj.virt_obj.v_priv;
2252 	vaddr = dmar_object->dmao_obj.virt_obj.v_addr;
2253 	maxseg = sglinfo->si_max_cookie_size;
2254 	buftype = dmar_object->dmao_type;
2255 	addrhi = sglinfo->si_max_addr;
2256 	addrlo = sglinfo->si_min_addr;
2257 	size = dmar_object->dmao_size;
2258 
2259 	pcnt = 0;
2260 	cnt = 0;
2261 
2262 	/*
2263 	 * if we were passed down a linked list of pages, i.e. pointer to
2264 	 * page_t, use this to get our physical address and buf offset.
2265 	 */
2266 	if (buftype == DMA_OTYP_PAGES) {
2267 		pp = dmar_object->dmao_obj.pp_obj.pp_pp;
2268 		ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
2269 		offset =  dmar_object->dmao_obj.pp_obj.pp_offset &
2270 		    MMU_PAGEOFFSET;
2271 		paddr = ptob64(pp->p_pagenum) + offset;
2272 		psize = MIN(size, (MMU_PAGESIZE - offset));
2273 		pp = pp->p_next;
2274 		sglinfo->si_asp = NULL;
2275 
2276 	/*
2277 	 * We weren't passed down a linked list of pages, but if we were passed
2278 	 * down an array of pages, use this to get our physical address and buf
2279 	 * offset.
2280 	 */
2281 	} else if (pplist != NULL) {
2282 		ASSERT((buftype == DMA_OTYP_VADDR) ||
2283 		    (buftype == DMA_OTYP_BUFVADDR));
2284 
2285 		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
2286 		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
2287 		if (sglinfo->si_asp == NULL) {
2288 			sglinfo->si_asp = &kas;
2289 		}
2290 
2291 		ASSERT(!PP_ISFREE(pplist[pcnt]));
2292 		paddr = ptob64(pplist[pcnt]->p_pagenum);
2293 		paddr += offset;
2294 		psize = MIN(size, (MMU_PAGESIZE - offset));
2295 		pcnt++;
2296 
2297 	/*
2298 	 * All we have is a virtual address, we'll need to call into the VM
2299 	 * to get the physical address.
2300 	 */
2301 	} else {
2302 		ASSERT((buftype == DMA_OTYP_VADDR) ||
2303 		    (buftype == DMA_OTYP_BUFVADDR));
2304 
2305 		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
2306 		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
2307 		if (sglinfo->si_asp == NULL) {
2308 			sglinfo->si_asp = &kas;
2309 		}
2310 
2311 		paddr = ptob64(hat_getpfnum(sglinfo->si_asp->a_hat, vaddr));
2312 		paddr += offset;
2313 		psize = MIN(size, (MMU_PAGESIZE - offset));
2314 		vaddr += psize;
2315 	}
2316 
2317 	/*
2318 	 * Setup the first cookie with the physical address of the page and the
2319 	 * size of the page (which takes into account the initial offset into
2320 	 * the page.
2321 	 */
2322 	sgl[cnt].dmac_laddress = paddr;
2323 	sgl[cnt].dmac_size = psize;
2324 	sgl[cnt].dmac_type = 0;
2325 
2326 	/*
2327 	 * Save away the buffer offset into the page. We'll need this later in
2328 	 * the copy buffer code to help figure out the page index within the
2329 	 * buffer and the offset into the current page.
2330 	 */
2331 	sglinfo->si_buf_offset = offset;
2332 
2333 	/*
2334 	 * If the DMA engine can't reach the physical address, increase how
2335 	 * much copy buffer we need. We always increase by pagesize so we don't
2336 	 * have to worry about converting offsets. Set a flag in the cookies
2337 	 * dmac_type to indicate that it uses the copy buffer. If this isn't the
2338 	 * last cookie, go to the next cookie (since we separate each page which
2339 	 * uses the copy buffer in case the copy buffer is not physically
2340 	 * contiguous.
2341 	 */
2342 	if ((paddr < addrlo) || ((paddr + psize) > addrhi)) {
2343 		sglinfo->si_copybuf_req += MMU_PAGESIZE;
2344 		sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
2345 		if ((cnt + 1) < sglinfo->si_max_pages) {
2346 			cnt++;
2347 			sgl[cnt].dmac_laddress = 0;
2348 			sgl[cnt].dmac_size = 0;
2349 			sgl[cnt].dmac_type = 0;
2350 		}
2351 	}
2352 
2353 	/*
2354 	 * save this page's physical address so we can figure out if the next
2355 	 * page is physically contiguous. Keep decrementing size until we are
2356 	 * done with the buffer.
2357 	 */
2358 	last_page = paddr & MMU_PAGEMASK;
2359 	size -= psize;
2360 
2361 	while (size > 0) {
2362 		/* Get the size for this page (i.e. partial or full page) */
2363 		psize = MIN(size, MMU_PAGESIZE);
2364 
2365 		if (buftype == DMA_OTYP_PAGES) {
2366 			/* get the paddr from the page_t */
2367 			ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
2368 			paddr = ptob64(pp->p_pagenum);
2369 			pp = pp->p_next;
2370 		} else if (pplist != NULL) {
2371 			/* index into the array of page_t's to get the paddr */
2372 			ASSERT(!PP_ISFREE(pplist[pcnt]));
2373 			paddr = ptob64(pplist[pcnt]->p_pagenum);
2374 			pcnt++;
2375 		} else {
2376 			/* call into the VM to get the paddr */
2377 			paddr =  ptob64(hat_getpfnum(sglinfo->si_asp->a_hat,
2378 			    vaddr));
2379 			vaddr += psize;
2380 		}
2381 
2382 		/* check to see if this page needs the copy buffer */
2383 		if ((paddr < addrlo) || ((paddr + psize) > addrhi)) {
2384 			sglinfo->si_copybuf_req += MMU_PAGESIZE;
2385 
2386 			/*
2387 			 * if there is something in the current cookie, go to
2388 			 * the next one. We only want one page in a cookie which
2389 			 * uses the copybuf since the copybuf doesn't have to
2390 			 * be physically contiguous.
2391 			 */
2392 			if (sgl[cnt].dmac_size != 0) {
2393 				cnt++;
2394 			}
2395 			sgl[cnt].dmac_laddress = paddr;
2396 			sgl[cnt].dmac_size = psize;
2397 #if defined(__amd64)
2398 			sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
2399 #else
2400 			/*
2401 			 * save the buf offset for 32-bit kernel. used in the
2402 			 * obsoleted interfaces.
2403 			 */
2404 			sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF |
2405 			    (dmar_object->dmao_size - size);
2406 #endif
2407 			/* if this isn't the last cookie, go to the next one */
2408 			if ((cnt + 1) < sglinfo->si_max_pages) {
2409 				cnt++;
2410 				sgl[cnt].dmac_laddress = 0;
2411 				sgl[cnt].dmac_size = 0;
2412 				sgl[cnt].dmac_type = 0;
2413 			}
2414 
2415 		/*
2416 		 * this page didn't need the copy buffer, if it's not physically
2417 		 * contiguous, or it would put us over a segment boundary, or it
2418 		 * puts us over the max cookie size, or the current sgl doesn't
2419 		 * have anything in it.
2420 		 */
2421 		} else if (((last_page + MMU_PAGESIZE) != paddr) ||
2422 		    !(paddr & sglinfo->si_segmask) ||
2423 		    ((sgl[cnt].dmac_size + psize) > maxseg) ||
2424 		    (sgl[cnt].dmac_size == 0)) {
2425 			/*
2426 			 * if we're not already in a new cookie, go to the next
2427 			 * cookie.
2428 			 */
2429 			if (sgl[cnt].dmac_size != 0) {
2430 				cnt++;
2431 			}
2432 
2433 			/* save the cookie information */
2434 			sgl[cnt].dmac_laddress = paddr;
2435 			sgl[cnt].dmac_size = psize;
2436 #if defined(__amd64)
2437 			sgl[cnt].dmac_type = 0;
2438 #else
2439 			/*
2440 			 * save the buf offset for 32-bit kernel. used in the
2441 			 * obsoleted interfaces.
2442 			 */
2443 			sgl[cnt].dmac_type = dmar_object->dmao_size - size;
2444 #endif
2445 
2446 		/*
2447 		 * this page didn't need the copy buffer, it is physically
2448 		 * contiguous with the last page, and it's <= the max cookie
2449 		 * size.
2450 		 */
2451 		} else {
2452 			sgl[cnt].dmac_size += psize;
2453 
2454 			/*
2455 			 * if this exactly ==  the maximum cookie size, and
2456 			 * it isn't the last cookie, go to the next cookie.
2457 			 */
2458 			if (((sgl[cnt].dmac_size + psize) == maxseg) &&
2459 			    ((cnt + 1) < sglinfo->si_max_pages)) {
2460 				cnt++;
2461 				sgl[cnt].dmac_laddress = 0;
2462 				sgl[cnt].dmac_size = 0;
2463 				sgl[cnt].dmac_type = 0;
2464 			}
2465 		}
2466 
2467 		/*
2468 		 * save this page's physical address so we can figure out if the
2469 		 * next page is physically contiguous. Keep decrementing size
2470 		 * until we are done with the buffer.
2471 		 */
2472 		last_page = paddr;
2473 		size -= psize;
2474 	}
2475 
2476 	/* we're done, save away how many cookies the sgl has */
2477 	if (sgl[cnt].dmac_size == 0) {
2478 		ASSERT(cnt < sglinfo->si_max_pages);
2479 		sglinfo->si_sgl_size = cnt;
2480 	} else {
2481 		sglinfo->si_sgl_size = cnt + 1;
2482 	}
2483 }
2484 
2485 
2486 /*
2487  * rootnex_bind_slowpath()
2488  *    Call in the bind path if the calling driver can't use the sgl without
2489  *    modifying it. We either need to use the copy buffer and/or we will end up
2490  *    with a partial bind.
2491  */
2492 static int
2493 rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
2494     rootnex_dma_t *dma, ddi_dma_attr_t *attr, int kmflag)
2495 {
2496 	rootnex_sglinfo_t *sinfo;
2497 	rootnex_window_t *window;
2498 	ddi_dma_cookie_t *cookie;
2499 	size_t copybuf_used;
2500 	size_t dmac_size;
2501 	boolean_t partial;
2502 	off_t cur_offset;
2503 	page_t *cur_pp;
2504 	major_t mnum;
2505 	int e;
2506 	int i;
2507 
2508 
2509 	sinfo = &dma->dp_sglinfo;
2510 	copybuf_used = 0;
2511 	partial = B_FALSE;
2512 
2513 	/*
2514 	 * If we're using the copybuf, set the copybuf state in dma struct.
2515 	 * Needs to be first since it sets the copy buffer size.
2516 	 */
2517 	if (sinfo->si_copybuf_req != 0) {
2518 		e = rootnex_setup_copybuf(hp, dmareq, dma, attr);
2519 		if (e != DDI_SUCCESS) {
2520 			return (e);
2521 		}
2522 	} else {
2523 		dma->dp_copybuf_size = 0;
2524 	}
2525 
2526 	/*
2527 	 * Figure out if we need to do a partial mapping. If so, figure out
2528 	 * if we need to trim the buffers when we munge the sgl.
2529 	 */
2530 	if ((dma->dp_copybuf_size < sinfo->si_copybuf_req) ||
2531 	    (dma->dp_dma.dmao_size > dma->dp_maxxfer) ||
2532 	    (attr->dma_attr_sgllen < sinfo->si_sgl_size)) {
2533 		dma->dp_partial_required = B_TRUE;
2534 		if (attr->dma_attr_granular != 1) {
2535 			dma->dp_trim_required = B_TRUE;
2536 		}
2537 	} else {
2538 		dma->dp_partial_required = B_FALSE;
2539 		dma->dp_trim_required = B_FALSE;
2540 	}
2541 
2542 	/* If we need to do a partial bind, make sure the driver supports it */
2543 	if (dma->dp_partial_required &&
2544 	    !(dmareq->dmar_flags & DDI_DMA_PARTIAL)) {
2545 
2546 		mnum = ddi_driver_major(dma->dp_dip);
2547 		/*
2548 		 * patchable which allows us to print one warning per major
2549 		 * number.
2550 		 */
2551 		if ((rootnex_bind_warn) &&
2552 		    ((rootnex_warn_list[mnum] & ROOTNEX_BIND_WARNING) == 0)) {
2553 			rootnex_warn_list[mnum] |= ROOTNEX_BIND_WARNING;
2554 			cmn_err(CE_WARN, "!%s: coding error detected, the "
2555 			    "driver is using ddi_dma_attr(9S) incorrectly. "
2556 			    "There is a small risk of data corruption in "
2557 			    "particular with large I/Os. The driver should be "
2558 			    "replaced with a corrected version for proper "
2559 			    "system operation. To disable this warning, add "
2560 			    "'set rootnex:rootnex_bind_warn=0' to "
2561 			    "/etc/system(4).", ddi_driver_name(dma->dp_dip));
2562 		}
2563 		return (DDI_DMA_TOOBIG);
2564 	}
2565 
2566 	/*
2567 	 * we might need multiple windows, setup state to handle them. In this
2568 	 * code path, we will have at least one window.
2569 	 */
2570 	e = rootnex_setup_windows(hp, dma, attr, kmflag);
2571 	if (e != DDI_SUCCESS) {
2572 		rootnex_teardown_copybuf(dma);
2573 		return (e);
2574 	}
2575 
2576 	window = &dma->dp_window[0];
2577 	cookie = &dma->dp_cookies[0];
2578 	cur_offset = 0;
2579 	rootnex_init_win(hp, dma, window, cookie, cur_offset);
2580 	if (dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) {
2581 		cur_pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp;
2582 	}
2583 
2584 	/* loop though all the cookies we got back from get_sgl() */
2585 	for (i = 0; i < sinfo->si_sgl_size; i++) {
2586 		/*
2587 		 * If we're using the copy buffer, check this cookie and setup
2588 		 * its associated copy buffer state. If this cookie uses the
2589 		 * copy buffer, make sure we sync this window during dma_sync.
2590 		 */
2591 		if (dma->dp_copybuf_size > 0) {
2592 			rootnex_setup_cookie(&dmareq->dmar_object, dma, cookie,
2593 			    cur_offset, &copybuf_used, &cur_pp);
2594 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
2595 				window->wd_dosync = B_TRUE;
2596 			}
2597 		}
2598 
2599 		/*
2600 		 * save away the cookie size, since it could be modified in
2601 		 * the windowing code.
2602 		 */
2603 		dmac_size = cookie->dmac_size;
2604 
2605 		/* if we went over max copybuf size */
2606 		if (dma->dp_copybuf_size &&
2607 		    (copybuf_used > dma->dp_copybuf_size)) {
2608 			partial = B_TRUE;
2609 			e = rootnex_copybuf_window_boundary(hp, dma, &window,
2610 			    cookie, cur_offset, &copybuf_used);
2611 			if (e != DDI_SUCCESS) {
2612 				rootnex_teardown_copybuf(dma);
2613 				rootnex_teardown_windows(dma);
2614 				return (e);
2615 			}
2616 
2617 			/*
2618 			 * if the coookie uses the copy buffer, make sure the
2619 			 * new window we just moved to is set to sync.
2620 			 */
2621 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
2622 				window->wd_dosync = B_TRUE;
2623 			}
2624 			DTRACE_PROBE1(rootnex__copybuf__window, dev_info_t *,
2625 			    dma->dp_dip);
2626 
2627 		/* if the cookie cnt == max sgllen, move to the next window */
2628 		} else if (window->wd_cookie_cnt >= attr->dma_attr_sgllen) {
2629 			partial = B_TRUE;
2630 			ASSERT(window->wd_cookie_cnt == attr->dma_attr_sgllen);
2631 			e = rootnex_sgllen_window_boundary(hp, dma, &window,
2632 			    cookie, attr, cur_offset);
2633 			if (e != DDI_SUCCESS) {
2634 				rootnex_teardown_copybuf(dma);
2635 				rootnex_teardown_windows(dma);
2636 				return (e);
2637 			}
2638 
2639 			/*
2640 			 * if the coookie uses the copy buffer, make sure the
2641 			 * new window we just moved to is set to sync.
2642 			 */
2643 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
2644 				window->wd_dosync = B_TRUE;
2645 			}
2646 			DTRACE_PROBE1(rootnex__sgllen__window, dev_info_t *,
2647 			    dma->dp_dip);
2648 
2649 		/* else if we will be over maxxfer */
2650 		} else if ((window->wd_size + dmac_size) >
2651 		    dma->dp_maxxfer) {
2652 			partial = B_TRUE;
2653 			e = rootnex_maxxfer_window_boundary(hp, dma, &window,
2654 			    cookie);
2655 			if (e != DDI_SUCCESS) {
2656 				rootnex_teardown_copybuf(dma);
2657 				rootnex_teardown_windows(dma);
2658 				return (e);
2659 			}
2660 
2661 			/*
2662 			 * if the coookie uses the copy buffer, make sure the
2663 			 * new window we just moved to is set to sync.
2664 			 */
2665 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
2666 				window->wd_dosync = B_TRUE;
2667 			}
2668 			DTRACE_PROBE1(rootnex__maxxfer__window, dev_info_t *,
2669 			    dma->dp_dip);
2670 
2671 		/* else this cookie fits in the current window */
2672 		} else {
2673 			window->wd_cookie_cnt++;
2674 			window->wd_size += dmac_size;
2675 		}
2676 
2677 		/* track our offset into the buffer, go to the next cookie */
2678 		ASSERT(dmac_size <= dma->dp_dma.dmao_size);
2679 		ASSERT(cookie->dmac_size <= dmac_size);
2680 		cur_offset += dmac_size;
2681 		cookie++;
2682 	}
2683 
2684 	/* if we ended up with a zero sized window in the end, clean it up */
2685 	if (window->wd_size == 0) {
2686 		hp->dmai_nwin--;
2687 		window--;
2688 	}
2689 
2690 	ASSERT(window->wd_trim.tr_trim_last == B_FALSE);
2691 
2692 	if (!partial) {
2693 		return (DDI_DMA_MAPPED);
2694 	}
2695 
2696 	ASSERT(dma->dp_partial_required);
2697 	return (DDI_DMA_PARTIAL_MAP);
2698 }
2699 
2700 
2701 /*
2702  * rootnex_setup_copybuf()
2703  *    Called in bind slowpath. Figures out if we're going to use the copy
2704  *    buffer, and if we do, sets up the basic state to handle it.
2705  */
2706 static int
2707 rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
2708     rootnex_dma_t *dma, ddi_dma_attr_t *attr)
2709 {
2710 	rootnex_sglinfo_t *sinfo;
2711 	ddi_dma_attr_t lattr;
2712 	size_t max_copybuf;
2713 	int cansleep;
2714 	int e;
2715 #if !defined(__amd64)
2716 	int vmflag;
2717 #endif
2718 
2719 
2720 	sinfo = &dma->dp_sglinfo;
2721 
2722 	/*
2723 	 * read this first so it's consistent through the routine so we can
2724 	 * patch it on the fly.
2725 	 */
2726 	max_copybuf = rootnex_max_copybuf_size & MMU_PAGEMASK;
2727 
2728 	/* We need to call into the rootnex on ddi_dma_sync() */
2729 	hp->dmai_rflags &= ~DMP_NOSYNC;
2730 
2731 	/* make sure the copybuf size <= the max size */
2732 	dma->dp_copybuf_size = MIN(sinfo->si_copybuf_req, max_copybuf);
2733 	ASSERT((dma->dp_copybuf_size & MMU_PAGEOFFSET) == 0);
2734 
2735 #if !defined(__amd64)
2736 	/*
2737 	 * if we don't have kva space to copy to/from, allocate the KVA space
2738 	 * now. We only do this for the 32-bit kernel. We use seg kpm space for
2739 	 * the 64-bit kernel.
2740 	 */
2741 	if ((dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) ||
2742 	    (dmareq->dmar_object.dmao_obj.virt_obj.v_as != NULL)) {
2743 
2744 		/* convert the sleep flags */
2745 		if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
2746 			vmflag = VM_SLEEP;
2747 		} else {
2748 			vmflag = VM_NOSLEEP;
2749 		}
2750 
2751 		/* allocate Kernel VA space that we can bcopy to/from */
2752 		dma->dp_kva = vmem_alloc(heap_arena, dma->dp_copybuf_size,
2753 		    vmflag);
2754 		if (dma->dp_kva == NULL) {
2755 			return (DDI_DMA_NORESOURCES);
2756 		}
2757 	}
2758 #endif
2759 
2760 	/* convert the sleep flags */
2761 	if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
2762 		cansleep = 1;
2763 	} else {
2764 		cansleep = 0;
2765 	}
2766 
2767 	/*
2768 	 * Allocated the actual copy buffer. This needs to fit within the DMA
2769 	 * engines limits, so we can't use kmem_alloc...
2770 	 */
2771 	lattr = *attr;
2772 	lattr.dma_attr_align = MMU_PAGESIZE;
2773 	e = i_ddi_mem_alloc(dma->dp_dip, &lattr, dma->dp_copybuf_size, cansleep,
2774 	    0, NULL, &dma->dp_cbaddr, &dma->dp_cbsize, NULL);
2775 	if (e != DDI_SUCCESS) {
2776 #if !defined(__amd64)
2777 		if (dma->dp_kva != NULL) {
2778 			vmem_free(heap_arena, dma->dp_kva,
2779 			    dma->dp_copybuf_size);
2780 		}
2781 #endif
2782 		return (DDI_DMA_NORESOURCES);
2783 	}
2784 
2785 	DTRACE_PROBE2(rootnex__alloc__copybuf, dev_info_t *, dma->dp_dip,
2786 	    size_t, dma->dp_copybuf_size);
2787 
2788 	return (DDI_SUCCESS);
2789 }
2790 
2791 
2792 /*
2793  * rootnex_setup_windows()
2794  *    Called in bind slowpath to setup the window state. We always have windows
2795  *    in the slowpath. Even if the window count = 1.
2796  */
2797 static int
2798 rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
2799     ddi_dma_attr_t *attr, int kmflag)
2800 {
2801 	rootnex_window_t *windowp;
2802 	rootnex_sglinfo_t *sinfo;
2803 	size_t copy_state_size;
2804 	size_t win_state_size;
2805 	size_t state_available;
2806 	size_t space_needed;
2807 	uint_t copybuf_win;
2808 	uint_t maxxfer_win;
2809 	size_t space_used;
2810 	uint_t sglwin;
2811 
2812 
2813 	sinfo = &dma->dp_sglinfo;
2814 
2815 	dma->dp_current_win = 0;
2816 	hp->dmai_nwin = 0;
2817 
2818 	/* If we don't need to do a partial, we only have one window */
2819 	if (!dma->dp_partial_required) {
2820 		dma->dp_max_win = 1;
2821 
2822 	/*
2823 	 * we need multiple windows, need to figure out the worse case number
2824 	 * of windows.
2825 	 */
2826 	} else {
2827 		/*
2828 		 * if we need windows because we need more copy buffer that
2829 		 * we allow, the worse case number of windows we could need
2830 		 * here would be (copybuf space required / copybuf space that
2831 		 * we have) plus one for remainder, and plus 2 to handle the
2832 		 * extra pages on the trim for the first and last pages of the
2833 		 * buffer (a page is the minimum window size so under the right
2834 		 * attr settings, you could have a window for each page).
2835 		 * The last page will only be hit here if the size is not a
2836 		 * multiple of the granularity (which theoretically shouldn't
2837 		 * be the case but never has been enforced, so we could have
2838 		 * broken things without it).
2839 		 */
2840 		if (sinfo->si_copybuf_req > dma->dp_copybuf_size) {
2841 			ASSERT(dma->dp_copybuf_size > 0);
2842 			copybuf_win = (sinfo->si_copybuf_req /
2843 			    dma->dp_copybuf_size) + 1 + 2;
2844 		} else {
2845 			copybuf_win = 0;
2846 		}
2847 
2848 		/*
2849 		 * if we need windows because we have more cookies than the H/W
2850 		 * can handle, the number of windows we would need here would
2851 		 * be (cookie count / cookies count H/W supports) plus one for
2852 		 * remainder, and plus 2 to handle the extra pages on the trim
2853 		 * (see above comment about trim)
2854 		 */
2855 		if (attr->dma_attr_sgllen < sinfo->si_sgl_size) {
2856 			sglwin = ((sinfo->si_sgl_size / attr->dma_attr_sgllen)
2857 			    + 1) + 2;
2858 		} else {
2859 			sglwin = 0;
2860 		}
2861 
2862 		/*
2863 		 * if we need windows because we're binding more memory than the
2864 		 * H/W can transfer at once, the number of windows we would need
2865 		 * here would be (xfer count / max xfer H/W supports) plus one
2866 		 * for remainder, and plus 2 to handle the extra pages on the
2867 		 * trim (see above comment about trim)
2868 		 */
2869 		if (dma->dp_dma.dmao_size > dma->dp_maxxfer) {
2870 			maxxfer_win = (dma->dp_dma.dmao_size /
2871 			    dma->dp_maxxfer) + 1 + 2;
2872 		} else {
2873 			maxxfer_win = 0;
2874 		}
2875 		dma->dp_max_win =  copybuf_win + sglwin + maxxfer_win;
2876 		ASSERT(dma->dp_max_win > 0);
2877 	}
2878 	win_state_size = dma->dp_max_win * sizeof (rootnex_window_t);
2879 
2880 	/*
2881 	 * Get space for window and potential copy buffer state. Before we
2882 	 * go and allocate memory, see if we can get away with using what's
2883 	 * left in the pre-allocted state or the dynamically allocated sgl.
2884 	 */
2885 	space_used = (uintptr_t)(sinfo->si_sgl_size *
2886 	    sizeof (ddi_dma_cookie_t));
2887 
2888 	/* if we dynamically allocated space for the cookies */
2889 	if (dma->dp_need_to_free_cookie) {
2890 		/* if we have more space in the pre-allocted buffer, use it */
2891 		ASSERT(space_used <= dma->dp_cookie_size);
2892 		if ((dma->dp_cookie_size - space_used) <=
2893 		    rootnex_state->r_prealloc_size) {
2894 			state_available = rootnex_state->r_prealloc_size;
2895 			windowp = (rootnex_window_t *)dma->dp_prealloc_buffer;
2896 
2897 		/*
2898 		 * else, we have more free space in the dynamically allocated
2899 		 * buffer, i.e. the buffer wasn't worse case fragmented so we
2900 		 * didn't need a lot of cookies.
2901 		 */
2902 		} else {
2903 			state_available = dma->dp_cookie_size - space_used;
2904 			windowp = (rootnex_window_t *)
2905 			    &dma->dp_cookies[sinfo->si_sgl_size];
2906 		}
2907 
2908 	/* we used the pre-alloced buffer */
2909 	} else {
2910 		ASSERT(space_used <= rootnex_state->r_prealloc_size);
2911 		state_available = rootnex_state->r_prealloc_size - space_used;
2912 		windowp = (rootnex_window_t *)
2913 		    &dma->dp_cookies[sinfo->si_sgl_size];
2914 	}
2915 
2916 	/*
2917 	 * figure out how much state we need to track the copy buffer. Add an
2918 	 * addition 8 bytes for pointer alignemnt later.
2919 	 */
2920 	if (dma->dp_copybuf_size > 0) {
2921 		copy_state_size = sinfo->si_max_pages *
2922 		    sizeof (rootnex_pgmap_t);
2923 	} else {
2924 		copy_state_size = 0;
2925 	}
2926 	/* add an additional 8 bytes for pointer alignment */
2927 	space_needed = win_state_size + copy_state_size + 0x8;
2928 
2929 	/* if we have enough space already, use it */
2930 	if (state_available >= space_needed) {
2931 		dma->dp_window = windowp;
2932 		dma->dp_need_to_free_window = B_FALSE;
2933 
2934 	/* not enough space, need to allocate more. */
2935 	} else {
2936 		dma->dp_window = kmem_alloc(space_needed, kmflag);
2937 		if (dma->dp_window == NULL) {
2938 			return (DDI_DMA_NORESOURCES);
2939 		}
2940 		dma->dp_need_to_free_window = B_TRUE;
2941 		dma->dp_window_size = space_needed;
2942 		DTRACE_PROBE2(rootnex__bind__sp__alloc, dev_info_t *,
2943 		    dma->dp_dip, size_t, space_needed);
2944 	}
2945 
2946 	/*
2947 	 * we allocate copy buffer state and window state at the same time.
2948 	 * setup our copy buffer state pointers. Make sure it's aligned.
2949 	 */
2950 	if (dma->dp_copybuf_size > 0) {
2951 		dma->dp_pgmap = (rootnex_pgmap_t *)(((uintptr_t)
2952 		    &dma->dp_window[dma->dp_max_win] + 0x7) & ~0x7);
2953 
2954 #if !defined(__amd64)
2955 		/*
2956 		 * make sure all pm_mapped, pm_vaddr, and pm_pp are set to
2957 		 * false/NULL. Should be quicker to bzero vs loop and set.
2958 		 */
2959 		bzero(dma->dp_pgmap, copy_state_size);
2960 #endif
2961 	} else {
2962 		dma->dp_pgmap = NULL;
2963 	}
2964 
2965 	return (DDI_SUCCESS);
2966 }
2967 
2968 
2969 /*
2970  * rootnex_teardown_copybuf()
2971  *    cleans up after rootnex_setup_copybuf()
2972  */
2973 static void
2974 rootnex_teardown_copybuf(rootnex_dma_t *dma)
2975 {
2976 #if !defined(__amd64)
2977 	int i;
2978 
2979 	/*
2980 	 * if we allocated kernel heap VMEM space, go through all the pages and
2981 	 * map out any of the ones that we're mapped into the kernel heap VMEM
2982 	 * arena. Then free the VMEM space.
2983 	 */
2984 	if (dma->dp_kva != NULL) {
2985 		for (i = 0; i < dma->dp_sglinfo.si_max_pages; i++) {
2986 			if (dma->dp_pgmap[i].pm_mapped) {
2987 				hat_unload(kas.a_hat, dma->dp_pgmap[i].pm_kaddr,
2988 				    MMU_PAGESIZE, HAT_UNLOAD);
2989 				dma->dp_pgmap[i].pm_mapped = B_FALSE;
2990 			}
2991 		}
2992 
2993 		vmem_free(heap_arena, dma->dp_kva, dma->dp_copybuf_size);
2994 	}
2995 
2996 #endif
2997 
2998 	/* if we allocated a copy buffer, free it */
2999 	if (dma->dp_cbaddr != NULL) {
3000 		i_ddi_mem_free(dma->dp_cbaddr, 0);
3001 	}
3002 }
3003 
3004 
3005 /*
3006  * rootnex_teardown_windows()
3007  *    cleans up after rootnex_setup_windows()
3008  */
3009 static void
3010 rootnex_teardown_windows(rootnex_dma_t *dma)
3011 {
3012 	/*
3013 	 * if we had to allocate window state on the last bind (because we
3014 	 * didn't have enough pre-allocated space in the handle), free it.
3015 	 */
3016 	if (dma->dp_need_to_free_window) {
3017 		kmem_free(dma->dp_window, dma->dp_window_size);
3018 	}
3019 }
3020 
3021 
3022 /*
3023  * rootnex_init_win()
3024  *    Called in bind slow path during creation of a new window. Initializes
3025  *    window state to default values.
3026  */
3027 /*ARGSUSED*/
3028 static void
3029 rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
3030     rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset)
3031 {
3032 	hp->dmai_nwin++;
3033 	window->wd_dosync = B_FALSE;
3034 	window->wd_offset = cur_offset;
3035 	window->wd_size = 0;
3036 	window->wd_first_cookie = cookie;
3037 	window->wd_cookie_cnt = 0;
3038 	window->wd_trim.tr_trim_first = B_FALSE;
3039 	window->wd_trim.tr_trim_last = B_FALSE;
3040 	window->wd_trim.tr_first_copybuf_win = B_FALSE;
3041 	window->wd_trim.tr_last_copybuf_win = B_FALSE;
3042 #if !defined(__amd64)
3043 	window->wd_remap_copybuf = dma->dp_cb_remaping;
3044 #endif
3045 }
3046 
3047 
3048 /*
3049  * rootnex_setup_cookie()
3050  *    Called in the bind slow path when the sgl uses the copy buffer. If any of
3051  *    the sgl uses the copy buffer, we need to go through each cookie, figure
3052  *    out if it uses the copy buffer, and if it does, save away everything we'll
3053  *    need during sync.
3054  */
3055 static void
3056 rootnex_setup_cookie(ddi_dma_obj_t *dmar_object, rootnex_dma_t *dma,
3057     ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used,
3058     page_t **cur_pp)
3059 {
3060 	boolean_t copybuf_sz_power_2;
3061 	rootnex_sglinfo_t *sinfo;
3062 	uint_t pidx;
3063 	uint_t pcnt;
3064 	off_t poff;
3065 #if defined(__amd64)
3066 	pfn_t pfn;
3067 #else
3068 	page_t **pplist;
3069 #endif
3070 
3071 	sinfo = &dma->dp_sglinfo;
3072 
3073 	/*
3074 	 * Calculate the page index relative to the start of the buffer. The
3075 	 * index to the current page for our buffer is the offset into the
3076 	 * first page of the buffer plus our current offset into the buffer
3077 	 * itself, shifted of course...
3078 	 */
3079 	pidx = (sinfo->si_buf_offset + cur_offset) >> MMU_PAGESHIFT;
3080 	ASSERT(pidx < sinfo->si_max_pages);
3081 
3082 	/* if this cookie uses the copy buffer */
3083 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3084 		/*
3085 		 * NOTE: we know that since this cookie uses the copy buffer, it
3086 		 * is <= MMU_PAGESIZE.
3087 		 */
3088 
3089 		/*
3090 		 * get the offset into the page. For the 64-bit kernel, get the
3091 		 * pfn which we'll use with seg kpm.
3092 		 */
3093 		poff = cookie->_dmu._dmac_ll & MMU_PAGEOFFSET;
3094 #if defined(__amd64)
3095 		pfn = cookie->_dmu._dmac_ll >> MMU_PAGESHIFT;
3096 #endif
3097 
3098 		/* figure out if the copybuf size is a power of 2 */
3099 		if (dma->dp_copybuf_size & (dma->dp_copybuf_size - 1)) {
3100 			copybuf_sz_power_2 = B_FALSE;
3101 		} else {
3102 			copybuf_sz_power_2 = B_TRUE;
3103 		}
3104 
3105 		/* This page uses the copy buffer */
3106 		dma->dp_pgmap[pidx].pm_uses_copybuf = B_TRUE;
3107 
3108 		/*
3109 		 * save the copy buffer KVA that we'll use with this page.
3110 		 * if we still fit within the copybuf, it's a simple add.
3111 		 * otherwise, we need to wrap over using & or % accordingly.
3112 		 */
3113 		if ((*copybuf_used + MMU_PAGESIZE) <= dma->dp_copybuf_size) {
3114 			dma->dp_pgmap[pidx].pm_cbaddr = dma->dp_cbaddr +
3115 			    *copybuf_used;
3116 		} else {
3117 			if (copybuf_sz_power_2) {
3118 				dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
3119 				    (uintptr_t)dma->dp_cbaddr +
3120 				    (*copybuf_used &
3121 				    (dma->dp_copybuf_size - 1)));
3122 			} else {
3123 				dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
3124 				    (uintptr_t)dma->dp_cbaddr +
3125 				    (*copybuf_used % dma->dp_copybuf_size));
3126 			}
3127 		}
3128 
3129 		/*
3130 		 * over write the cookie physical address with the address of
3131 		 * the physical address of the copy buffer page that we will
3132 		 * use.
3133 		 */
3134 		cookie->_dmu._dmac_ll = ptob64(hat_getpfnum(kas.a_hat,
3135 		    dma->dp_pgmap[pidx].pm_cbaddr)) + poff;
3136 
3137 		/* if we have a kernel VA, it's easy, just save that address */
3138 		if ((dmar_object->dmao_type != DMA_OTYP_PAGES) &&
3139 		    (sinfo->si_asp == &kas)) {
3140 			/*
3141 			 * save away the page aligned virtual address of the
3142 			 * driver buffer. Offsets are handled in the sync code.
3143 			 */
3144 			dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)(((uintptr_t)
3145 			    dmar_object->dmao_obj.virt_obj.v_addr + cur_offset)
3146 			    & MMU_PAGEMASK);
3147 #if !defined(__amd64)
3148 			/*
3149 			 * we didn't need to, and will never need to map this
3150 			 * page.
3151 			 */
3152 			dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
3153 #endif
3154 
3155 		/* we don't have a kernel VA. We need one for the bcopy. */
3156 		} else {
3157 #if defined(__amd64)
3158 			/*
3159 			 * for the 64-bit kernel, it's easy. We use seg kpm to
3160 			 * get a Kernel VA for the corresponding pfn.
3161 			 */
3162 			dma->dp_pgmap[pidx].pm_kaddr = hat_kpm_pfn2va(pfn);
3163 #else
3164 			/*
3165 			 * for the 32-bit kernel, this is a pain. First we'll
3166 			 * save away the page_t or user VA for this page. This
3167 			 * is needed in rootnex_dma_win() when we switch to a
3168 			 * new window which requires us to re-map the copy
3169 			 * buffer.
3170 			 */
3171 			pplist = dmar_object->dmao_obj.virt_obj.v_priv;
3172 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
3173 				dma->dp_pgmap[pidx].pm_pp = *cur_pp;
3174 				dma->dp_pgmap[pidx].pm_vaddr = NULL;
3175 			} else if (pplist != NULL) {
3176 				dma->dp_pgmap[pidx].pm_pp = pplist[pidx];
3177 				dma->dp_pgmap[pidx].pm_vaddr = NULL;
3178 			} else {
3179 				dma->dp_pgmap[pidx].pm_pp = NULL;
3180 				dma->dp_pgmap[pidx].pm_vaddr = (caddr_t)
3181 				    (((uintptr_t)
3182 				    dmar_object->dmao_obj.virt_obj.v_addr +
3183 				    cur_offset) & MMU_PAGEMASK);
3184 			}
3185 
3186 			/*
3187 			 * save away the page aligned virtual address which was
3188 			 * allocated from the kernel heap arena (taking into
3189 			 * account if we need more copy buffer than we alloced
3190 			 * and use multiple windows to handle this, i.e. &,%).
3191 			 * NOTE: there isn't and physical memory backing up this
3192 			 * virtual address space currently.
3193 			 */
3194 			if ((*copybuf_used + MMU_PAGESIZE) <=
3195 			    dma->dp_copybuf_size) {
3196 				dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
3197 				    (((uintptr_t)dma->dp_kva + *copybuf_used) &
3198 				    MMU_PAGEMASK);
3199 			} else {
3200 				if (copybuf_sz_power_2) {
3201 					dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
3202 					    (((uintptr_t)dma->dp_kva +
3203 					    (*copybuf_used &
3204 					    (dma->dp_copybuf_size - 1))) &
3205 					    MMU_PAGEMASK);
3206 				} else {
3207 					dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
3208 					    (((uintptr_t)dma->dp_kva +
3209 					    (*copybuf_used %
3210 					    dma->dp_copybuf_size)) &
3211 					    MMU_PAGEMASK);
3212 				}
3213 			}
3214 
3215 			/*
3216 			 * if we haven't used up the available copy buffer yet,
3217 			 * map the kva to the physical page.
3218 			 */
3219 			if (!dma->dp_cb_remaping && ((*copybuf_used +
3220 			    MMU_PAGESIZE) <= dma->dp_copybuf_size)) {
3221 				dma->dp_pgmap[pidx].pm_mapped = B_TRUE;
3222 				if (dma->dp_pgmap[pidx].pm_pp != NULL) {
3223 					i86_pp_map(dma->dp_pgmap[pidx].pm_pp,
3224 					    dma->dp_pgmap[pidx].pm_kaddr);
3225 				} else {
3226 					i86_va_map(dma->dp_pgmap[pidx].pm_vaddr,
3227 					    sinfo->si_asp,
3228 					    dma->dp_pgmap[pidx].pm_kaddr);
3229 				}
3230 
3231 			/*
3232 			 * we've used up the available copy buffer, this page
3233 			 * will have to be mapped during rootnex_dma_win() when
3234 			 * we switch to a new window which requires a re-map
3235 			 * the copy buffer. (32-bit kernel only)
3236 			 */
3237 			} else {
3238 				dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
3239 			}
3240 #endif
3241 			/* go to the next page_t */
3242 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
3243 				*cur_pp = (*cur_pp)->p_next;
3244 			}
3245 		}
3246 
3247 		/* add to the copy buffer count */
3248 		*copybuf_used += MMU_PAGESIZE;
3249 
3250 	/*
3251 	 * This cookie doesn't use the copy buffer. Walk through the pages this
3252 	 * cookie occupies to reflect this.
3253 	 */
3254 	} else {
3255 		/*
3256 		 * figure out how many pages the cookie occupies. We need to
3257 		 * use the original page offset of the buffer and the cookies
3258 		 * offset in the buffer to do this.
3259 		 */
3260 		poff = (sinfo->si_buf_offset + cur_offset) & MMU_PAGEOFFSET;
3261 		pcnt = mmu_btopr(cookie->dmac_size + poff);
3262 
3263 		while (pcnt > 0) {
3264 #if !defined(__amd64)
3265 			/*
3266 			 * the 32-bit kernel doesn't have seg kpm, so we need
3267 			 * to map in the driver buffer (if it didn't come down
3268 			 * with a kernel VA) on the fly. Since this page doesn't
3269 			 * use the copy buffer, it's not, or will it ever, have
3270 			 * to be mapped in.
3271 			 */
3272 			dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
3273 #endif
3274 			dma->dp_pgmap[pidx].pm_uses_copybuf = B_FALSE;
3275 
3276 			/*
3277 			 * we need to update pidx and cur_pp or we'll loose
3278 			 * track of where we are.
3279 			 */
3280 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
3281 				*cur_pp = (*cur_pp)->p_next;
3282 			}
3283 			pidx++;
3284 			pcnt--;
3285 		}
3286 	}
3287 }
3288 
3289 
3290 /*
3291  * rootnex_sgllen_window_boundary()
3292  *    Called in the bind slow path when the next cookie causes us to exceed (in
3293  *    this case == since we start at 0 and sgllen starts at 1) the maximum sgl
3294  *    length supported by the DMA H/W.
3295  */
3296 static int
3297 rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
3298     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, ddi_dma_attr_t *attr,
3299     off_t cur_offset)
3300 {
3301 	off_t new_offset;
3302 	size_t trim_sz;
3303 	off_t coffset;
3304 
3305 
3306 	/*
3307 	 * if we know we'll never have to trim, it's pretty easy. Just move to
3308 	 * the next window and init it. We're done.
3309 	 */
3310 	if (!dma->dp_trim_required) {
3311 		(*windowp)++;
3312 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
3313 		(*windowp)->wd_cookie_cnt++;
3314 		(*windowp)->wd_size = cookie->dmac_size;
3315 		return (DDI_SUCCESS);
3316 	}
3317 
3318 	/* figure out how much we need to trim from the window */
3319 	ASSERT(attr->dma_attr_granular != 0);
3320 	if (dma->dp_granularity_power_2) {
3321 		trim_sz = (*windowp)->wd_size & (attr->dma_attr_granular - 1);
3322 	} else {
3323 		trim_sz = (*windowp)->wd_size % attr->dma_attr_granular;
3324 	}
3325 
3326 	/* The window's a whole multiple of granularity. We're done */
3327 	if (trim_sz == 0) {
3328 		(*windowp)++;
3329 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
3330 		(*windowp)->wd_cookie_cnt++;
3331 		(*windowp)->wd_size = cookie->dmac_size;
3332 		return (DDI_SUCCESS);
3333 	}
3334 
3335 	/*
3336 	 * The window's not a whole multiple of granularity, since we know this
3337 	 * is due to the sgllen, we need to go back to the last cookie and trim
3338 	 * that one, add the left over part of the old cookie into the new
3339 	 * window, and then add in the new cookie into the new window.
3340 	 */
3341 
3342 	/*
3343 	 * make sure the driver isn't making us do something bad... Trimming and
3344 	 * sgllen == 1 don't go together.
3345 	 */
3346 	if (attr->dma_attr_sgllen == 1) {
3347 		return (DDI_DMA_NOMAPPING);
3348 	}
3349 
3350 	/*
3351 	 * first, setup the current window to account for the trim. Need to go
3352 	 * back to the last cookie for this.
3353 	 */
3354 	cookie--;
3355 	(*windowp)->wd_trim.tr_trim_last = B_TRUE;
3356 	(*windowp)->wd_trim.tr_last_cookie = cookie;
3357 	(*windowp)->wd_trim.tr_last_paddr = cookie->_dmu._dmac_ll;
3358 	ASSERT(cookie->dmac_size > trim_sz);
3359 	(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
3360 	(*windowp)->wd_size -= trim_sz;
3361 
3362 	/* save the buffer offsets for the next window */
3363 	coffset = cookie->dmac_size - trim_sz;
3364 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
3365 
3366 	/*
3367 	 * set this now in case this is the first window. all other cases are
3368 	 * set in dma_win()
3369 	 */
3370 	cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
3371 
3372 	/*
3373 	 * initialize the next window using what's left over in the previous
3374 	 * cookie.
3375 	 */
3376 	(*windowp)++;
3377 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
3378 	(*windowp)->wd_cookie_cnt++;
3379 	(*windowp)->wd_trim.tr_trim_first = B_TRUE;
3380 	(*windowp)->wd_trim.tr_first_paddr = cookie->_dmu._dmac_ll + coffset;
3381 	(*windowp)->wd_trim.tr_first_size = trim_sz;
3382 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3383 		(*windowp)->wd_dosync = B_TRUE;
3384 	}
3385 
3386 	/*
3387 	 * now go back to the current cookie and add it to the new window. set
3388 	 * the new window size to the what was left over from the previous
3389 	 * cookie and what's in the current cookie.
3390 	 */
3391 	cookie++;
3392 	(*windowp)->wd_cookie_cnt++;
3393 	(*windowp)->wd_size = trim_sz + cookie->dmac_size;
3394 
3395 	/*
3396 	 * trim plus the next cookie could put us over maxxfer (a cookie can be
3397 	 * a max size of maxxfer). Handle that case.
3398 	 */
3399 	if ((*windowp)->wd_size > dma->dp_maxxfer) {
3400 		/*
3401 		 * maxxfer is already a whole multiple of granularity, and this
3402 		 * trim will be <= the previous trim (since a cookie can't be
3403 		 * larger than maxxfer). Make things simple here.
3404 		 */
3405 		trim_sz = (*windowp)->wd_size - dma->dp_maxxfer;
3406 		(*windowp)->wd_trim.tr_trim_last = B_TRUE;
3407 		(*windowp)->wd_trim.tr_last_cookie = cookie;
3408 		(*windowp)->wd_trim.tr_last_paddr = cookie->_dmu._dmac_ll;
3409 		(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
3410 		(*windowp)->wd_size -= trim_sz;
3411 		ASSERT((*windowp)->wd_size == dma->dp_maxxfer);
3412 
3413 		/* save the buffer offsets for the next window */
3414 		coffset = cookie->dmac_size - trim_sz;
3415 		new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
3416 
3417 		/* setup the next window */
3418 		(*windowp)++;
3419 		rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
3420 		(*windowp)->wd_cookie_cnt++;
3421 		(*windowp)->wd_trim.tr_trim_first = B_TRUE;
3422 		(*windowp)->wd_trim.tr_first_paddr = cookie->_dmu._dmac_ll +
3423 		    coffset;
3424 		(*windowp)->wd_trim.tr_first_size = trim_sz;
3425 	}
3426 
3427 	return (DDI_SUCCESS);
3428 }
3429 
3430 
3431 /*
3432  * rootnex_copybuf_window_boundary()
3433  *    Called in bind slowpath when we get to a window boundary because we used
3434  *    up all the copy buffer that we have.
3435  */
3436 static int
3437 rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
3438     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, off_t cur_offset,
3439     size_t *copybuf_used)
3440 {
3441 	rootnex_sglinfo_t *sinfo;
3442 	off_t new_offset;
3443 	size_t trim_sz;
3444 	off_t coffset;
3445 	uint_t pidx;
3446 	off_t poff;
3447 
3448 
3449 	sinfo = &dma->dp_sglinfo;
3450 
3451 	/*
3452 	 * the copy buffer should be a whole multiple of page size. We know that
3453 	 * this cookie is <= MMU_PAGESIZE.
3454 	 */
3455 	ASSERT(cookie->dmac_size <= MMU_PAGESIZE);
3456 
3457 	/*
3458 	 * from now on, all new windows in this bind need to be re-mapped during
3459 	 * ddi_dma_getwin() (32-bit kernel only). i.e. we ran out out copybuf
3460 	 * space...
3461 	 */
3462 #if !defined(__amd64)
3463 	dma->dp_cb_remaping = B_TRUE;
3464 #endif
3465 
3466 	/* reset copybuf used */
3467 	*copybuf_used = 0;
3468 
3469 	/*
3470 	 * if we don't have to trim (since granularity is set to 1), go to the
3471 	 * next window and add the current cookie to it. We know the current
3472 	 * cookie uses the copy buffer since we're in this code path.
3473 	 */
3474 	if (!dma->dp_trim_required) {
3475 		(*windowp)++;
3476 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
3477 
3478 		/* Add this cookie to the new window */
3479 		(*windowp)->wd_cookie_cnt++;
3480 		(*windowp)->wd_size += cookie->dmac_size;
3481 		*copybuf_used += MMU_PAGESIZE;
3482 		return (DDI_SUCCESS);
3483 	}
3484 
3485 	/*
3486 	 * *** may need to trim, figure it out.
3487 	 */
3488 
3489 	/* figure out how much we need to trim from the window */
3490 	if (dma->dp_granularity_power_2) {
3491 		trim_sz = (*windowp)->wd_size &
3492 		    (hp->dmai_attr.dma_attr_granular - 1);
3493 	} else {
3494 		trim_sz = (*windowp)->wd_size % hp->dmai_attr.dma_attr_granular;
3495 	}
3496 
3497 	/*
3498 	 * if the window's a whole multiple of granularity, go to the next
3499 	 * window, init it, then add in the current cookie. We know the current
3500 	 * cookie uses the copy buffer since we're in this code path.
3501 	 */
3502 	if (trim_sz == 0) {
3503 		(*windowp)++;
3504 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
3505 
3506 		/* Add this cookie to the new window */
3507 		(*windowp)->wd_cookie_cnt++;
3508 		(*windowp)->wd_size += cookie->dmac_size;
3509 		*copybuf_used += MMU_PAGESIZE;
3510 		return (DDI_SUCCESS);
3511 	}
3512 
3513 	/*
3514 	 * *** We figured it out, we definitly need to trim
3515 	 */
3516 
3517 	/*
3518 	 * make sure the driver isn't making us do something bad...
3519 	 * Trimming and sgllen == 1 don't go together.
3520 	 */
3521 	if (hp->dmai_attr.dma_attr_sgllen == 1) {
3522 		return (DDI_DMA_NOMAPPING);
3523 	}
3524 
3525 	/*
3526 	 * first, setup the current window to account for the trim. Need to go
3527 	 * back to the last cookie for this. Some of the last cookie will be in
3528 	 * the current window, and some of the last cookie will be in the new
3529 	 * window. All of the current cookie will be in the new window.
3530 	 */
3531 	cookie--;
3532 	(*windowp)->wd_trim.tr_trim_last = B_TRUE;
3533 	(*windowp)->wd_trim.tr_last_cookie = cookie;
3534 	(*windowp)->wd_trim.tr_last_paddr = cookie->_dmu._dmac_ll;
3535 	ASSERT(cookie->dmac_size > trim_sz);
3536 	(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
3537 	(*windowp)->wd_size -= trim_sz;
3538 
3539 	/*
3540 	 * we're trimming the last cookie (not the current cookie). So that
3541 	 * last cookie may have or may not have been using the copy buffer (
3542 	 * we know the cookie passed in uses the copy buffer since we're in
3543 	 * this code path).
3544 	 *
3545 	 * If the last cookie doesn't use the copy buffer, nothing special to
3546 	 * do. However, if it does uses the copy buffer, it will be both the
3547 	 * last page in the current window and the first page in the next
3548 	 * window. Since we are reusing the copy buffer (and KVA space on the
3549 	 * 32-bit kernel), this page will use the end of the copy buffer in the
3550 	 * current window, and the start of the copy buffer in the next window.
3551 	 * Track that info... The cookie physical address was already set to
3552 	 * the copy buffer physical address in setup_cookie..
3553 	 */
3554 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3555 		pidx = (sinfo->si_buf_offset + (*windowp)->wd_offset +
3556 		    (*windowp)->wd_size) >> MMU_PAGESHIFT;
3557 		(*windowp)->wd_trim.tr_last_copybuf_win = B_TRUE;
3558 		(*windowp)->wd_trim.tr_last_pidx = pidx;
3559 		(*windowp)->wd_trim.tr_last_cbaddr =
3560 		    dma->dp_pgmap[pidx].pm_cbaddr;
3561 #if !defined(__amd64)
3562 		(*windowp)->wd_trim.tr_last_kaddr =
3563 		    dma->dp_pgmap[pidx].pm_kaddr;
3564 #endif
3565 	}
3566 
3567 	/* save the buffer offsets for the next window */
3568 	coffset = cookie->dmac_size - trim_sz;
3569 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
3570 
3571 	/*
3572 	 * set this now in case this is the first window. all other cases are
3573 	 * set in dma_win()
3574 	 */
3575 	cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
3576 
3577 	/*
3578 	 * initialize the next window using what's left over in the previous
3579 	 * cookie.
3580 	 */
3581 	(*windowp)++;
3582 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
3583 	(*windowp)->wd_cookie_cnt++;
3584 	(*windowp)->wd_trim.tr_trim_first = B_TRUE;
3585 	(*windowp)->wd_trim.tr_first_paddr = cookie->_dmu._dmac_ll + coffset;
3586 	(*windowp)->wd_trim.tr_first_size = trim_sz;
3587 
3588 	/*
3589 	 * again, we're tracking if the last cookie uses the copy buffer.
3590 	 * read the comment above for more info on why we need to track
3591 	 * additional state.
3592 	 *
3593 	 * For the first cookie in the new window, we need reset the physical
3594 	 * address to DMA into to the start of the copy buffer plus any
3595 	 * initial page offset which may be present.
3596 	 */
3597 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3598 		(*windowp)->wd_dosync = B_TRUE;
3599 		(*windowp)->wd_trim.tr_first_copybuf_win = B_TRUE;
3600 		(*windowp)->wd_trim.tr_first_pidx = pidx;
3601 		(*windowp)->wd_trim.tr_first_cbaddr = dma->dp_cbaddr;
3602 		poff = (*windowp)->wd_trim.tr_first_paddr & MMU_PAGEOFFSET;
3603 		(*windowp)->wd_trim.tr_first_paddr = ptob64(hat_getpfnum(
3604 		    kas.a_hat, dma->dp_cbaddr)) + poff;
3605 #if !defined(__amd64)
3606 		(*windowp)->wd_trim.tr_first_kaddr = dma->dp_kva;
3607 #endif
3608 		/* account for the cookie copybuf usage in the new window */
3609 		*copybuf_used += MMU_PAGESIZE;
3610 
3611 		/*
3612 		 * every piece of code has to have a hack, and here is this
3613 		 * ones :-)
3614 		 *
3615 		 * There is a complex interaction between setup_cookie and the
3616 		 * copybuf window boundary. The complexity had to be in either
3617 		 * the maxxfer window, or the copybuf window, and I chose the
3618 		 * copybuf code.
3619 		 *
3620 		 * So in this code path, we have taken the last cookie,
3621 		 * virtually broken it in half due to the trim, and it happens
3622 		 * to use the copybuf which further complicates life. At the
3623 		 * same time, we have already setup the current cookie, which
3624 		 * is now wrong. More background info: the current cookie uses
3625 		 * the copybuf, so it is only a page long max. So we need to
3626 		 * fix the current cookies copy buffer address, physical
3627 		 * address, and kva for the 32-bit kernel. We due this by
3628 		 * bumping them by page size (of course, we can't due this on
3629 		 * the physical address since the copy buffer may not be
3630 		 * physically contiguous).
3631 		 */
3632 		cookie++;
3633 		dma->dp_pgmap[pidx + 1].pm_cbaddr += MMU_PAGESIZE;
3634 		poff = cookie->_dmu._dmac_ll & MMU_PAGEOFFSET;
3635 		cookie->_dmu._dmac_ll = ptob64(hat_getpfnum(kas.a_hat,
3636 		    dma->dp_pgmap[pidx + 1].pm_cbaddr)) + poff;
3637 #if !defined(__amd64)
3638 		ASSERT(dma->dp_pgmap[pidx + 1].pm_mapped == B_FALSE);
3639 		dma->dp_pgmap[pidx + 1].pm_kaddr += MMU_PAGESIZE;
3640 #endif
3641 	} else {
3642 		/* go back to the current cookie */
3643 		cookie++;
3644 	}
3645 
3646 	/*
3647 	 * add the current cookie to the new window. set the new window size to
3648 	 * the what was left over from the previous cookie and what's in the
3649 	 * current cookie.
3650 	 */
3651 	(*windowp)->wd_cookie_cnt++;
3652 	(*windowp)->wd_size = trim_sz + cookie->dmac_size;
3653 	ASSERT((*windowp)->wd_size < dma->dp_maxxfer);
3654 
3655 	/*
3656 	 * we know that the cookie passed in always uses the copy buffer. We
3657 	 * wouldn't be here if it didn't.
3658 	 */
3659 	*copybuf_used += MMU_PAGESIZE;
3660 
3661 	return (DDI_SUCCESS);
3662 }
3663 
3664 
3665 /*
3666  * rootnex_maxxfer_window_boundary()
3667  *    Called in bind slowpath when we get to a window boundary because we will
3668  *    go over maxxfer.
3669  */
3670 static int
3671 rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
3672     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie)
3673 {
3674 	size_t dmac_size;
3675 	off_t new_offset;
3676 	size_t trim_sz;
3677 	off_t coffset;
3678 
3679 
3680 	/*
3681 	 * calculate how much we have to trim off of the current cookie to equal
3682 	 * maxxfer. We don't have to account for granularity here since our
3683 	 * maxxfer already takes that into account.
3684 	 */
3685 	trim_sz = ((*windowp)->wd_size + cookie->dmac_size) - dma->dp_maxxfer;
3686 	ASSERT(trim_sz <= cookie->dmac_size);
3687 	ASSERT(trim_sz <= dma->dp_maxxfer);
3688 
3689 	/* save cookie size since we need it later and we might change it */
3690 	dmac_size = cookie->dmac_size;
3691 
3692 	/*
3693 	 * if we're not trimming the entire cookie, setup the current window to
3694 	 * account for the trim.
3695 	 */
3696 	if (trim_sz < cookie->dmac_size) {
3697 		(*windowp)->wd_cookie_cnt++;
3698 		(*windowp)->wd_trim.tr_trim_last = B_TRUE;
3699 		(*windowp)->wd_trim.tr_last_cookie = cookie;
3700 		(*windowp)->wd_trim.tr_last_paddr = cookie->_dmu._dmac_ll;
3701 		(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
3702 		(*windowp)->wd_size = dma->dp_maxxfer;
3703 
3704 		/*
3705 		 * set the adjusted cookie size now in case this is the first
3706 		 * window. All other windows are taken care of in get win
3707 		 */
3708 		cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
3709 	}
3710 
3711 	/*
3712 	 * coffset is the current offset within the cookie, new_offset is the
3713 	 * current offset with the entire buffer.
3714 	 */
3715 	coffset = dmac_size - trim_sz;
3716 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
3717 
3718 	/* initialize the next window */
3719 	(*windowp)++;
3720 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
3721 	(*windowp)->wd_cookie_cnt++;
3722 	(*windowp)->wd_size = trim_sz;
3723 	if (trim_sz < dmac_size) {
3724 		(*windowp)->wd_trim.tr_trim_first = B_TRUE;
3725 		(*windowp)->wd_trim.tr_first_paddr = cookie->_dmu._dmac_ll +
3726 		    coffset;
3727 		(*windowp)->wd_trim.tr_first_size = trim_sz;
3728 	}
3729 
3730 	return (DDI_SUCCESS);
3731 }
3732 
3733 
3734 /*
3735  * rootnex_dma_sync()
3736  *    called from ddi_dma_sync() if DMP_NOSYNC is not set in hp->dmai_rflags.
3737  *    We set DMP_NOSYNC if we're not using the copy buffer. If DMP_NOSYNC
3738  *    is set, ddi_dma_sync() returns immediately passing back success.
3739  */
3740 /*ARGSUSED*/
3741 static int
3742 rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
3743     off_t off, size_t len, uint_t cache_flags)
3744 {
3745 	rootnex_sglinfo_t *sinfo;
3746 	rootnex_pgmap_t *cbpage;
3747 	rootnex_window_t *win;
3748 	ddi_dma_impl_t *hp;
3749 	rootnex_dma_t *dma;
3750 	caddr_t fromaddr;
3751 	caddr_t toaddr;
3752 	uint_t psize;
3753 	off_t offset;
3754 	uint_t pidx;
3755 	size_t size;
3756 	off_t poff;
3757 	int e;
3758 
3759 
3760 	hp = (ddi_dma_impl_t *)handle;
3761 	dma = (rootnex_dma_t *)hp->dmai_private;
3762 	sinfo = &dma->dp_sglinfo;
3763 
3764 	/*
3765 	 * if we don't have any windows, we don't need to sync. A copybuf
3766 	 * will cause us to have at least one window.
3767 	 */
3768 	if (dma->dp_window == NULL) {
3769 		return (DDI_SUCCESS);
3770 	}
3771 
3772 	/* This window may not need to be sync'd */
3773 	win = &dma->dp_window[dma->dp_current_win];
3774 	if (!win->wd_dosync) {
3775 		return (DDI_SUCCESS);
3776 	}
3777 
3778 	/* handle off and len special cases */
3779 	if ((off == 0) || (rootnex_sync_ignore_params)) {
3780 		offset = win->wd_offset;
3781 	} else {
3782 		offset = off;
3783 	}
3784 	if ((len == 0) || (rootnex_sync_ignore_params)) {
3785 		size = win->wd_size;
3786 	} else {
3787 		size = len;
3788 	}
3789 
3790 	/* check the sync args to make sure they make a little sense */
3791 	if (rootnex_sync_check_parms) {
3792 		e = rootnex_valid_sync_parms(hp, win, offset, size,
3793 		    cache_flags);
3794 		if (e != DDI_SUCCESS) {
3795 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_SYNC_FAIL]);
3796 			return (DDI_FAILURE);
3797 		}
3798 	}
3799 
3800 	/*
3801 	 * special case the first page to handle the offset into the page. The
3802 	 * offset to the current page for our buffer is the offset into the
3803 	 * first page of the buffer plus our current offset into the buffer
3804 	 * itself, masked of course.
3805 	 */
3806 	poff = (sinfo->si_buf_offset + offset) & MMU_PAGEOFFSET;
3807 	psize = MIN((MMU_PAGESIZE - poff), size);
3808 
3809 	/* go through all the pages that we want to sync */
3810 	while (size > 0) {
3811 		/*
3812 		 * Calculate the page index relative to the start of the buffer.
3813 		 * The index to the current page for our buffer is the offset
3814 		 * into the first page of the buffer plus our current offset
3815 		 * into the buffer itself, shifted of course...
3816 		 */
3817 		pidx = (sinfo->si_buf_offset + offset) >> MMU_PAGESHIFT;
3818 		ASSERT(pidx < sinfo->si_max_pages);
3819 
3820 		/*
3821 		 * if this page uses the copy buffer, we need to sync it,
3822 		 * otherwise, go on to the next page.
3823 		 */
3824 		cbpage = &dma->dp_pgmap[pidx];
3825 		ASSERT((cbpage->pm_uses_copybuf == B_TRUE) ||
3826 		    (cbpage->pm_uses_copybuf == B_FALSE));
3827 		if (cbpage->pm_uses_copybuf) {
3828 			/* cbaddr and kaddr should be page aligned */
3829 			ASSERT(((uintptr_t)cbpage->pm_cbaddr &
3830 			    MMU_PAGEOFFSET) == 0);
3831 			ASSERT(((uintptr_t)cbpage->pm_kaddr &
3832 			    MMU_PAGEOFFSET) == 0);
3833 
3834 			/*
3835 			 * if we're copying for the device, we are going to
3836 			 * copy from the drivers buffer and to the rootnex
3837 			 * allocated copy buffer.
3838 			 */
3839 			if (cache_flags == DDI_DMA_SYNC_FORDEV) {
3840 				fromaddr = cbpage->pm_kaddr + poff;
3841 				toaddr = cbpage->pm_cbaddr + poff;
3842 				DTRACE_PROBE2(rootnex__sync__dev,
3843 				    dev_info_t *, dma->dp_dip, size_t, psize);
3844 
3845 			/*
3846 			 * if we're copying for the cpu/kernel, we are going to
3847 			 * copy from the rootnex allocated copy buffer to the
3848 			 * drivers buffer.
3849 			 */
3850 			} else {
3851 				fromaddr = cbpage->pm_cbaddr + poff;
3852 				toaddr = cbpage->pm_kaddr + poff;
3853 				DTRACE_PROBE2(rootnex__sync__cpu,
3854 				    dev_info_t *, dma->dp_dip, size_t, psize);
3855 			}
3856 
3857 			bcopy(fromaddr, toaddr, psize);
3858 		}
3859 
3860 		/*
3861 		 * decrement size until we're done, update our offset into the
3862 		 * buffer, and get the next page size.
3863 		 */
3864 		size -= psize;
3865 		offset += psize;
3866 		psize = MIN(MMU_PAGESIZE, size);
3867 
3868 		/* page offset is zero for the rest of this loop */
3869 		poff = 0;
3870 	}
3871 
3872 	return (DDI_SUCCESS);
3873 }
3874 
3875 
3876 /*
3877  * rootnex_valid_sync_parms()
3878  *    checks the parameters passed to sync to verify they are correct.
3879  */
3880 static int
3881 rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win,
3882     off_t offset, size_t size, uint_t cache_flags)
3883 {
3884 	off_t woffset;
3885 
3886 
3887 	/*
3888 	 * the first part of the test to make sure the offset passed in is
3889 	 * within the window.
3890 	 */
3891 	if (offset < win->wd_offset) {
3892 		return (DDI_FAILURE);
3893 	}
3894 
3895 	/*
3896 	 * second and last part of the test to make sure the offset and length
3897 	 * passed in is within the window.
3898 	 */
3899 	woffset = offset - win->wd_offset;
3900 	if ((woffset + size) > win->wd_size) {
3901 		return (DDI_FAILURE);
3902 	}
3903 
3904 	/*
3905 	 * if we are sync'ing for the device, the DDI_DMA_WRITE flag should
3906 	 * be set too.
3907 	 */
3908 	if ((cache_flags == DDI_DMA_SYNC_FORDEV) &&
3909 	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
3910 		return (DDI_SUCCESS);
3911 	}
3912 
3913 	/*
3914 	 * at this point, either DDI_DMA_SYNC_FORCPU or DDI_DMA_SYNC_FORKERNEL
3915 	 * should be set. Also DDI_DMA_READ should be set in the flags.
3916 	 */
3917 	if (((cache_flags == DDI_DMA_SYNC_FORCPU) ||
3918 	    (cache_flags == DDI_DMA_SYNC_FORKERNEL)) &&
3919 	    (hp->dmai_rflags & DDI_DMA_READ)) {
3920 		return (DDI_SUCCESS);
3921 	}
3922 
3923 	return (DDI_FAILURE);
3924 }
3925 
3926 
3927 /*
3928  * rootnex_dma_win()
3929  *    called from ddi_dma_getwin()
3930  */
3931 /*ARGSUSED*/
3932 static int
3933 rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
3934     uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
3935     uint_t *ccountp)
3936 {
3937 	rootnex_window_t *window;
3938 	rootnex_trim_t *trim;
3939 	ddi_dma_impl_t *hp;
3940 	rootnex_dma_t *dma;
3941 #if !defined(__amd64)
3942 	rootnex_sglinfo_t *sinfo;
3943 	rootnex_pgmap_t *pmap;
3944 	uint_t pidx;
3945 	uint_t pcnt;
3946 	off_t poff;
3947 	int i;
3948 #endif
3949 
3950 
3951 	hp = (ddi_dma_impl_t *)handle;
3952 	dma = (rootnex_dma_t *)hp->dmai_private;
3953 #if !defined(__amd64)
3954 	sinfo = &dma->dp_sglinfo;
3955 #endif
3956 
3957 	/* If we try and get a window which doesn't exist, return failure */
3958 	if (win >= hp->dmai_nwin) {
3959 		ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
3960 		return (DDI_FAILURE);
3961 	}
3962 
3963 	/*
3964 	 * if we don't have any windows, and they're asking for the first
3965 	 * window, setup the cookie pointer to the first cookie in the bind.
3966 	 * setup our return values, then increment the cookie since we return
3967 	 * the first cookie on the stack.
3968 	 */
3969 	if (dma->dp_window == NULL) {
3970 		if (win != 0) {
3971 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
3972 			return (DDI_FAILURE);
3973 		}
3974 		hp->dmai_cookie = dma->dp_cookies;
3975 		*offp = 0;
3976 		*lenp = dma->dp_dma.dmao_size;
3977 		*ccountp = dma->dp_sglinfo.si_sgl_size;
3978 		*cookiep = hp->dmai_cookie[0];
3979 		hp->dmai_cookie++;
3980 		return (DDI_SUCCESS);
3981 	}
3982 
3983 	/* sync the old window before moving on to the new one */
3984 	window = &dma->dp_window[dma->dp_current_win];
3985 	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_READ)) {
3986 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
3987 		    DDI_DMA_SYNC_FORCPU);
3988 	}
3989 
3990 #if !defined(__amd64)
3991 	/*
3992 	 * before we move to the next window, if we need to re-map, unmap all
3993 	 * the pages in this window.
3994 	 */
3995 	if (dma->dp_cb_remaping) {
3996 		/*
3997 		 * If we switch to this window again, we'll need to map in
3998 		 * on the fly next time.
3999 		 */
4000 		window->wd_remap_copybuf = B_TRUE;
4001 
4002 		/*
4003 		 * calculate the page index into the buffer where this window
4004 		 * starts, and the number of pages this window takes up.
4005 		 */
4006 		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
4007 		    MMU_PAGESHIFT;
4008 		poff = (sinfo->si_buf_offset + window->wd_offset) &
4009 		    MMU_PAGEOFFSET;
4010 		pcnt = mmu_btopr(window->wd_size + poff);
4011 		ASSERT((pidx + pcnt) <= sinfo->si_max_pages);
4012 
4013 		/* unmap pages which are currently mapped in this window */
4014 		for (i = 0; i < pcnt; i++) {
4015 			if (dma->dp_pgmap[pidx].pm_mapped) {
4016 				hat_unload(kas.a_hat,
4017 				    dma->dp_pgmap[pidx].pm_kaddr, MMU_PAGESIZE,
4018 				    HAT_UNLOAD);
4019 				dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
4020 			}
4021 			pidx++;
4022 		}
4023 	}
4024 #endif
4025 
4026 	/*
4027 	 * Move to the new window.
4028 	 * NOTE: current_win must be set for sync to work right
4029 	 */
4030 	dma->dp_current_win = win;
4031 	window = &dma->dp_window[win];
4032 
4033 	/* if needed, adjust the first and/or last cookies for trim */
4034 	trim = &window->wd_trim;
4035 	if (trim->tr_trim_first) {
4036 		window->wd_first_cookie->_dmu._dmac_ll = trim->tr_first_paddr;
4037 		window->wd_first_cookie->dmac_size = trim->tr_first_size;
4038 #if !defined(__amd64)
4039 		window->wd_first_cookie->dmac_type =
4040 		    (window->wd_first_cookie->dmac_type &
4041 		    ROOTNEX_USES_COPYBUF) + window->wd_offset;
4042 #endif
4043 		if (trim->tr_first_copybuf_win) {
4044 			dma->dp_pgmap[trim->tr_first_pidx].pm_cbaddr =
4045 			    trim->tr_first_cbaddr;
4046 #if !defined(__amd64)
4047 			dma->dp_pgmap[trim->tr_first_pidx].pm_kaddr =
4048 			    trim->tr_first_kaddr;
4049 #endif
4050 		}
4051 	}
4052 	if (trim->tr_trim_last) {
4053 		trim->tr_last_cookie->_dmu._dmac_ll = trim->tr_last_paddr;
4054 		trim->tr_last_cookie->dmac_size = trim->tr_last_size;
4055 		if (trim->tr_last_copybuf_win) {
4056 			dma->dp_pgmap[trim->tr_last_pidx].pm_cbaddr =
4057 			    trim->tr_last_cbaddr;
4058 #if !defined(__amd64)
4059 			dma->dp_pgmap[trim->tr_last_pidx].pm_kaddr =
4060 			    trim->tr_last_kaddr;
4061 #endif
4062 		}
4063 	}
4064 
4065 	/*
4066 	 * setup the cookie pointer to the first cookie in the window. setup
4067 	 * our return values, then increment the cookie since we return the
4068 	 * first cookie on the stack.
4069 	 */
4070 	hp->dmai_cookie = window->wd_first_cookie;
4071 	*offp = window->wd_offset;
4072 	*lenp = window->wd_size;
4073 	*ccountp = window->wd_cookie_cnt;
4074 	*cookiep = hp->dmai_cookie[0];
4075 	hp->dmai_cookie++;
4076 
4077 #if !defined(__amd64)
4078 	/* re-map copybuf if required for this window */
4079 	if (dma->dp_cb_remaping) {
4080 		/*
4081 		 * calculate the page index into the buffer where this
4082 		 * window starts.
4083 		 */
4084 		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
4085 		    MMU_PAGESHIFT;
4086 		ASSERT(pidx < sinfo->si_max_pages);
4087 
4088 		/*
4089 		 * the first page can get unmapped if it's shared with the
4090 		 * previous window. Even if the rest of this window is already
4091 		 * mapped in, we need to still check this one.
4092 		 */
4093 		pmap = &dma->dp_pgmap[pidx];
4094 		if ((pmap->pm_uses_copybuf) && (pmap->pm_mapped == B_FALSE)) {
4095 			if (pmap->pm_pp != NULL) {
4096 				pmap->pm_mapped = B_TRUE;
4097 				i86_pp_map(pmap->pm_pp, pmap->pm_kaddr);
4098 			} else if (pmap->pm_vaddr != NULL) {
4099 				pmap->pm_mapped = B_TRUE;
4100 				i86_va_map(pmap->pm_vaddr, sinfo->si_asp,
4101 				    pmap->pm_kaddr);
4102 			}
4103 		}
4104 		pidx++;
4105 
4106 		/* map in the rest of the pages if required */
4107 		if (window->wd_remap_copybuf) {
4108 			window->wd_remap_copybuf = B_FALSE;
4109 
4110 			/* figure out many pages this window takes up */
4111 			poff = (sinfo->si_buf_offset + window->wd_offset) &
4112 			    MMU_PAGEOFFSET;
4113 			pcnt = mmu_btopr(window->wd_size + poff);
4114 			ASSERT(((pidx - 1) + pcnt) <= sinfo->si_max_pages);
4115 
4116 			/* map pages which require it */
4117 			for (i = 1; i < pcnt; i++) {
4118 				pmap = &dma->dp_pgmap[pidx];
4119 				if (pmap->pm_uses_copybuf) {
4120 					ASSERT(pmap->pm_mapped == B_FALSE);
4121 					if (pmap->pm_pp != NULL) {
4122 						pmap->pm_mapped = B_TRUE;
4123 						i86_pp_map(pmap->pm_pp,
4124 						    pmap->pm_kaddr);
4125 					} else if (pmap->pm_vaddr != NULL) {
4126 						pmap->pm_mapped = B_TRUE;
4127 						i86_va_map(pmap->pm_vaddr,
4128 						    sinfo->si_asp,
4129 						    pmap->pm_kaddr);
4130 					}
4131 				}
4132 				pidx++;
4133 			}
4134 		}
4135 	}
4136 #endif
4137 
4138 	/* if the new window uses the copy buffer, sync it for the device */
4139 	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_WRITE)) {
4140 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
4141 		    DDI_DMA_SYNC_FORDEV);
4142 	}
4143 
4144 	return (DDI_SUCCESS);
4145 }
4146 
4147 
4148 
4149 /*
4150  * ************************
4151  *  obsoleted dma routines
4152  * ************************
4153  */
4154 
4155 /*
4156  * rootnex_dma_map()
4157  *    called from ddi_dma_setup()
4158  */
4159 /* ARGSUSED */
4160 static int
4161 rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip, struct ddi_dma_req *dmareq,
4162     ddi_dma_handle_t *handlep)
4163 {
4164 #if defined(__amd64)
4165 	/*
4166 	 * this interface is not supported in 64-bit x86 kernel. See comment in
4167 	 * rootnex_dma_mctl()
4168 	 */
4169 	return (DDI_DMA_NORESOURCES);
4170 
4171 #else /* 32-bit x86 kernel */
4172 	ddi_dma_handle_t *lhandlep;
4173 	ddi_dma_handle_t lhandle;
4174 	ddi_dma_cookie_t cookie;
4175 	ddi_dma_attr_t dma_attr;
4176 	ddi_dma_lim_t *dma_lim;
4177 	uint_t ccnt;
4178 	int e;
4179 
4180 
4181 	/*
4182 	 * if the driver is just testing to see if it's possible to do the bind,
4183 	 * we'll use local state. Otherwise, use the handle pointer passed in.
4184 	 */
4185 	if (handlep == NULL) {
4186 		lhandlep = &lhandle;
4187 	} else {
4188 		lhandlep = handlep;
4189 	}
4190 
4191 	/* convert the limit structure to a dma_attr one */
4192 	dma_lim = dmareq->dmar_limits;
4193 	dma_attr.dma_attr_version = DMA_ATTR_V0;
4194 	dma_attr.dma_attr_addr_lo = dma_lim->dlim_addr_lo;
4195 	dma_attr.dma_attr_addr_hi = dma_lim->dlim_addr_hi;
4196 	dma_attr.dma_attr_minxfer = dma_lim->dlim_minxfer;
4197 	dma_attr.dma_attr_seg = dma_lim->dlim_adreg_max;
4198 	dma_attr.dma_attr_count_max = dma_lim->dlim_ctreg_max;
4199 	dma_attr.dma_attr_granular = dma_lim->dlim_granular;
4200 	dma_attr.dma_attr_sgllen = dma_lim->dlim_sgllen;
4201 	dma_attr.dma_attr_maxxfer = dma_lim->dlim_reqsize;
4202 	dma_attr.dma_attr_burstsizes = dma_lim->dlim_burstsizes;
4203 	dma_attr.dma_attr_align = MMU_PAGESIZE;
4204 	dma_attr.dma_attr_flags = 0;
4205 
4206 	e = rootnex_dma_allochdl(dip, rdip, &dma_attr, dmareq->dmar_fp,
4207 	    dmareq->dmar_arg, lhandlep);
4208 	if (e != DDI_SUCCESS) {
4209 		return (e);
4210 	}
4211 
4212 	e = rootnex_dma_bindhdl(dip, rdip, *lhandlep, dmareq, &cookie, &ccnt);
4213 	if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) {
4214 		(void) rootnex_dma_freehdl(dip, rdip, *lhandlep);
4215 		return (e);
4216 	}
4217 
4218 	/*
4219 	 * if the driver is just testing to see if it's possible to do the bind,
4220 	 * free up the local state and return the result.
4221 	 */
4222 	if (handlep == NULL) {
4223 		(void) rootnex_dma_unbindhdl(dip, rdip, *lhandlep);
4224 		(void) rootnex_dma_freehdl(dip, rdip, *lhandlep);
4225 		if (e == DDI_DMA_MAPPED) {
4226 			return (DDI_DMA_MAPOK);
4227 		} else {
4228 			return (DDI_DMA_NOMAPPING);
4229 		}
4230 	}
4231 
4232 	return (e);
4233 #endif /* defined(__amd64) */
4234 }
4235 
4236 
4237 /*
4238  * rootnex_dma_mctl()
4239  *
4240  */
4241 /* ARGSUSED */
4242 static int
4243 rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
4244     enum ddi_dma_ctlops request, off_t *offp, size_t *lenp, caddr_t *objpp,
4245     uint_t cache_flags)
4246 {
4247 #if defined(__amd64)
4248 	/*
4249 	 * DDI_DMA_SMEM_ALLOC & DDI_DMA_IOPB_ALLOC we're changed to have a
4250 	 * common implementation in genunix, so they no longer have x86
4251 	 * specific functionality which called into dma_ctl.
4252 	 *
4253 	 * The rest of the obsoleted interfaces were never supported in the
4254 	 * 64-bit x86 kernel. For s10, the obsoleted DDI_DMA_SEGTOC interface
4255 	 * was not ported to the x86 64-bit kernel do to serious x86 rootnex
4256 	 * implementation issues.
4257 	 *
4258 	 * If you can't use DDI_DMA_SEGTOC; DDI_DMA_NEXTSEG, DDI_DMA_FREE, and
4259 	 * DDI_DMA_NEXTWIN are useless since you can get to the cookie, so we
4260 	 * reflect that now too...
4261 	 *
4262 	 * Even though we fixed the pointer problem in DDI_DMA_SEGTOC, we are
4263 	 * not going to put this functionality into the 64-bit x86 kernel now.
4264 	 * It wasn't ported to the 64-bit kernel for s10, no reason to change
4265 	 * that in a future release.
4266 	 */
4267 	return (DDI_FAILURE);
4268 
4269 #else /* 32-bit x86 kernel */
4270 	ddi_dma_cookie_t lcookie;
4271 	ddi_dma_cookie_t *cookie;
4272 	rootnex_window_t *window;
4273 	ddi_dma_impl_t *hp;
4274 	rootnex_dma_t *dma;
4275 	uint_t nwin;
4276 	uint_t ccnt;
4277 	size_t len;
4278 	off_t off;
4279 	int e;
4280 
4281 
4282 	/*
4283 	 * DDI_DMA_SEGTOC, DDI_DMA_NEXTSEG, and DDI_DMA_NEXTWIN are a little
4284 	 * hacky since were optimizing for the current interfaces and so we can
4285 	 * cleanup the mess in genunix. Hopefully we will remove the this
4286 	 * obsoleted routines someday soon.
4287 	 */
4288 
4289 	switch (request) {
4290 
4291 	case DDI_DMA_SEGTOC: /* ddi_dma_segtocookie() */
4292 		hp = (ddi_dma_impl_t *)handle;
4293 		cookie = (ddi_dma_cookie_t *)objpp;
4294 
4295 		/*
4296 		 * convert segment to cookie. We don't distinguish between the
4297 		 * two :-)
4298 		 */
4299 		*cookie = *hp->dmai_cookie;
4300 		*lenp = cookie->dmac_size;
4301 		*offp = cookie->dmac_type & ~ROOTNEX_USES_COPYBUF;
4302 		return (DDI_SUCCESS);
4303 
4304 	case DDI_DMA_NEXTSEG: /* ddi_dma_nextseg() */
4305 		hp = (ddi_dma_impl_t *)handle;
4306 		dma = (rootnex_dma_t *)hp->dmai_private;
4307 
4308 		if ((*lenp != NULL) && ((uintptr_t)*lenp != (uintptr_t)hp)) {
4309 			return (DDI_DMA_STALE);
4310 		}
4311 
4312 		/* handle the case where we don't have any windows */
4313 		if (dma->dp_window == NULL) {
4314 			/*
4315 			 * if seg == NULL, and we don't have any windows,
4316 			 * return the first cookie in the sgl.
4317 			 */
4318 			if (*lenp == NULL) {
4319 				dma->dp_current_cookie = 0;
4320 				hp->dmai_cookie = dma->dp_cookies;
4321 				*objpp = (caddr_t)handle;
4322 				return (DDI_SUCCESS);
4323 
4324 			/* if we have more cookies, go to the next cookie */
4325 			} else {
4326 				if ((dma->dp_current_cookie + 1) >=
4327 				    dma->dp_sglinfo.si_sgl_size) {
4328 					return (DDI_DMA_DONE);
4329 				}
4330 				dma->dp_current_cookie++;
4331 				hp->dmai_cookie++;
4332 				return (DDI_SUCCESS);
4333 			}
4334 		}
4335 
4336 		/* We have one or more windows */
4337 		window = &dma->dp_window[dma->dp_current_win];
4338 
4339 		/*
4340 		 * if seg == NULL, return the first cookie in the current
4341 		 * window
4342 		 */
4343 		if (*lenp == NULL) {
4344 			dma->dp_current_cookie = 0;
4345 			hp->dmai_cookie = window->wd_first_cookie;
4346 
4347 		/*
4348 		 * go to the next cookie in the window then see if we done with
4349 		 * this window.
4350 		 */
4351 		} else {
4352 			if ((dma->dp_current_cookie + 1) >=
4353 			    window->wd_cookie_cnt) {
4354 				return (DDI_DMA_DONE);
4355 			}
4356 			dma->dp_current_cookie++;
4357 			hp->dmai_cookie++;
4358 		}
4359 		*objpp = (caddr_t)handle;
4360 		return (DDI_SUCCESS);
4361 
4362 	case DDI_DMA_NEXTWIN: /* ddi_dma_nextwin() */
4363 		hp = (ddi_dma_impl_t *)handle;
4364 		dma = (rootnex_dma_t *)hp->dmai_private;
4365 
4366 		if ((*offp != NULL) && ((uintptr_t)*offp != (uintptr_t)hp)) {
4367 			return (DDI_DMA_STALE);
4368 		}
4369 
4370 		/* if win == NULL, return the first window in the bind */
4371 		if (*offp == NULL) {
4372 			nwin = 0;
4373 
4374 		/*
4375 		 * else, go to the next window then see if we're done with all
4376 		 * the windows.
4377 		 */
4378 		} else {
4379 			nwin = dma->dp_current_win + 1;
4380 			if (nwin >= hp->dmai_nwin) {
4381 				return (DDI_DMA_DONE);
4382 			}
4383 		}
4384 
4385 		/* switch to the next window */
4386 		e = rootnex_dma_win(dip, rdip, handle, nwin, &off, &len,
4387 		    &lcookie, &ccnt);
4388 		ASSERT(e == DDI_SUCCESS);
4389 		if (e != DDI_SUCCESS) {
4390 			return (DDI_DMA_STALE);
4391 		}
4392 
4393 		/* reset the cookie back to the first cookie in the window */
4394 		if (dma->dp_window != NULL) {
4395 			window = &dma->dp_window[dma->dp_current_win];
4396 			hp->dmai_cookie = window->wd_first_cookie;
4397 		} else {
4398 			hp->dmai_cookie = dma->dp_cookies;
4399 		}
4400 
4401 		*objpp = (caddr_t)handle;
4402 		return (DDI_SUCCESS);
4403 
4404 	case DDI_DMA_FREE: /* ddi_dma_free() */
4405 		(void) rootnex_dma_unbindhdl(dip, rdip, handle);
4406 		(void) rootnex_dma_freehdl(dip, rdip, handle);
4407 		if (rootnex_state->r_dvma_call_list_id) {
4408 			ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
4409 		}
4410 		return (DDI_SUCCESS);
4411 
4412 	case DDI_DMA_IOPB_ALLOC:	/* get contiguous DMA-able memory */
4413 	case DDI_DMA_SMEM_ALLOC:	/* get contiguous DMA-able memory */
4414 		/* should never get here, handled in genunix */
4415 		ASSERT(0);
4416 		return (DDI_FAILURE);
4417 
4418 	case DDI_DMA_KVADDR:
4419 	case DDI_DMA_GETERR:
4420 	case DDI_DMA_COFF:
4421 		return (DDI_FAILURE);
4422 	}
4423 
4424 	return (DDI_FAILURE);
4425 #endif /* defined(__amd64) */
4426 }
4427 
4428 /*ARGSUSED*/
4429 static int
4430 rootnex_fm_callback(dev_info_t *dip, ddi_fm_error_t *derr, const void *no_used)
4431 {
4432 	return (rootnex_fm_ma_ta_panic_flag ? DDI_FM_FATAL : DDI_FM_NONFATAL);
4433 }
4434