/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/errno.h>
#include <sys/types.h>
#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/ddi.h>
#include <sys/stat.h>
#include <sys/sunddi.h>
#include <sys/file.h>
#include <sys/open.h>
#include <sys/modctl.h>
#include <sys/ddi_impldefs.h>
#include <vm/seg_kmem.h>
#include <sys/vmsystm.h>
#include <sys/sysmacros.h>
#include <sys/ddidevmap.h>
#include <sys/avl.h>

#include <sys/xsvc.h>

/* total max memory which can be alloced with ioctl interface */
uint64_t xsvc_max_memory = 10 * 1024 * 1024;
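/*
 * Since xsvc_max_memory is a global, the 10 MB default above can also be
 * tuned at boot time; a hypothetical /etc/system entry capping it at 20 MB
 * would look like:
 *	set xsvc:xsvc_max_memory=0x1400000
 */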

extern void i86_va_map(caddr_t vaddr, struct as *asp, caddr_t kaddr);


static int xsvc_open(dev_t *devp, int flag, int otyp, cred_t *cred);
static int xsvc_close(dev_t devp, int flag, int otyp, cred_t *cred);
static int xsvc_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred,
    int *rval);
static int xsvc_devmap(dev_t dev, devmap_cookie_t dhp, offset_t off, size_t len,
    size_t *maplen, uint_t model);
static int xsvc_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int xsvc_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int xsvc_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
    void **result);

static struct cb_ops xsvc_cb_ops = {
	xsvc_open,		/* cb_open */
	xsvc_close,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	xsvc_ioctl,		/* cb_ioctl */
	xsvc_devmap,		/* cb_devmap */
	NULL,			/* cb_mmap */
	NULL,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_NEW | D_MP | D_64BIT | D_DEVMAP,	/* cb_flag */
	CB_REV
};

static struct dev_ops xsvc_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	xsvc_getinfo,		/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	xsvc_attach,		/* devo_attach */
	xsvc_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&xsvc_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	NULL			/* power */
};

static struct modldrv xsvc_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	"xsvc driver v%I%",	/* Name of the module. */
	&xsvc_dev_ops,		/* driver ops */
};

static struct modlinkage xsvc_modlinkage = {
	MODREV_1,
	(void *) &xsvc_modldrv,
	NULL
};


static int xsvc_ioctl_alloc_memory(xsvc_state_t *state, void *arg, int mode);
static int xsvc_ioctl_flush_memory(xsvc_state_t *state, void *arg, int mode);
static int xsvc_ioctl_free_memory(xsvc_state_t *state, void *arg, int mode);
static int xsvc_mem_alloc(xsvc_state_t *state, uint64_t key,
    xsvc_mem_t **mp);
static void xsvc_mem_free(xsvc_state_t *state, xsvc_mem_t *mp);
static xsvc_mem_t *xsvc_mem_lookup(xsvc_state_t *state,
    uint64_t key);
static int xsvc_mnode_key_compare(const void *q, const void *e);
static int xsvc_umem_cookie_alloc(caddr_t kva, size_t size, int flags,
    ddi_umem_cookie_t *cookiep);
static void xsvc_umem_cookie_free(ddi_umem_cookie_t *cookiep);
static void xsvc_devmap_unmap(devmap_cookie_t dhp, void *pvtp, offset_t off,
    size_t len, devmap_cookie_t new_dhp1, void **new_pvtp1,
    devmap_cookie_t new_dhp2, void **new_pvtp2);


void *xsvc_statep;

static ddi_device_acc_attr_t xsvc_device_attr = {
	DDI_DEVICE_ATTR_V0,	/* devacc_attr_version */
	DDI_NEVERSWAP_ACC,	/* devacc_attr_endian_flags */
	DDI_STRICTORDER_ACC	/* devacc_attr_dataorder */
};

static struct devmap_callback_ctl xsvc_callbk = {
	DEVMAP_OPS_REV,		/* devmap_rev */
	NULL,			/* devmap_map */
	NULL,			/* devmap_access */
	NULL,			/* devmap_dup */
	xsvc_devmap_unmap	/* devmap_unmap */
};


/*
 * _init()
 *
 */
int
_init(void)
{
	int err;

	err = ddi_soft_state_init(&xsvc_statep, sizeof (xsvc_state_t), 1);
	if (err != 0) {
		return (err);
	}

	err = mod_install(&xsvc_modlinkage);
	if (err != 0) {
		ddi_soft_state_fini(&xsvc_statep);
		return (err);
	}

	return (0);
}

/*
 * _info()
 *
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&xsvc_modlinkage, modinfop));
}

/*
 * _fini()
 *
 */
int
_fini(void)
{
	int err;

	err = mod_remove(&xsvc_modlinkage);
	if (err != 0) {
		return (err);
	}

	ddi_soft_state_fini(&xsvc_statep);

	return (0);
}

/*
 * xsvc_attach()
 *
 */
static int
xsvc_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	xsvc_state_t *state;
	int maxallocmem;
	int instance;
	int err;


	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);
	err = ddi_soft_state_zalloc(xsvc_statep, instance);
	if (err != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}
	state = ddi_get_soft_state(xsvc_statep, instance);
	if (state == NULL) {
		goto attachfail_get_soft_state;
	}

	state->xs_dip = dip;
	state->xs_instance = instance;

	/* Initialize allocation count */
	mutex_init(&state->xs_mutex, NULL, MUTEX_DRIVER, NULL);
	state->xs_currently_alloced = 0;

	/* create the minor node (for the ioctl) */
	err = ddi_create_minor_node(dip, "xsvc", S_IFCHR, instance, DDI_PSEUDO,
	    0);
	if (err != DDI_SUCCESS) {
		goto attachfail_minor_node;
	}

	/*
	 * The maxallocmem property overrides the default (xsvc_max_memory).
	 * This is the maximum total memory, in KB, which the ioctl interface
	 * will allow to be allocated.
	 */
	maxallocmem = ddi_prop_get_int(DDI_DEV_T_ANY, state->xs_dip,
	    DDI_PROP_DONTPASS, "maxallocmem", -1);
	if (maxallocmem >= 0) {
		xsvc_max_memory = maxallocmem * 1024;
	}
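
	/*
	 * Sketch of the corresponding (hypothetical) xsvc.conf driver
	 * property, capping ioctl allocations at 16 MB:
	 *
	 *	maxallocmem=16384;
	 */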

	/* Initialize list of memory allocs */
	mutex_init(&state->xs_mlist.ml_mutex, NULL, MUTEX_DRIVER, NULL);
	avl_create(&state->xs_mlist.ml_avl, xsvc_mnode_key_compare,
	    sizeof (xsvc_mnode_t), offsetof(xsvc_mnode_t, mn_link));

	/* Report that driver was loaded */
	ddi_report_dev(dip);

	return (DDI_SUCCESS);

attachfail_minor_node:
	mutex_destroy(&state->xs_mutex);
attachfail_get_soft_state:
	(void) ddi_soft_state_free(xsvc_statep, instance);

	return (err);
}

/*
 * xsvc_detach()
 *
 */
static int
xsvc_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	xsvc_state_t *state;
	xsvc_mnode_t *mnode;
	xsvc_mem_t *mp;
	int instance;


	instance = ddi_get_instance(dip);
	state = ddi_get_soft_state(xsvc_statep, instance);
	if (state == NULL) {
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	ddi_remove_minor_node(dip, NULL);

	/* Free any memory on list */
	while ((mnode = avl_first(&state->xs_mlist.ml_avl)) != NULL) {
		mp = mnode->mn_home;
		xsvc_mem_free(state, mp);
	}

	/* remove list */
	avl_destroy(&state->xs_mlist.ml_avl);
	mutex_destroy(&state->xs_mlist.ml_mutex);

	mutex_destroy(&state->xs_mutex);
	(void) ddi_soft_state_free(xsvc_statep, state->xs_instance);
	return (DDI_SUCCESS);
}

/*
 * xsvc_getinfo()
 *
 */
/*ARGSUSED*/
static int
xsvc_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	xsvc_state_t *state;
	int instance;
	dev_t dev;
	int err;


	dev = (dev_t)arg;
	instance = getminor(dev);

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		state = ddi_get_soft_state(xsvc_statep, instance);
		if (state == NULL) {
			return (DDI_FAILURE);
		}
		*result = (void *)state->xs_dip;
		err = DDI_SUCCESS;
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		err = DDI_SUCCESS;
		break;

	default:
		err = DDI_FAILURE;
		break;
	}

	return (err);
}


/*
 * xsvc_open()
 *
 */
/*ARGSUSED*/
static int
xsvc_open(dev_t *devp, int flag, int otyp, cred_t *cred)
{
	xsvc_state_t *state;
	int instance;

	instance = getminor(*devp);
	state = ddi_get_soft_state(xsvc_statep, instance);
	if (state == NULL) {
		return (ENXIO);
	}

	return (0);
}

/*
 * xsvc_close()
 *
 */
/*ARGSUSED*/
static int
xsvc_close(dev_t devp, int flag, int otyp, cred_t *cred)
{
	return (0);
}

/*
 * xsvc_ioctl()
 *
 */
/*ARGSUSED*/
static int
xsvc_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred, int *rval)
{
	xsvc_state_t *state;
	int instance;
	int err;


	err = drv_priv(cred);
	if (err != 0) {
		return (EPERM);
	}
	instance = getminor(dev);
	if (instance == -1) {
		return (EBADF);
	}
	state = ddi_get_soft_state(xsvc_statep, instance);
	if (state == NULL) {
		return (EBADF);
	}

	switch (cmd) {
	case XSVC_ALLOC_MEM:
		err = xsvc_ioctl_alloc_memory(state, (void *)arg, mode);
		break;

	case XSVC_FREE_MEM:
		err = xsvc_ioctl_free_memory(state, (void *)arg, mode);
		break;

	case XSVC_FLUSH_MEM:
		err = xsvc_ioctl_flush_memory(state, (void *)arg, mode);
		break;

	default:
		err = ENXIO;
	}

	return (err);
}
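
/*
 * A minimal userland sketch of the ioctl interface above. The device path,
 * sizes, and exact field types here are illustrative assumptions; the
 * xsvc_mem_req, xsvc_mloc, and XSVC_*_MEM definitions come from
 * <sys/xsvc.h>, and the caller must be privileged enough to pass
 * drv_priv():
 *
 *	int fd = open("/devices/pseudo/xsvc@0:xsvc", O_RDWR);
 *	xsvc_mem_req req;
 *	xsvc_mloc sgl[5];
 *	req.xsvc_mem_reqid = 1;		(caller-chosen key for free/flush)
 *	req.xsvc_mem_size = 8192;
 *	req.xsvc_mem_align = 4096;
 *	req.xsvc_mem_addr_lo = 0;
 *	req.xsvc_mem_addr_hi = 0xFFFFFFFF;
 *	req.xsvc_mem_sgllen = 4;	(room for 4 cookies + terminator)
 *	req.xsvc_sg_list = (uint64_t)(uintptr_t)sgl;
 *	if (ioctl(fd, XSVC_ALLOC_MEM, &req) == 0) {
 *		(use sgl[]; it ends at the first all-zero xsvc_mloc)
 *		(void) ioctl(fd, XSVC_FREE_MEM, &req);
 *	}
 */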

/*
 * xsvc_ioctl_alloc_memory()
 *
 */
static int
xsvc_ioctl_alloc_memory(xsvc_state_t *state, void *arg, int mode)
{
	xsvc_mem_req_32 params32;
	xsvc_mloc_32 *usgl32;
	xsvc_mem_req params;
	xsvc_mloc_32 sgl32;
	xsvc_mloc *usgl;
	xsvc_mem_t *mp;
	xsvc_mloc sgl;
	uint64_t key;
	size_t size;
	int err;
	int i;


	/* Copy in the params, then get the size and key */
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		err = ddi_copyin(arg, &params32, sizeof (xsvc_mem_req_32),
		    mode);
		if (err != 0) {
			return (EFAULT);
		}

		key = (uint64_t)params32.xsvc_mem_reqid;
		size = P2ROUNDUP((size_t)params32.xsvc_mem_size, PAGESIZE);
	} else {
		err = ddi_copyin(arg, &params, sizeof (xsvc_mem_req), mode);
		if (err != 0) {
			return (EFAULT);
		}
		key = (uint64_t)params.xsvc_mem_reqid;
		size = P2ROUNDUP(params.xsvc_mem_size, PAGESIZE);
	}

	/*
	 * make sure this doesn't put us over the maximum allowed to be
	 * allocated
	 */
	mutex_enter(&state->xs_mutex);
	if ((state->xs_currently_alloced + size) > xsvc_max_memory) {
		mutex_exit(&state->xs_mutex);
		return (EAGAIN);
	}
	state->xs_currently_alloced += size;
	mutex_exit(&state->xs_mutex);

	/* get state to track this memory */
	err = xsvc_mem_alloc(state, key, &mp);
	if (err != 0) {
		return (err);
	}
	mp->xm_size = size;

	/* allocate and bind the memory */
	mp->xm_dma_attr.dma_attr_version = DMA_ATTR_V0;
	mp->xm_dma_attr.dma_attr_count_max = (uint64_t)0xFFFFFFFF;
	mp->xm_dma_attr.dma_attr_burstsizes = 1;
	mp->xm_dma_attr.dma_attr_minxfer = 1;
	mp->xm_dma_attr.dma_attr_maxxfer = (uint64_t)0xFFFFFFFF;
	mp->xm_dma_attr.dma_attr_seg = (uint64_t)0xFFFFFFFF;
	mp->xm_dma_attr.dma_attr_granular = 1;
	mp->xm_dma_attr.dma_attr_flags = 0;

	/* Finish converting params */
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		mp->xm_dma_attr.dma_attr_addr_lo = params32.xsvc_mem_addr_lo;
		mp->xm_dma_attr.dma_attr_addr_hi = params32.xsvc_mem_addr_hi;
		mp->xm_dma_attr.dma_attr_sgllen = params32.xsvc_mem_sgllen;
		usgl32 = (xsvc_mloc_32 *)(uintptr_t)params32.xsvc_sg_list;
		mp->xm_dma_attr.dma_attr_align = P2ROUNDUP(
		    params32.xsvc_mem_align, PAGESIZE);
	} else {
		mp->xm_dma_attr.dma_attr_addr_lo = params.xsvc_mem_addr_lo;
		mp->xm_dma_attr.dma_attr_addr_hi = params.xsvc_mem_addr_hi;
		mp->xm_dma_attr.dma_attr_sgllen = params.xsvc_mem_sgllen;
		usgl = (xsvc_mloc *)(uintptr_t)params.xsvc_sg_list;
		mp->xm_dma_attr.dma_attr_align = P2ROUNDUP(
		    params.xsvc_mem_align, PAGESIZE);
	}

	mp->xm_device_attr = xsvc_device_attr;

	err = ddi_dma_alloc_handle(state->xs_dip, &mp->xm_dma_attr,
	    DDI_DMA_SLEEP, NULL, &mp->xm_dma_handle);
	if (err != DDI_SUCCESS) {
		err = EINVAL;
		goto allocfail_alloc_handle;
	}

	/* don't sleep here so we don't get stuck in contig alloc */
	err = ddi_dma_mem_alloc(mp->xm_dma_handle, mp->xm_size,
	    &mp->xm_device_attr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
	    &mp->xm_addr, &mp->xm_real_length, &mp->xm_mem_handle);
	if (err != DDI_SUCCESS) {
		err = EINVAL;
		goto allocfail_alloc_mem;
	}

	err = ddi_dma_addr_bind_handle(mp->xm_dma_handle, NULL, mp->xm_addr,
	    mp->xm_size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &mp->xm_cookie, &mp->xm_cookie_count);
	if (err != DDI_DMA_MAPPED) {
		err = EFAULT;
		goto allocfail_bind;
	}

	/* return sgl */
	for (i = 0; i < mp->xm_cookie_count; i++) {
		if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
			sgl32.mloc_addr = mp->xm_cookie.dmac_laddress;
			sgl32.mloc_size = mp->xm_cookie.dmac_size;
			err = ddi_copyout(&sgl32, &usgl32[i],
			    sizeof (xsvc_mloc_32), mode);
			if (err != 0) {
				err = EFAULT;
				goto allocfail_copyout;
			}
		} else {
			sgl.mloc_addr = mp->xm_cookie.dmac_laddress;
			sgl.mloc_size = mp->xm_cookie.dmac_size;
			err = ddi_copyout(&sgl, &usgl[i], sizeof (xsvc_mloc),
			    mode);
			if (err != 0) {
				err = EFAULT;
				goto allocfail_copyout;
			}
		}
		ddi_dma_nextcookie(mp->xm_dma_handle, &mp->xm_cookie);
	}

	/* set the last sgl entry to 0 to indicate cookie count */
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		sgl32.mloc_addr = 0;
		sgl32.mloc_size = 0;
		err = ddi_copyout(&sgl32, &usgl32[i], sizeof (xsvc_mloc_32),
		    mode);
		if (err != 0) {
			err = EFAULT;
			goto allocfail_copyout;
		}
	} else {
		sgl.mloc_addr = 0;
		sgl.mloc_size = 0;
		err = ddi_copyout(&sgl, &usgl[i], sizeof (xsvc_mloc), mode);
		if (err != 0) {
			err = EFAULT;
			goto allocfail_copyout;
		}
	}

	return (0);

allocfail_copyout:
	(void) ddi_dma_unbind_handle(mp->xm_dma_handle);
allocfail_bind:
	ddi_dma_mem_free(&mp->xm_mem_handle);
allocfail_alloc_mem:
	ddi_dma_free_handle(&mp->xm_dma_handle);
allocfail_alloc_handle:
	mp->xm_dma_handle = NULL;
	xsvc_mem_free(state, mp);

	mutex_enter(&state->xs_mutex);
	state->xs_currently_alloced = state->xs_currently_alloced - size;
	mutex_exit(&state->xs_mutex);

	return (err);
}
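
/*
 * On success, the user's sg_list holds one {addr, size} pair per DMA
 * cookie, terminated by an all-zero entry; e.g. for a two-cookie
 * allocation (sketch, not real values):
 *
 *	{ 0x1000000, 0x1000 }, { 0x2000000, 0x1000 }, { 0, 0 }
 */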

/*
 * xsvc_ioctl_flush_memory()
 *
 */
static int
xsvc_ioctl_flush_memory(xsvc_state_t *state, void *arg, int mode)
{
	xsvc_mem_req_32 params32;
	xsvc_mem_req params;
	xsvc_mem_t *mp;
	uint64_t key;
	int err;


	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		err = ddi_copyin(arg, &params32, sizeof (xsvc_mem_req_32),
		    mode);
		if (err != 0) {
			return (EFAULT);
		}
		key = (uint64_t)params32.xsvc_mem_reqid;
	} else {
		err = ddi_copyin(arg, &params, sizeof (xsvc_mem_req), mode);
		if (err != 0) {
			return (EFAULT);
		}
		key = (uint64_t)params.xsvc_mem_reqid;
	}

	/* find the memory */
	mp = xsvc_mem_lookup(state, key);
	if (mp == NULL) {
		return (EINVAL);
	}

	(void) ddi_dma_sync(mp->xm_dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU);

	return (0);
}


/*
 * xsvc_ioctl_free_memory()
 *
 */
static int
xsvc_ioctl_free_memory(xsvc_state_t *state, void *arg, int mode)
{
	xsvc_mem_req_32 params32;
	xsvc_mem_req params;
	xsvc_mem_t *mp;
	uint64_t key;
	int err;


	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		err = ddi_copyin(arg, &params32, sizeof (xsvc_mem_req_32),
		    mode);
		if (err != 0) {
			return (EFAULT);
		}
		key = (uint64_t)params32.xsvc_mem_reqid;
	} else {
		err = ddi_copyin(arg, &params, sizeof (xsvc_mem_req), mode);
		if (err != 0) {
			return (EFAULT);
		}
		key = (uint64_t)params.xsvc_mem_reqid;
	}

	/* find the memory */
	mp = xsvc_mem_lookup(state, key);
	if (mp == NULL) {
		return (EINVAL);
	}

	xsvc_mem_free(state, mp);

	return (0);
}

/*
 * xsvc_mem_alloc()
 *
 */
static int
xsvc_mem_alloc(xsvc_state_t *state, uint64_t key, xsvc_mem_t **mp)
{
	xsvc_mem_t *mem;

	mem = xsvc_mem_lookup(state, key);
	if (mem != NULL) {
		xsvc_mem_free(state, mem);
	}

	*mp = kmem_alloc(sizeof (xsvc_mem_t), KM_SLEEP);
	(*mp)->xm_mnode.mn_home = *mp;
	(*mp)->xm_mnode.mn_key = key;

	mutex_enter(&state->xs_mlist.ml_mutex);
	avl_add(&state->xs_mlist.ml_avl, &(*mp)->xm_mnode);
	mutex_exit(&state->xs_mlist.ml_mutex);

	return (0);
}

/*
 * xsvc_mem_free()
 *
 */
static void
xsvc_mem_free(xsvc_state_t *state, xsvc_mem_t *mp)
{
	if (mp->xm_dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(mp->xm_dma_handle);
		ddi_dma_mem_free(&mp->xm_mem_handle);
		ddi_dma_free_handle(&mp->xm_dma_handle);

		mutex_enter(&state->xs_mutex);
		state->xs_currently_alloced = state->xs_currently_alloced -
		    mp->xm_size;
		mutex_exit(&state->xs_mutex);
	}

	mutex_enter(&state->xs_mlist.ml_mutex);
	avl_remove(&state->xs_mlist.ml_avl, &mp->xm_mnode);
	mutex_exit(&state->xs_mlist.ml_mutex);

	kmem_free(mp, sizeof (*mp));
}

/*
 * xsvc_mem_lookup()
 *
 */
static xsvc_mem_t *
xsvc_mem_lookup(xsvc_state_t *state, uint64_t key)
{
	xsvc_mnode_t mnode;
	xsvc_mnode_t *mnp;
	avl_index_t where;
	xsvc_mem_t *mp;

	mnode.mn_key = key;
	mutex_enter(&state->xs_mlist.ml_mutex);
	mnp = avl_find(&state->xs_mlist.ml_avl, &mnode, &where);
	mutex_exit(&state->xs_mlist.ml_mutex);

	if (mnp != NULL) {
		mp = mnp->mn_home;
	} else {
		mp = NULL;
	}

	return (mp);
}

/*
 * xsvc_mnode_key_compare()
 *
 */
static int
xsvc_mnode_key_compare(const void *q, const void *e)
{
	xsvc_mnode_t *n1;
	xsvc_mnode_t *n2;

	n1 = (xsvc_mnode_t *)q;
	n2 = (xsvc_mnode_t *)e;

	if (n1->mn_key < n2->mn_key) {
		return (-1);
	} else if (n1->mn_key > n2->mn_key) {
		return (1);
	} else {
		return (0);
	}
}

/*
 * xsvc_devmap()
 *
 */
/*ARGSUSED*/
static int
xsvc_devmap(dev_t dev, devmap_cookie_t dhp, offset_t off, size_t len,
		size_t *maplen, uint_t model)
{
	ddi_umem_cookie_t cookie;
	xsvc_state_t *state;
	offset_t off_align;
	size_t npages;
	caddr_t kvai;
	size_t psize;
	int instance;
	caddr_t kva;
	pfn_t pfn;
	int err;
	int i;


	instance = getminor(dev);
	state = ddi_get_soft_state(xsvc_statep, instance);
	if (state == NULL) {
		return (ENXIO);
	}

	/*
	 * On 64-bit kernels, if we have a 32-bit application doing a mmap(),
	 * smmap32 will sign extend the offset. We need to undo that since
	 * we are passed a physical address in off, not an offset.
	 */
#if defined(__amd64)
	if (((model & DDI_MODEL_MASK) == DDI_MODEL_ILP32) &&
	    ((off & ~0xFFFFFFFFll) == ~0xFFFFFFFFll)) {
		off = off & 0xFFFFFFFF;
	}
#endif
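
	/*
	 * e.g. a 32-bit app mapping physical address 0xfee00000 arrives
	 * here as 0xfffffffffee00000 after sign extension; the masking
	 * above restores 0x00000000fee00000.
	 */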

	pfn = btop(off);

	/* always work with whole pages */
	off_align = P2ALIGN(off, PAGESIZE);
	psize = P2ROUNDUP(off + len, PAGESIZE) - off_align;

	/*
	 * if this is memory we're trying to map into user space, we first
	 * need to map the PFNs into KVA, then build up a umem cookie, and
	 * finally do a umem_setup to map it in.
	 */
	if (pf_is_memory(pfn)) {
		npages = btop(psize);

		kva = vmem_alloc(heap_arena, psize, VM_SLEEP);
		if (kva == NULL) {
			return (-1);
		}

		kvai = kva;
		for (i = 0; i < npages; i++) {
			hat_devload(kas.a_hat, kvai, PAGESIZE, pfn,
			    PROT_READ | PROT_WRITE, HAT_LOAD_LOCK);
			pfn++;
			kvai = (caddr_t)((uintptr_t)kvai + PAGESIZE);
		}

		err = xsvc_umem_cookie_alloc(kva, psize, KM_SLEEP, &cookie);
		if (err != 0) {
			goto devmapfail_cookie_alloc;
		}

		if ((err = devmap_umem_setup(dhp, state->xs_dip, &xsvc_callbk,
		    cookie, 0, psize, PROT_ALL, 0, &xsvc_device_attr)) < 0) {
			goto devmapfail_umem_setup;
		}
		*maplen = psize;

	/* If this is not memory, go through devmap_devmem_setup */
	} else {
		if ((err = devmap_devmem_setup(dhp, state->xs_dip, NULL, 0,
		    off_align, psize, PROT_ALL, 0, &xsvc_device_attr)) < 0) {
			return (err);
		}
		*maplen = psize;
	}

	return (0);

devmapfail_umem_setup:
	xsvc_umem_cookie_free(&cookie);

devmapfail_cookie_alloc:
	kvai = kva;
	for (i = 0; i < npages; i++) {
		hat_unload(kas.a_hat, kvai, PAGESIZE,
		    HAT_UNLOAD_UNLOCK);
		kvai = (caddr_t)((uintptr_t)kvai + PAGESIZE);
	}
	vmem_free(heap_arena, kva, psize);

	return (err);
}

/*
 * xsvc_umem_cookie_alloc()
 *
 *   allocate a umem cookie to be used in devmap_umem_setup using KVA already
 *   allocated.
 */
int
xsvc_umem_cookie_alloc(caddr_t kva, size_t size, int flags,
    ddi_umem_cookie_t *cookiep)
{
	struct ddi_umem_cookie *umem_cookiep;

	umem_cookiep = kmem_zalloc(sizeof (struct ddi_umem_cookie), flags);
	if (umem_cookiep == NULL) {
		*cookiep = NULL;
		return (-1);
	}

	umem_cookiep->cvaddr = kva;
	umem_cookiep->type = KMEM_NON_PAGEABLE;
	umem_cookiep->size = size;
	*cookiep = (ddi_umem_cookie_t *)umem_cookiep;

	return (0);
}

/*
 * xsvc_umem_cookie_free()
 *
 */
static void
xsvc_umem_cookie_free(ddi_umem_cookie_t *cookiep)
{
	kmem_free(*cookiep, sizeof (struct ddi_umem_cookie));
	*cookiep = NULL;
}

/*
 * xsvc_devmap_unmap()
 *
 *   This routine is only called if we were mapping in memory in
 *   xsvc_devmap(), i.e. we only pass xsvc_callbk to devmap_umem_setup when
 *   pf_is_memory() was true. It would have been nice if devmap_callback_ctl
 *   had an args param; then we wouldn't have had to look into the
 *   devmap_handle and into the umem cookie.
 */
/*ARGSUSED*/
static void
xsvc_devmap_unmap(devmap_cookie_t dhc, void *pvtp, offset_t off, size_t len,
    devmap_cookie_t new_dhp1, void **new_pvtp1, devmap_cookie_t new_dhp2,
    void **new_pvtp2)
{
	struct ddi_umem_cookie *cp;
	devmap_handle_t *dhp;
	size_t npages;
	caddr_t kvai;
	caddr_t kva;
	size_t size;
	int i;


	/* peek into the umem cookie to figure out what we need to free up */
	dhp = (devmap_handle_t *)dhc;
	cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
	kva = cp->cvaddr;
	size = cp->size;

	/*
	 * free up the umem cookie, then unmap all the pages that we mapped
	 * in during devmap, then free up the kva space.
	 */
	npages = btop(size);
	xsvc_umem_cookie_free(&dhp->dh_cookie);
	kvai = kva;
	for (i = 0; i < npages; i++) {
		hat_unload(kas.a_hat, kvai, PAGESIZE, HAT_UNLOAD_UNLOCK);
		kvai = (caddr_t)((uintptr_t)kvai + PAGESIZE);
	}
	vmem_free(heap_arena, kva, size);
}
975