xref: /titanic_50/usr/src/uts/intel/io/iommulib.c (revision a4faba164aa153855d621c694fc5aa75dd183b81)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"@(#)iommulib.c	1.6	08/09/07 SMI"
27 
28 #include <sys/sunddi.h>
29 #include <sys/sunndi.h>
30 #include <sys/errno.h>
31 #include <sys/modctl.h>
32 #include <sys/iommulib.h>
33 
34 /* ******** Type definitions private to this file  ********************** */
35 
/* 1 per IOMMU unit. There may be more than one per dip */
typedef struct iommulib_unit {
	kmutex_t ilu_lock;		/* protects all fields below */
	uint64_t ilu_ref;		/* count of open handles (nex_open) */
	uint32_t ilu_unitid;		/* IOMMULIB-assigned id; non-zero while registered */
	dev_info_t *ilu_dip;		/* IOMMU driver devinfo node (NDI-held) */
	iommulib_ops_t *ilu_ops;	/* vendor ops vector from registration */
	void* ilu_data;			/* vendor-private data (ops->ilops_data) */
	struct iommulib_unit *ilu_next;	/* doubly-linked iommulib_list linkage */
	struct iommulib_unit *ilu_prev;
} iommulib_unit_t;
47 
/* 1 per registered nexus (typically rootnex) */
typedef struct iommulib_nex {
	dev_info_t *nex_dip;		/* nexus devinfo node (NDI-held) */
	iommulib_nexops_t nex_ops;	/* copy of the nexus DMA ops vector */
	struct iommulib_nex *nex_next;	/* doubly-linked iommulib_nexus_list */
	struct iommulib_nex *nex_prev;
} iommulib_nex_t;
54 
/* *********  Globals ************************ */

/* SMBIOS handle exported for use by IOMMU drivers */
smbios_hdl_t *iommulib_smbios;

/* IOMMU side: Following data protected by iommulib_lock */
static kmutex_t iommulib_lock;
static iommulib_unit_t   *iommulib_list;	/* list of registered units */
static uint64_t iommulib_unit_ids = 0;		/* last unit id handed out */
static uint64_t iommulib_num_units = 0;		/* current count of units */

/* rootnex side data, protected by iommulib_nexus_lock */

static kmutex_t iommulib_nexus_lock;
static iommulib_nex_t *iommulib_nexus_list;

/*
 * Set (once) by _fini() to refuse further registrations.
 * NOTE(review): described as settable "atomically without lock", but
 * _fini() actually sets it while holding iommulib_lock -- confirm which
 * protocol is intended.
 */
static volatile uint32_t iommulib_fini;

/* debug flag: when nonzero, log probe misses and handle closes */
static int iommulib_debug;
76 
/*
 * Module linkage information for the kernel: a miscellaneous (library)
 * module with no dev_ops of its own.
 */
static struct modlmisc modlmisc = {
	&mod_miscops, "IOMMU library module"
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modlmisc, NULL
};
87 
88 int
89 _init(void)
90 {
91 	return (mod_install(&modlinkage));
92 }
93 
94 int
95 _fini(void)
96 {
97 	mutex_enter(&iommulib_lock);
98 	if (iommulib_list != NULL || iommulib_nexus_list != NULL) {
99 		mutex_exit(&iommulib_lock);
100 		return (EBUSY);
101 	}
102 	iommulib_fini = 1;
103 
104 	mutex_exit(&iommulib_lock);
105 	return (mod_remove(&modlinkage));
106 }
107 
108 int
109 _info(struct modinfo *modinfop)
110 {
111 	return (mod_info(&modlinkage, modinfop));
112 }
113 
114 /*
115  * Routines with iommulib_iommu_* are invoked from the
116  * IOMMU driver.
117  * Routines with iommulib_nex* are invoked from the
118  * nexus driver (typically rootnex)
119  */
120 
121 int
122 iommulib_nexus_register(dev_info_t *dip, iommulib_nexops_t *nexops,
123     iommulib_nexhandle_t *handle)
124 {
125 	iommulib_nex_t *nexp;
126 	int instance = ddi_get_instance(dip);
127 	const char *driver = ddi_driver_name(dip);
128 	dev_info_t *pdip = ddi_get_parent(dip);
129 	const char *f = "iommulib_nexus_register";
130 
131 	ASSERT(nexops);
132 	ASSERT(handle);
133 
134 	*handle = NULL;
135 
136 	/*
137 	 * Root node is never busy held
138 	 */
139 	if (dip != ddi_root_node() && (i_ddi_node_state(dip) < DS_PROBED ||
140 	    !DEVI_BUSY_OWNED(pdip))) {
141 		cmn_err(CE_WARN, "%s: NEXUS devinfo node not in DS_PROBED "
142 		    "or busy held for nexops vector (%p). Failing registration",
143 		    f, (void *)nexops);
144 		return (DDI_FAILURE);
145 	}
146 
147 	if (nexops->nops_vers != IOMMU_NEXOPS_VERSION) {
148 		cmn_err(CE_WARN, "%s: %s%d: Invalid IOMMULIB nexops version "
149 		    "in nexops vector (%p). Failing NEXUS registration",
150 		    f, driver, instance, (void *)nexops);
151 		return (DDI_FAILURE);
152 	}
153 
154 	ASSERT(nexops->nops_data == NULL);
155 
156 	if (nexops->nops_id == NULL) {
157 		cmn_err(CE_WARN, "%s: %s%d: NULL ID field. "
158 		    "Failing registration for nexops vector: %p",
159 		    f, driver, instance, (void *)nexops);
160 		return (DDI_FAILURE);
161 	}
162 
163 	if (nexops->nops_dma_allochdl == NULL) {
164 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_allochdl op. "
165 		    "Failing registration for ops vector: %p", f,
166 		    driver, instance, (void *)nexops);
167 		return (DDI_FAILURE);
168 	}
169 
170 	if (nexops->nops_dma_freehdl == NULL) {
171 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_freehdl op. "
172 		    "Failing registration for ops vector: %p", f,
173 		    driver, instance, (void *)nexops);
174 		return (DDI_FAILURE);
175 	}
176 
177 	if (nexops->nops_dma_bindhdl == NULL) {
178 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_bindhdl op. "
179 		    "Failing registration for ops vector: %p", f,
180 		    driver, instance, (void *)nexops);
181 		return (DDI_FAILURE);
182 	}
183 
184 	if (nexops->nops_dma_sync == NULL) {
185 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_sync op. "
186 		    "Failing registration for ops vector: %p", f,
187 		    driver, instance, (void *)nexops);
188 		return (DDI_FAILURE);
189 	}
190 
191 	if (nexops->nops_dma_reset_cookies == NULL) {
192 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_reset_cookies op. "
193 		    "Failing registration for ops vector: %p", f,
194 		    driver, instance, (void *)nexops);
195 		return (DDI_FAILURE);
196 	}
197 
198 	if (nexops->nops_dma_get_cookies == NULL) {
199 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_get_cookies op. "
200 		    "Failing registration for ops vector: %p", f,
201 		    driver, instance, (void *)nexops);
202 		return (DDI_FAILURE);
203 	}
204 
205 	if (nexops->nops_dma_set_cookies == NULL) {
206 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_set_cookies op. "
207 		    "Failing registration for ops vector: %p", f,
208 		    driver, instance, (void *)nexops);
209 		return (DDI_FAILURE);
210 	}
211 
212 	if (nexops->nops_dma_clear_cookies == NULL) {
213 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_clear_cookies op. "
214 		    "Failing registration for ops vector: %p", f,
215 		    driver, instance, (void *)nexops);
216 		return (DDI_FAILURE);
217 	}
218 
219 	if (nexops->nops_dma_get_sleep_flags == NULL) {
220 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_get_sleep_flags op. "
221 		    "Failing registration for ops vector: %p", f,
222 		    driver, instance, (void *)nexops);
223 		return (DDI_FAILURE);
224 	}
225 
226 	if (nexops->nops_dma_win == NULL) {
227 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_win op. "
228 		    "Failing registration for ops vector: %p", f,
229 		    driver, instance, (void *)nexops);
230 		return (DDI_FAILURE);
231 	}
232 
233 	/* Check for legacy ops */
234 	if (nexops->nops_dma_map == NULL) {
235 		cmn_err(CE_WARN, "%s: %s%d: NULL legacy nops_dma_map op. "
236 		    "Failing registration for ops vector: %p", f,
237 		    driver, instance, (void *)nexops);
238 		return (DDI_FAILURE);
239 	}
240 
241 	if (nexops->nops_dma_mctl == NULL) {
242 		cmn_err(CE_WARN, "%s: %s%d: NULL legacy nops_dma_mctl op. "
243 		    "Failing registration for ops vector: %p", f,
244 		    driver, instance, (void *)nexops);
245 		return (DDI_FAILURE);
246 	}
247 
248 	nexp = kmem_zalloc(sizeof (iommulib_nex_t), KM_SLEEP);
249 
250 	mutex_enter(&iommulib_lock);
251 	if (iommulib_fini == 1) {
252 		mutex_exit(&iommulib_lock);
253 		cmn_err(CE_WARN, "%s: IOMMULIB unloading. "
254 		    "Failing NEXUS register.", f);
255 		kmem_free(nexp, sizeof (iommulib_nex_t));
256 		return (DDI_FAILURE);
257 	}
258 
259 	/*
260 	 * fini/register race conditions have been handled. Now create the
261 	 * nexus struct
262 	 */
263 	ndi_hold_devi(dip);
264 	nexp->nex_dip = dip;
265 	nexp->nex_ops = *nexops;
266 
267 	mutex_enter(&iommulib_nexus_lock);
268 	nexp->nex_next = iommulib_nexus_list;
269 	iommulib_nexus_list = nexp;
270 	nexp->nex_prev = NULL;
271 
272 	if (nexp->nex_next != NULL)
273 		nexp->nex_next->nex_prev = nexp;
274 
275 	mutex_exit(&iommulib_nexus_lock);
276 	mutex_exit(&iommulib_lock);
277 
278 	cmn_err(CE_NOTE, "!%s: %s%d: Succesfully registered NEXUS %s "
279 	    "nexops=%p", f, driver, instance, ddi_node_name(dip),
280 	    (void *)nexops);
281 
282 	*handle = nexp;
283 
284 	return (DDI_SUCCESS);
285 }
286 
287 int
288 iommulib_nexus_unregister(iommulib_nexhandle_t handle)
289 {
290 	dev_info_t *dip;
291 	int instance;
292 	const char *driver;
293 	iommulib_nex_t *nexp = (iommulib_nex_t *)handle;
294 	const char *f = "iommulib_nexus_unregister";
295 
296 	ASSERT(nexp);
297 
298 	mutex_enter(&iommulib_nexus_lock);
299 
300 	dip = nexp->nex_dip;
301 	driver = ddi_driver_name(dip);
302 	instance = ddi_get_instance(dip);
303 
304 	/* A future enhancement would be to add ref-counts */
305 
306 	if (nexp->nex_prev == NULL) {
307 		iommulib_nexus_list = nexp->nex_next;
308 	} else {
309 		nexp->nex_prev->nex_next = nexp->nex_next;
310 	}
311 
312 	if (nexp->nex_next != NULL)
313 		nexp->nex_next->nex_prev = nexp->nex_prev;
314 
315 	mutex_exit(&iommulib_nexus_lock);
316 
317 	kmem_free(nexp, sizeof (iommulib_nex_t));
318 
319 	cmn_err(CE_NOTE, "!%s: %s%d: NEXUS (%s) handle successfully "
320 	    "unregistered from IOMMULIB", f, driver, instance,
321 	    ddi_node_name(dip));
322 
323 	ndi_rele_devi(dip);
324 
325 	return (DDI_SUCCESS);
326 }
327 
328 static iommulib_nexops_t *
329 lookup_nexops(dev_info_t *dip)
330 {
331 	iommulib_nex_t  *nexp;
332 
333 	mutex_enter(&iommulib_nexus_lock);
334 	nexp = iommulib_nexus_list;
335 	while (nexp) {
336 		if (nexp->nex_dip == dip)
337 			break;
338 		nexp = nexp->nex_next;
339 	}
340 	mutex_exit(&iommulib_nexus_lock);
341 
342 	return (nexp ? &nexp->nex_ops : NULL);
343 }
344 
345 int
346 iommulib_iommu_register(dev_info_t *dip, iommulib_ops_t *ops,
347     iommulib_handle_t *handle)
348 {
349 	const char *vendor;
350 	iommulib_unit_t *unitp;
351 	int instance = ddi_get_instance(dip);
352 	const char *driver = ddi_driver_name(dip);
353 	dev_info_t *pdip = ddi_get_parent(dip);
354 	const char *f = "iommulib_register";
355 
356 	ASSERT(ops);
357 	ASSERT(handle);
358 
359 	if (i_ddi_node_state(dip) < DS_PROBED || !DEVI_BUSY_OWNED(pdip)) {
360 		cmn_err(CE_WARN, "%s: devinfo node not in DS_PROBED or "
361 		    "busy held for ops vector (%p). Failing registration",
362 		    f, (void *)ops);
363 		return (DDI_FAILURE);
364 	}
365 
366 
367 	if (ops->ilops_vers != IOMMU_OPS_VERSION) {
368 		cmn_err(CE_WARN, "%s: %s%d: Invalid IOMMULIB ops version "
369 		    "in ops vector (%p). Failing registration", f, driver,
370 		    instance, (void *)ops);
371 		return (DDI_FAILURE);
372 	}
373 
374 	switch (ops->ilops_vendor) {
375 	case AMD_IOMMU:
376 		vendor = "AMD";
377 		break;
378 	case INTEL_IOMMU:
379 		vendor = "Intel";
380 		break;
381 	case INVALID_VENDOR:
382 		cmn_err(CE_WARN, "%s: %s%d: vendor field (%x) not initialized. "
383 		    "Failing registration for ops vector: %p", f,
384 		    driver, instance, ops->ilops_vendor, (void *)ops);
385 		return (DDI_FAILURE);
386 	default:
387 		cmn_err(CE_WARN, "%s: %s%d: Invalid vendor field (%x). "
388 		    "Failing registration for ops vector: %p", f,
389 		    driver, instance, ops->ilops_vendor, (void *)ops);
390 		return (DDI_FAILURE);
391 	}
392 
393 	cmn_err(CE_NOTE, "!%s: %s%d: Detected IOMMU registration from vendor"
394 	    " %s", f, driver, instance, vendor);
395 
396 	if (ops->ilops_data == NULL) {
397 		cmn_err(CE_WARN, "%s: %s%d: NULL IOMMU data field. "
398 		    "Failing registration for ops vector: %p", f,
399 		    driver, instance, (void *)ops);
400 		return (DDI_FAILURE);
401 	}
402 
403 	if (ops->ilops_id == NULL) {
404 		cmn_err(CE_WARN, "%s: %s%d: NULL ID field. "
405 		    "Failing registration for ops vector: %p", f,
406 		    driver, instance, (void *)ops);
407 		return (DDI_FAILURE);
408 	}
409 
410 	if (ops->ilops_probe == NULL) {
411 		cmn_err(CE_WARN, "%s: %s%d: NULL probe op. "
412 		    "Failing registration for ops vector: %p", f,
413 		    driver, instance, (void *)ops);
414 		return (DDI_FAILURE);
415 	}
416 
417 	if (ops->ilops_dma_allochdl == NULL) {
418 		cmn_err(CE_WARN, "%s: %s%d: NULL dma_allochdl op. "
419 		    "Failing registration for ops vector: %p", f,
420 		    driver, instance, (void *)ops);
421 		return (DDI_FAILURE);
422 	}
423 
424 	if (ops->ilops_dma_freehdl == NULL) {
425 		cmn_err(CE_WARN, "%s: %s%d: NULL dma_freehdl op. "
426 		    "Failing registration for ops vector: %p", f,
427 		    driver, instance, (void *)ops);
428 		return (DDI_FAILURE);
429 	}
430 
431 	if (ops->ilops_dma_bindhdl == NULL) {
432 		cmn_err(CE_WARN, "%s: %s%d: NULL dma_bindhdl op. "
433 		    "Failing registration for ops vector: %p", f,
434 		    driver, instance, (void *)ops);
435 		return (DDI_FAILURE);
436 	}
437 
438 	if (ops->ilops_dma_sync == NULL) {
439 		cmn_err(CE_WARN, "%s: %s%d: NULL dma_sync op. "
440 		    "Failing registration for ops vector: %p", f,
441 		    driver, instance, (void *)ops);
442 		return (DDI_FAILURE);
443 	}
444 
445 	if (ops->ilops_dma_win == NULL) {
446 		cmn_err(CE_WARN, "%s: %s%d: NULL dma_win op. "
447 		    "Failing registration for ops vector: %p", f,
448 		    driver, instance, (void *)ops);
449 		return (DDI_FAILURE);
450 	}
451 
452 	/* Check for legacy ops */
453 	if (ops->ilops_dma_map == NULL) {
454 		cmn_err(CE_WARN, "%s: %s%d: NULL legacy dma_map op. "
455 		    "Failing registration for ops vector: %p", f,
456 		    driver, instance, (void *)ops);
457 		return (DDI_FAILURE);
458 	}
459 
460 	if (ops->ilops_dma_mctl == NULL) {
461 		cmn_err(CE_WARN, "%s: %s%d: NULL legacy dma_mctl op. "
462 		    "Failing registration for ops vector: %p", f,
463 		    driver, instance, (void *)ops);
464 		return (DDI_FAILURE);
465 	}
466 
467 	unitp = kmem_zalloc(sizeof (iommulib_unit_t), KM_SLEEP);
468 	mutex_enter(&iommulib_lock);
469 	if (iommulib_fini == 1) {
470 		mutex_exit(&iommulib_lock);
471 		cmn_err(CE_WARN, "%s: IOMMULIB unloading. Failing register.",
472 		    f);
473 		kmem_free(unitp, sizeof (iommulib_unit_t));
474 		return (DDI_FAILURE);
475 	}
476 
477 	/*
478 	 * fini/register race conditions have been handled. Now create the
479 	 * IOMMU unit
480 	 */
481 	mutex_init(&unitp->ilu_lock, NULL, MUTEX_DEFAULT, NULL);
482 
483 	mutex_enter(&unitp->ilu_lock);
484 	unitp->ilu_unitid = ++iommulib_unit_ids;
485 	unitp->ilu_ref = 0;
486 	ndi_hold_devi(dip);
487 	unitp->ilu_dip = dip;
488 	unitp->ilu_ops = ops;
489 	unitp->ilu_data = ops->ilops_data;
490 
491 	unitp->ilu_next = iommulib_list;
492 	iommulib_list = unitp;
493 	unitp->ilu_prev = NULL;
494 	if (unitp->ilu_next)
495 		unitp->ilu_next->ilu_prev = unitp;
496 
497 	mutex_exit(&unitp->ilu_lock);
498 
499 	iommulib_num_units++;
500 
501 	*handle = unitp;
502 
503 	mutex_exit(&iommulib_lock);
504 
505 	cmn_err(CE_NOTE, "!%s: %s%d: Succesfully registered IOMMU unit "
506 	    "from vendor=%s, ops=%p, data=%p, IOMMULIB unitid=%u",
507 	    f, driver, instance, vendor, (void *)ops, (void *)unitp->ilu_data,
508 	    unitp->ilu_unitid);
509 
510 	return (DDI_SUCCESS);
511 }
512 
513 int
514 iommulib_iommu_unregister(iommulib_handle_t handle)
515 {
516 	uint32_t unitid;
517 	dev_info_t *dip;
518 	int instance;
519 	const char *driver;
520 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
521 	const char *f = "iommulib_unregister";
522 
523 	ASSERT(unitp);
524 
525 	mutex_enter(&iommulib_lock);
526 	mutex_enter(&unitp->ilu_lock);
527 
528 	unitid = unitp->ilu_unitid;
529 	dip = unitp->ilu_dip;
530 	driver = ddi_driver_name(dip);
531 	instance = ddi_get_instance(dip);
532 
533 	if (unitp->ilu_ref != 0) {
534 		mutex_exit(&unitp->ilu_lock);
535 		mutex_exit(&iommulib_lock);
536 		cmn_err(CE_WARN, "%s: %s%d: IOMMULIB handle is busy. Cannot "
537 		    "unregister IOMMULIB unitid %u",
538 		    f, driver, instance, unitid);
539 		return (DDI_FAILURE);
540 	}
541 	unitp->ilu_unitid = 0;
542 	ASSERT(unitp->ilu_ref == 0);
543 
544 	if (unitp->ilu_prev == NULL) {
545 		iommulib_list = unitp->ilu_next;
546 		unitp->ilu_next->ilu_prev = NULL;
547 	} else {
548 		unitp->ilu_prev->ilu_next = unitp->ilu_next;
549 		unitp->ilu_next->ilu_prev = unitp->ilu_prev;
550 	}
551 
552 	iommulib_num_units--;
553 
554 	mutex_exit(&unitp->ilu_lock);
555 
556 	mutex_destroy(&unitp->ilu_lock);
557 	kmem_free(unitp, sizeof (iommulib_unit_t));
558 
559 	mutex_exit(&iommulib_lock);
560 
561 	cmn_err(CE_WARN, "%s: %s%d: IOMMULIB handle (unitid=%u) successfully "
562 	    "unregistered", f, driver, instance, unitid);
563 
564 	ndi_rele_devi(dip);
565 
566 	return (DDI_SUCCESS);
567 }
568 
/*
 * Called when a device (rdip) is being set up for DMA.  Walks the
 * registered IOMMU units and uses each unit's hardware-specific probe
 * entry point to find the IOMMU (if any) that controls rdip.  On a
 * match the unit's reference count is bumped and the unit is cached in
 * the devinfo node as devi_iommulib_handle.
 *
 * Returns DDI_SUCCESS with *errorp == 0 on success (or if rdip is
 * already set up); DDI_FAILURE with *errorp == ENOTSUP when no IOMMU
 * controls rdip or rdip is the AMD IOMMU driver's own node.
 */
int
iommulib_nex_open(dev_info_t *rdip, uint_t *errorp)
{
	iommulib_unit_t *unitp;
	int instance = ddi_get_instance(rdip);
	const char *driver = ddi_driver_name(rdip);
	const char *f = "iommulib_nex_open";

	*errorp = 0;

	/* Already associated with an IOMMU unit - nothing to do */
	if (IOMMU_USED(rdip))
		return (DDI_SUCCESS);

	ASSERT(DEVI(rdip)->devi_iommulib_handle == NULL);

	/* prevent use of IOMMU for AMD IOMMU's DMA */
	if (strcmp(driver, "amd_iommu") == 0) {
		*errorp = ENOTSUP;
		return (DDI_FAILURE);
	}

	/*
	 * Use the probe entry point to determine in a hardware specific
	 * manner whether this dip is controlled by an IOMMU. If yes,
	 * return the handle corresponding to the IOMMU unit.
	 */

	mutex_enter(&iommulib_lock);
	for (unitp = iommulib_list; unitp; unitp = unitp->ilu_next) {
		if (unitp->ilu_ops->ilops_probe(unitp, rdip) == DDI_SUCCESS)
			break;
	}

	if (unitp == NULL) {
		mutex_exit(&iommulib_lock);
		if (iommulib_debug) {
			char *buf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
			cmn_err(CE_WARN, "%s: %s%d: devinfo node (%p): is not "
			    "controlled by an IOMMU: path=%s", f, driver,
			    instance, (void *)rdip, ddi_pathname(rdip, buf));
			kmem_free(buf, MAXPATHLEN);
		}
		*errorp = ENOTSUP;
		return (DDI_FAILURE);
	}

	/*
	 * Take the reference while still holding iommulib_lock so the
	 * unit cannot be unregistered between probe and ref bump.
	 */
	mutex_enter(&unitp->ilu_lock);
	unitp->ilu_ref++;
	mutex_exit(&unitp->ilu_lock);
	mutex_exit(&iommulib_lock);

	DEVI(rdip)->devi_iommulib_handle = unitp;

	return (DDI_SUCCESS);
}
624 
/*
 * Undo iommulib_nex_open(): detach the cached IOMMU unit handle from
 * rdip and drop the unit's reference count.  Safe to call for a node
 * that never had a handle (returns immediately).
 */
void
iommulib_nex_close(dev_info_t *rdip)
{
	iommulib_unit_t *unitp;
	const char *driver;
	int instance;
	uint32_t unitid;
	const char *f = "iommulib_nex_close";

	unitp = (iommulib_unit_t *)DEVI(rdip)->devi_iommulib_handle;
	if (unitp == NULL)
		return;

	DEVI(rdip)->devi_iommulib_handle = NULL;

	/* Snapshot identity for logging, then drop the reference */
	mutex_enter(&iommulib_lock);
	mutex_enter(&unitp->ilu_lock);
	unitid = unitp->ilu_unitid;
	driver = ddi_driver_name(unitp->ilu_dip);
	instance = ddi_get_instance(unitp->ilu_dip);
	unitp->ilu_ref--;
	mutex_exit(&unitp->ilu_lock);
	mutex_exit(&iommulib_lock);

	if (iommulib_debug) {
		char *buf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
		(void) ddi_pathname(rdip, buf);
		cmn_err(CE_NOTE, "%s: %s%d: closing IOMMU for dip (%p), "
		    "unitid=%u rdip path = %s", f, driver, instance,
		    (void *)rdip, unitid, buf);
		kmem_free(buf, MAXPATHLEN);
	}
}
658 
659 int
660 iommulib_nexdma_allochdl(dev_info_t *dip, dev_info_t *rdip,
661     ddi_dma_attr_t *attr, int (*waitfp)(caddr_t),
662     caddr_t arg, ddi_dma_handle_t *dma_handlep)
663 {
664 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
665 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
666 
667 	ASSERT(unitp);
668 
669 	/* No need to grab lock - the handle is reference counted */
670 	return (unitp->ilu_ops->ilops_dma_allochdl(handle, dip, rdip,
671 	    attr, waitfp, arg, dma_handlep));
672 }
673 
674 int
675 iommulib_nexdma_freehdl(dev_info_t *dip, dev_info_t *rdip,
676     ddi_dma_handle_t dma_handle)
677 {
678 	int error;
679 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
680 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
681 
682 	ASSERT(unitp);
683 
684 	/* No need to grab lock - the handle is reference counted */
685 	error = unitp->ilu_ops->ilops_dma_freehdl(handle, dip,
686 	    rdip, dma_handle);
687 
688 	return (error);
689 }
690 
691 int
692 iommulib_nexdma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
693     ddi_dma_handle_t dma_handle, struct ddi_dma_req *dmareq,
694     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
695 {
696 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
697 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
698 
699 	ASSERT(unitp);
700 
701 	/* No need to grab lock - the handle is reference counted */
702 	return (unitp->ilu_ops->ilops_dma_bindhdl(handle, dip, rdip, dma_handle,
703 	    dmareq, cookiep, ccountp));
704 }
705 
706 int
707 iommulib_nexdma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
708     ddi_dma_handle_t dma_handle)
709 {
710 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
711 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
712 
713 	ASSERT(unitp);
714 
715 	/* No need to grab lock - the handle is reference counted */
716 	return (unitp->ilu_ops->ilops_dma_unbindhdl(handle, dip, rdip,
717 	    dma_handle));
718 }
719 
720 int
721 iommulib_nexdma_sync(dev_info_t *dip, dev_info_t *rdip,
722     ddi_dma_handle_t dma_handle, off_t off, size_t len,
723     uint_t cache_flags)
724 {
725 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
726 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
727 
728 	ASSERT(unitp);
729 
730 	/* No need to grab lock - the handle is reference counted */
731 	return (unitp->ilu_ops->ilops_dma_sync(handle, dip, rdip, dma_handle,
732 	    off, len, cache_flags));
733 }
734 
735 int
736 iommulib_nexdma_win(dev_info_t *dip, dev_info_t *rdip,
737     ddi_dma_handle_t dma_handle, uint_t win, off_t *offp, size_t *lenp,
738     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
739 {
740 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
741 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
742 
743 	ASSERT(unitp);
744 
745 	/* No need to grab lock - the handle is reference counted */
746 	return (unitp->ilu_ops->ilops_dma_win(handle, dip, rdip, dma_handle,
747 	    win, offp, lenp, cookiep, ccountp));
748 }
749 
750 /* Obsolete DMA routines */
751 
752 int
753 iommulib_nexdma_map(dev_info_t *dip, dev_info_t *rdip,
754     struct ddi_dma_req *dmareq, ddi_dma_handle_t *dma_handle)
755 {
756 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
757 	iommulib_unit_t *unitp = handle;
758 
759 	ASSERT(unitp);
760 
761 	/* No need to grab lock - the handle is reference counted */
762 	return (unitp->ilu_ops->ilops_dma_map(handle, dip, rdip, dmareq,
763 	    dma_handle));
764 }
765 
766 int
767 iommulib_nexdma_mctl(dev_info_t *dip, dev_info_t *rdip,
768     ddi_dma_handle_t dma_handle, enum ddi_dma_ctlops request,
769     off_t *offp, size_t *lenp, caddr_t *objpp, uint_t cache_flags)
770 {
771 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
772 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
773 
774 	ASSERT(unitp);
775 
776 	/* No need to grab lock - the handle is reference counted */
777 	return (unitp->ilu_ops->ilops_dma_mctl(handle, dip, rdip, dma_handle,
778 	    request, offp, lenp, objpp, cache_flags));
779 }
780 
781 /* Utility routines invoked by IOMMU drivers */
782 int
783 iommulib_iommu_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
784     ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
785     ddi_dma_handle_t *handlep)
786 {
787 	iommulib_nexops_t *nexops = lookup_nexops(dip);
788 	if (nexops == NULL)
789 		return (DDI_FAILURE);
790 	return (nexops->nops_dma_allochdl(dip, rdip, attr, waitfp, arg,
791 	    handlep));
792 }
793 
794 int
795 iommulib_iommu_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
796     ddi_dma_handle_t handle)
797 {
798 	iommulib_nexops_t *nexops = lookup_nexops(dip);
799 	if (nexops == NULL)
800 		return (DDI_FAILURE);
801 	return (nexops->nops_dma_freehdl(dip, rdip, handle));
802 }
803 
804 int
805 iommulib_iommu_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
806     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
807     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
808 {
809 	iommulib_nexops_t *nexops = lookup_nexops(dip);
810 	if (nexops == NULL)
811 		return (DDI_FAILURE);
812 	return (nexops->nops_dma_bindhdl(dip, rdip, handle, dmareq,
813 	    cookiep, ccountp));
814 }
815 
816 int
817 iommulib_iommu_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
818     ddi_dma_handle_t handle)
819 {
820 	iommulib_nexops_t *nexops = lookup_nexops(dip);
821 	if (nexops == NULL)
822 		return (DDI_FAILURE);
823 	return (nexops->nops_dma_unbindhdl(dip, rdip, handle));
824 }
825 
826 void
827 iommulib_iommu_dma_reset_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
828 {
829 	iommulib_nexops_t *nexops = lookup_nexops(dip);
830 	nexops->nops_dma_reset_cookies(dip, handle);
831 }
832 
833 int
834 iommulib_iommu_dma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
835     ddi_dma_cookie_t **cookiepp, uint_t *ccountp)
836 {
837 	iommulib_nexops_t *nexops = lookup_nexops(dip);
838 	if (nexops == NULL)
839 		return (DDI_FAILURE);
840 	return (nexops->nops_dma_get_cookies(dip, handle, cookiepp, ccountp));
841 }
842 
843 int
844 iommulib_iommu_dma_set_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
845     ddi_dma_cookie_t *cookiep, uint_t ccount)
846 {
847 	iommulib_nexops_t *nexops = lookup_nexops(dip);
848 	if (nexops == NULL)
849 		return (DDI_FAILURE);
850 	return (nexops->nops_dma_set_cookies(dip, handle, cookiep, ccount));
851 }
852 
853 int
854 iommulib_iommu_dma_clear_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
855 {
856 	iommulib_nexops_t *nexops = lookup_nexops(dip);
857 	if (nexops == NULL)
858 		return (DDI_FAILURE);
859 	return (nexops->nops_dma_clear_cookies(dip, handle));
860 }
861 
862 int
863 iommulib_iommu_dma_get_sleep_flags(dev_info_t *dip, ddi_dma_handle_t handle)
864 {
865 	iommulib_nexops_t *nexops = lookup_nexops(dip);
866 	if (nexops == NULL)
867 		return (DDI_FAILURE);
868 	return (nexops->nops_dma_get_sleep_flags(handle));
869 }
870 
871 int
872 iommulib_iommu_dma_sync(dev_info_t *dip, dev_info_t *rdip,
873     ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags)
874 {
875 	iommulib_nexops_t *nexops = lookup_nexops(dip);
876 	if (nexops == NULL)
877 		return (DDI_FAILURE);
878 	return (nexops->nops_dma_sync(dip, rdip, handle, off, len,
879 	    cache_flags));
880 }
881 
882 int
883 iommulib_iommu_dma_win(dev_info_t *dip, dev_info_t *rdip,
884     ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp,
885     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
886 {
887 	iommulib_nexops_t *nexops = lookup_nexops(dip);
888 	if (nexops == NULL)
889 		return (DDI_FAILURE);
890 	return (nexops->nops_dma_win(dip, rdip, handle, win, offp, lenp,
891 	    cookiep, ccountp));
892 }
893 
894 int
895 iommulib_iommu_dma_map(dev_info_t *dip, dev_info_t *rdip,
896     struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep)
897 {
898 	iommulib_nexops_t *nexops = lookup_nexops(dip);
899 	if (nexops == NULL)
900 		return (DDI_FAILURE);
901 	return (nexops->nops_dma_map(dip, rdip, dmareq, handlep));
902 }
903 
904 int
905 iommulib_iommu_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
906     ddi_dma_handle_t handle, enum ddi_dma_ctlops request, off_t *offp,
907     size_t *lenp, caddr_t *objpp, uint_t cache_flags)
908 {
909 	iommulib_nexops_t *nexops = lookup_nexops(dip);
910 	if (nexops == NULL)
911 		return (DDI_FAILURE);
912 	return (nexops->nops_dma_mctl(dip, rdip, handle, request, offp, lenp,
913 	    objpp, cache_flags));
914 }
915 
916 int
917 iommulib_iommu_getunitid(iommulib_handle_t handle, uint64_t *unitidp)
918 {
919 	iommulib_unit_t *unitp;
920 	uint64_t unitid;
921 
922 	unitp = (iommulib_unit_t *)handle;
923 
924 	ASSERT(unitp);
925 	ASSERT(unitidp);
926 
927 	mutex_enter(&unitp->ilu_lock);
928 	unitid = unitp->ilu_unitid;
929 	mutex_exit(&unitp->ilu_lock);
930 
931 	ASSERT(unitid > 0);
932 	*unitidp = (uint64_t)unitid;
933 
934 	return (DDI_SUCCESS);
935 }
936 
937 dev_info_t *
938 iommulib_iommu_getdip(iommulib_handle_t handle)
939 {
940 	iommulib_unit_t *unitp;
941 	dev_info_t *dip;
942 
943 	unitp = (iommulib_unit_t *)handle;
944 
945 	ASSERT(unitp);
946 
947 	mutex_enter(&unitp->ilu_lock);
948 	dip = unitp->ilu_dip;
949 	ASSERT(dip);
950 	ndi_hold_devi(dip);
951 	mutex_exit(&unitp->ilu_lock);
952 
953 	return (dip);
954 }
955 
956 iommulib_ops_t *
957 iommulib_iommu_getops(iommulib_handle_t handle)
958 {
959 	iommulib_unit_t *unitp;
960 	iommulib_ops_t *ops;
961 
962 	unitp = (iommulib_unit_t *)handle;
963 
964 	ASSERT(unitp);
965 
966 	mutex_enter(&unitp->ilu_lock);
967 	ops = unitp->ilu_ops;
968 	mutex_exit(&unitp->ilu_lock);
969 
970 	ASSERT(ops);
971 
972 	return (ops);
973 }
974 
975 void *
976 iommulib_iommu_getdata(iommulib_handle_t handle)
977 {
978 	iommulib_unit_t *unitp;
979 	void *data;
980 
981 	unitp = (iommulib_unit_t *)handle;
982 
983 	ASSERT(unitp);
984 
985 	mutex_enter(&unitp->ilu_lock);
986 	data = unitp->ilu_data;
987 	mutex_exit(&unitp->ilu_lock);
988 
989 	ASSERT(data);
990 
991 	return (data);
992 }
993