xref: /titanic_51/usr/src/uts/intel/io/iommulib.c (revision 7248adcb841e89c33c8b56bb2616710a788afbf5)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 
25 #pragma ident	"@(#)iommulib.c	1.6	08/09/07 SMI"
26 
27 #include <sys/sunddi.h>
28 #include <sys/sunndi.h>
29 #include <sys/errno.h>
30 #include <sys/modctl.h>
31 #include <sys/iommulib.h>
32 
33 /* ******** Type definitions private to this file  ********************** */
34 
/* 1 per IOMMU unit. There may be more than one per dip */
typedef struct iommulib_unit {
	kmutex_t ilu_lock;		/* protects the fields of this unit */
	uint64_t ilu_ref;		/* # devices bound via iommulib_nex_open() */
	uint32_t ilu_unitid;		/* non-zero id assigned at registration */
	dev_info_t *ilu_dip;		/* IOMMU driver devinfo node (ndi held) */
	iommulib_ops_t *ilu_ops;	/* vendor IOMMU driver's ops vector */
	void* ilu_data;			/* vendor private data (ilops_data) */
	struct iommulib_unit *ilu_next;	/* global unit list linkage */
	struct iommulib_unit *ilu_prev;
	iommulib_nexhandle_t ilu_nex;	/* nexus this unit was opened through */
} iommulib_unit_t;
47 
/* 1 per registered nexus (typically rootnex) driver */
typedef struct iommulib_nex {
	dev_info_t *nex_dip;		/* nexus devinfo node (ndi held) */
	iommulib_nexops_t nex_ops;	/* copy of the nexus ops vector */
	struct iommulib_nex *nex_next;	/* global nexus list linkage */
	struct iommulib_nex *nex_prev;
	uint_t nex_ref;			/* # units opened via this nexus (atomic) */
} iommulib_nex_t;
55 
56 /* *********  Globals ************************ */
57 
/* For IOMMU drivers: SMBIOS handle shared with vendor IOMMU drivers */
smbios_hdl_t *iommulib_smbios;

/* IOMMU side: Following data protected by lock */
static kmutex_t iommulib_lock;
static iommulib_unit_t   *iommulib_list;	/* doubly-linked unit list */
static uint64_t iommulib_unit_ids = 0;		/* last unit id handed out */
static uint64_t iommulib_num_units = 0;		/* units currently registered */

/* rootnex side data */

static kmutex_t iommulib_nexus_lock;
static iommulib_nex_t *iommulib_nexus_list;	/* doubly-linked nexus list */

/* can be set atomically without lock */
static volatile uint32_t iommulib_fini;		/* nonzero once _fini() commits */

/* debug flag */
static int iommulib_debug;
77 
78 /*
79  * Module linkage information for the kernel.
80  */
81 static struct modlmisc modlmisc = {
82 	&mod_miscops, "IOMMU library module"
83 };
84 
85 static struct modlinkage modlinkage = {
86 	MODREV_1, (void *)&modlmisc, NULL
87 };
88 
89 int
90 _init(void)
91 {
92 	return (mod_install(&modlinkage));
93 }
94 
int
_fini(void)
{
	mutex_enter(&iommulib_lock);
	/*
	 * Refuse to unload while any IOMMU unit or nexus is still
	 * registered.  iommulib_nexus_list is normally manipulated under
	 * iommulib_nexus_lock, but the register paths take iommulib_lock
	 * first before inserting, so holding iommulib_lock here is
	 * sufficient to make the check and the iommulib_fini transition
	 * atomic with respect to registration.
	 */
	if (iommulib_list != NULL || iommulib_nexus_list != NULL) {
		mutex_exit(&iommulib_lock);
		return (EBUSY);
	}
	/* From this point on, all register attempts fail. */
	iommulib_fini = 1;

	mutex_exit(&iommulib_lock);
	return (mod_remove(&modlinkage));
}
108 
109 int
110 _info(struct modinfo *modinfop)
111 {
112 	return (mod_info(&modlinkage, modinfop));
113 }
114 
115 /*
116  * Routines with iommulib_iommu_* are invoked from the
117  * IOMMU driver.
118  * Routines with iommulib_nex* are invoked from the
119  * nexus driver (typically rootnex)
120  */
121 
122 int
123 iommulib_nexus_register(dev_info_t *dip, iommulib_nexops_t *nexops,
124     iommulib_nexhandle_t *handle)
125 {
126 	iommulib_nex_t *nexp;
127 	int instance = ddi_get_instance(dip);
128 	const char *driver = ddi_driver_name(dip);
129 	dev_info_t *pdip = ddi_get_parent(dip);
130 	const char *f = "iommulib_nexus_register";
131 
132 	ASSERT(nexops);
133 	ASSERT(handle);
134 
135 	*handle = NULL;
136 
137 	/*
138 	 * Root node is never busy held
139 	 */
140 	if (dip != ddi_root_node() && (i_ddi_node_state(dip) < DS_PROBED ||
141 	    !DEVI_BUSY_OWNED(pdip))) {
142 		cmn_err(CE_WARN, "%s: NEXUS devinfo node not in DS_PROBED "
143 		    "or busy held for nexops vector (%p). Failing registration",
144 		    f, (void *)nexops);
145 		return (DDI_FAILURE);
146 	}
147 
148 	if (nexops->nops_vers != IOMMU_NEXOPS_VERSION) {
149 		cmn_err(CE_WARN, "%s: %s%d: Invalid IOMMULIB nexops version "
150 		    "in nexops vector (%p). Failing NEXUS registration",
151 		    f, driver, instance, (void *)nexops);
152 		return (DDI_FAILURE);
153 	}
154 
155 	ASSERT(nexops->nops_data == NULL);
156 
157 	if (nexops->nops_id == NULL) {
158 		cmn_err(CE_WARN, "%s: %s%d: NULL ID field. "
159 		    "Failing registration for nexops vector: %p",
160 		    f, driver, instance, (void *)nexops);
161 		return (DDI_FAILURE);
162 	}
163 
164 	if (nexops->nops_dma_allochdl == NULL) {
165 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_allochdl op. "
166 		    "Failing registration for ops vector: %p", f,
167 		    driver, instance, (void *)nexops);
168 		return (DDI_FAILURE);
169 	}
170 
171 	if (nexops->nops_dma_freehdl == NULL) {
172 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_freehdl op. "
173 		    "Failing registration for ops vector: %p", f,
174 		    driver, instance, (void *)nexops);
175 		return (DDI_FAILURE);
176 	}
177 
178 	if (nexops->nops_dma_bindhdl == NULL) {
179 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_bindhdl op. "
180 		    "Failing registration for ops vector: %p", f,
181 		    driver, instance, (void *)nexops);
182 		return (DDI_FAILURE);
183 	}
184 
185 	if (nexops->nops_dma_sync == NULL) {
186 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_sync op. "
187 		    "Failing registration for ops vector: %p", f,
188 		    driver, instance, (void *)nexops);
189 		return (DDI_FAILURE);
190 	}
191 
192 	if (nexops->nops_dma_reset_cookies == NULL) {
193 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_reset_cookies op. "
194 		    "Failing registration for ops vector: %p", f,
195 		    driver, instance, (void *)nexops);
196 		return (DDI_FAILURE);
197 	}
198 
199 	if (nexops->nops_dma_get_cookies == NULL) {
200 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_get_cookies op. "
201 		    "Failing registration for ops vector: %p", f,
202 		    driver, instance, (void *)nexops);
203 		return (DDI_FAILURE);
204 	}
205 
206 	if (nexops->nops_dma_set_cookies == NULL) {
207 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_set_cookies op. "
208 		    "Failing registration for ops vector: %p", f,
209 		    driver, instance, (void *)nexops);
210 		return (DDI_FAILURE);
211 	}
212 
213 	if (nexops->nops_dma_clear_cookies == NULL) {
214 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_clear_cookies op. "
215 		    "Failing registration for ops vector: %p", f,
216 		    driver, instance, (void *)nexops);
217 		return (DDI_FAILURE);
218 	}
219 
220 	if (nexops->nops_dma_get_sleep_flags == NULL) {
221 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_get_sleep_flags op. "
222 		    "Failing registration for ops vector: %p", f,
223 		    driver, instance, (void *)nexops);
224 		return (DDI_FAILURE);
225 	}
226 
227 	if (nexops->nops_dma_win == NULL) {
228 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_win op. "
229 		    "Failing registration for ops vector: %p", f,
230 		    driver, instance, (void *)nexops);
231 		return (DDI_FAILURE);
232 	}
233 
234 	if (nexops->nops_dmahdl_setprivate == NULL) {
235 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dmahdl_setprivate op. "
236 		    "Failing registration for ops vector: %p", f,
237 		    driver, instance, (void *)nexops);
238 		return (DDI_FAILURE);
239 	}
240 
241 	if (nexops->nops_dmahdl_getprivate == NULL) {
242 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dmahdl_getprivate op. "
243 		    "Failing registration for ops vector: %p", f,
244 		    driver, instance, (void *)nexops);
245 		return (DDI_FAILURE);
246 	}
247 
248 	/* Check for legacy ops */
249 	if (nexops->nops_dma_map == NULL) {
250 		cmn_err(CE_WARN, "%s: %s%d: NULL legacy nops_dma_map op. "
251 		    "Failing registration for ops vector: %p", f,
252 		    driver, instance, (void *)nexops);
253 		return (DDI_FAILURE);
254 	}
255 
256 	if (nexops->nops_dma_mctl == NULL) {
257 		cmn_err(CE_WARN, "%s: %s%d: NULL legacy nops_dma_mctl op. "
258 		    "Failing registration for ops vector: %p", f,
259 		    driver, instance, (void *)nexops);
260 		return (DDI_FAILURE);
261 	}
262 
263 	nexp = kmem_zalloc(sizeof (iommulib_nex_t), KM_SLEEP);
264 
265 	mutex_enter(&iommulib_lock);
266 	if (iommulib_fini == 1) {
267 		mutex_exit(&iommulib_lock);
268 		cmn_err(CE_WARN, "%s: IOMMULIB unloading. "
269 		    "Failing NEXUS register.", f);
270 		kmem_free(nexp, sizeof (iommulib_nex_t));
271 		return (DDI_FAILURE);
272 	}
273 
274 	/*
275 	 * fini/register race conditions have been handled. Now create the
276 	 * nexus struct
277 	 */
278 	ndi_hold_devi(dip);
279 	nexp->nex_dip = dip;
280 	nexp->nex_ops = *nexops;
281 
282 	mutex_enter(&iommulib_nexus_lock);
283 	nexp->nex_next = iommulib_nexus_list;
284 	iommulib_nexus_list = nexp;
285 	nexp->nex_prev = NULL;
286 
287 	if (nexp->nex_next != NULL)
288 		nexp->nex_next->nex_prev = nexp;
289 
290 	nexp->nex_ref = 0;
291 
292 	/*
293 	 * The nexus device won't be controlled by an IOMMU.
294 	 */
295 	DEVI(dip)->devi_iommulib_handle = IOMMU_HANDLE_UNUSED;
296 
297 	DEVI(dip)->devi_iommulib_nex_handle = nexp;
298 
299 	mutex_exit(&iommulib_nexus_lock);
300 	mutex_exit(&iommulib_lock);
301 
302 	cmn_err(CE_NOTE, "!%s: %s%d: Succesfully registered NEXUS %s "
303 	    "nexops=%p", f, driver, instance, ddi_node_name(dip),
304 	    (void *)nexops);
305 
306 	*handle = nexp;
307 
308 	return (DDI_SUCCESS);
309 }
310 
/*
 * Undo iommulib_nexus_register(): unlink and free the nexus record and
 * drop the ndi hold on its devinfo node.  Fails with DDI_FAILURE while
 * the nexus is still referenced by an open IOMMU binding.
 *
 * NOTE(review): nex_ref is tested here before any lock is taken, and it
 * is updated elsewhere with atomic_inc/dec_uint outside
 * iommulib_nexus_lock, so a concurrent iommulib_nex_open() could raise
 * the count just after this check -- confirm callers guarantee
 * quiescence before unregistering.
 */
int
iommulib_nexus_unregister(iommulib_nexhandle_t handle)
{
	dev_info_t *dip;
	int instance;
	const char *driver;
	iommulib_nex_t *nexp = (iommulib_nex_t *)handle;
	const char *f = "iommulib_nexus_unregister";

	ASSERT(nexp);

	if (nexp->nex_ref != 0)
		return (DDI_FAILURE);

	mutex_enter(&iommulib_nexus_lock);

	/* Save identifying info before the record is freed below. */
	dip = nexp->nex_dip;
	driver = ddi_driver_name(dip);
	instance = ddi_get_instance(dip);

	/* A future enhancement would be to add ref-counts */

	if (nexp->nex_prev == NULL) {
		iommulib_nexus_list = nexp->nex_next;
	} else {
		nexp->nex_prev->nex_next = nexp->nex_next;
	}

	if (nexp->nex_next != NULL)
		nexp->nex_next->nex_prev = nexp->nex_prev;

	mutex_exit(&iommulib_nexus_lock);

	kmem_free(nexp, sizeof (iommulib_nex_t));

	cmn_err(CE_NOTE, "!%s: %s%d: NEXUS (%s) handle successfully "
	    "unregistered from IOMMULIB", f, driver, instance,
	    ddi_node_name(dip));

	/* Release the hold taken in iommulib_nexus_register(). */
	ndi_rele_devi(dip);

	return (DDI_SUCCESS);
}
354 
355 int
356 iommulib_iommu_register(dev_info_t *dip, iommulib_ops_t *ops,
357     iommulib_handle_t *handle)
358 {
359 	const char *vendor;
360 	iommulib_unit_t *unitp;
361 	int instance = ddi_get_instance(dip);
362 	const char *driver = ddi_driver_name(dip);
363 	dev_info_t *pdip = ddi_get_parent(dip);
364 	const char *f = "iommulib_register";
365 
366 	ASSERT(ops);
367 	ASSERT(handle);
368 
369 	if (ops->ilops_vers != IOMMU_OPS_VERSION) {
370 		cmn_err(CE_WARN, "%s: %s%d: Invalid IOMMULIB ops version "
371 		    "in ops vector (%p). Failing registration", f, driver,
372 		    instance, (void *)ops);
373 		return (DDI_FAILURE);
374 	}
375 
376 	switch (ops->ilops_vendor) {
377 	case AMD_IOMMU:
378 		vendor = "AMD";
379 		break;
380 	case INTEL_IOMMU:
381 		vendor = "Intel";
382 		break;
383 	case INVALID_VENDOR:
384 		cmn_err(CE_WARN, "%s: %s%d: vendor field (%x) not initialized. "
385 		    "Failing registration for ops vector: %p", f,
386 		    driver, instance, ops->ilops_vendor, (void *)ops);
387 		return (DDI_FAILURE);
388 	default:
389 		cmn_err(CE_WARN, "%s: %s%d: Invalid vendor field (%x). "
390 		    "Failing registration for ops vector: %p", f,
391 		    driver, instance, ops->ilops_vendor, (void *)ops);
392 		return (DDI_FAILURE);
393 	}
394 
395 	cmn_err(CE_NOTE, "!%s: %s%d: Detected IOMMU registration from vendor"
396 	    " %s", f, driver, instance, vendor);
397 
398 	if (ops->ilops_data == NULL) {
399 		cmn_err(CE_WARN, "%s: %s%d: NULL IOMMU data field. "
400 		    "Failing registration for ops vector: %p", f,
401 		    driver, instance, (void *)ops);
402 		return (DDI_FAILURE);
403 	}
404 
405 	if (ops->ilops_id == NULL) {
406 		cmn_err(CE_WARN, "%s: %s%d: NULL ID field. "
407 		    "Failing registration for ops vector: %p", f,
408 		    driver, instance, (void *)ops);
409 		return (DDI_FAILURE);
410 	}
411 
412 	if (ops->ilops_probe == NULL) {
413 		cmn_err(CE_WARN, "%s: %s%d: NULL probe op. "
414 		    "Failing registration for ops vector: %p", f,
415 		    driver, instance, (void *)ops);
416 		return (DDI_FAILURE);
417 	}
418 
419 	if (ops->ilops_dma_allochdl == NULL) {
420 		cmn_err(CE_WARN, "%s: %s%d: NULL dma_allochdl op. "
421 		    "Failing registration for ops vector: %p", f,
422 		    driver, instance, (void *)ops);
423 		return (DDI_FAILURE);
424 	}
425 
426 	if (ops->ilops_dma_freehdl == NULL) {
427 		cmn_err(CE_WARN, "%s: %s%d: NULL dma_freehdl op. "
428 		    "Failing registration for ops vector: %p", f,
429 		    driver, instance, (void *)ops);
430 		return (DDI_FAILURE);
431 	}
432 
433 	if (ops->ilops_dma_bindhdl == NULL) {
434 		cmn_err(CE_WARN, "%s: %s%d: NULL dma_bindhdl op. "
435 		    "Failing registration for ops vector: %p", f,
436 		    driver, instance, (void *)ops);
437 		return (DDI_FAILURE);
438 	}
439 
440 	if (ops->ilops_dma_sync == NULL) {
441 		cmn_err(CE_WARN, "%s: %s%d: NULL dma_sync op. "
442 		    "Failing registration for ops vector: %p", f,
443 		    driver, instance, (void *)ops);
444 		return (DDI_FAILURE);
445 	}
446 
447 	if (ops->ilops_dma_win == NULL) {
448 		cmn_err(CE_WARN, "%s: %s%d: NULL dma_win op. "
449 		    "Failing registration for ops vector: %p", f,
450 		    driver, instance, (void *)ops);
451 		return (DDI_FAILURE);
452 	}
453 
454 	/* Check for legacy ops */
455 	if (ops->ilops_dma_map == NULL) {
456 		cmn_err(CE_WARN, "%s: %s%d: NULL legacy dma_map op. "
457 		    "Failing registration for ops vector: %p", f,
458 		    driver, instance, (void *)ops);
459 		return (DDI_FAILURE);
460 	}
461 
462 	if (ops->ilops_dma_mctl == NULL) {
463 		cmn_err(CE_WARN, "%s: %s%d: NULL legacy dma_mctl op. "
464 		    "Failing registration for ops vector: %p", f,
465 		    driver, instance, (void *)ops);
466 		return (DDI_FAILURE);
467 	}
468 
469 	unitp = kmem_zalloc(sizeof (iommulib_unit_t), KM_SLEEP);
470 	mutex_enter(&iommulib_lock);
471 	if (iommulib_fini == 1) {
472 		mutex_exit(&iommulib_lock);
473 		cmn_err(CE_WARN, "%s: IOMMULIB unloading. Failing register.",
474 		    f);
475 		kmem_free(unitp, sizeof (iommulib_unit_t));
476 		return (DDI_FAILURE);
477 	}
478 
479 	/*
480 	 * fini/register race conditions have been handled. Now create the
481 	 * IOMMU unit
482 	 */
483 	mutex_init(&unitp->ilu_lock, NULL, MUTEX_DEFAULT, NULL);
484 
485 	mutex_enter(&unitp->ilu_lock);
486 	unitp->ilu_unitid = ++iommulib_unit_ids;
487 	unitp->ilu_ref = 0;
488 	ndi_hold_devi(dip);
489 	unitp->ilu_dip = dip;
490 	unitp->ilu_ops = ops;
491 	unitp->ilu_data = ops->ilops_data;
492 
493 	unitp->ilu_next = iommulib_list;
494 	iommulib_list = unitp;
495 	unitp->ilu_prev = NULL;
496 	if (unitp->ilu_next)
497 		unitp->ilu_next->ilu_prev = unitp;
498 
499 	/*
500 	 * The IOMMU device itself is not controlled by an IOMMU.
501 	 */
502 	DEVI(dip)->devi_iommulib_handle = IOMMU_HANDLE_UNUSED;
503 
504 	mutex_exit(&unitp->ilu_lock);
505 
506 	iommulib_num_units++;
507 
508 	*handle = unitp;
509 
510 	mutex_exit(&iommulib_lock);
511 
512 	cmn_err(CE_NOTE, "!%s: %s%d: Succesfully registered IOMMU unit "
513 	    "from vendor=%s, ops=%p, data=%p, IOMMULIB unitid=%u",
514 	    f, driver, instance, vendor, (void *)ops, (void *)unitp->ilu_data,
515 	    unitp->ilu_unitid);
516 
517 	return (DDI_SUCCESS);
518 }
519 
520 int
521 iommulib_iommu_unregister(iommulib_handle_t handle)
522 {
523 	uint32_t unitid;
524 	dev_info_t *dip;
525 	int instance;
526 	const char *driver;
527 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
528 	const char *f = "iommulib_unregister";
529 
530 	ASSERT(unitp);
531 
532 	mutex_enter(&iommulib_lock);
533 	mutex_enter(&unitp->ilu_lock);
534 
535 	unitid = unitp->ilu_unitid;
536 	dip = unitp->ilu_dip;
537 	driver = ddi_driver_name(dip);
538 	instance = ddi_get_instance(dip);
539 
540 	if (unitp->ilu_ref != 0) {
541 		mutex_exit(&unitp->ilu_lock);
542 		mutex_exit(&iommulib_lock);
543 		cmn_err(CE_WARN, "%s: %s%d: IOMMULIB handle is busy. Cannot "
544 		    "unregister IOMMULIB unitid %u",
545 		    f, driver, instance, unitid);
546 		return (DDI_FAILURE);
547 	}
548 	unitp->ilu_unitid = 0;
549 	ASSERT(unitp->ilu_ref == 0);
550 
551 	if (unitp->ilu_prev == NULL) {
552 		iommulib_list = unitp->ilu_next;
553 		unitp->ilu_next->ilu_prev = NULL;
554 	} else {
555 		unitp->ilu_prev->ilu_next = unitp->ilu_next;
556 		unitp->ilu_next->ilu_prev = unitp->ilu_prev;
557 	}
558 
559 	iommulib_num_units--;
560 
561 	mutex_exit(&unitp->ilu_lock);
562 
563 	mutex_destroy(&unitp->ilu_lock);
564 	kmem_free(unitp, sizeof (iommulib_unit_t));
565 
566 	mutex_exit(&iommulib_lock);
567 
568 	cmn_err(CE_WARN, "%s: %s%d: IOMMULIB handle (unitid=%u) successfully "
569 	    "unregistered", f, driver, instance, unitid);
570 
571 	ndi_rele_devi(dip);
572 
573 	return (DDI_SUCCESS);
574 }
575 
/*
 * Called from the nexus (dip) to decide whether device rdip is
 * controlled by a registered IOMMU unit.  Each unit's ilops_probe entry
 * point is asked in turn; on a match the unit is recorded in rdip's
 * devinfo node and both the unit and nexus reference counts are raised.
 *
 * Returns DDI_SUCCESS when an IOMMU controls rdip, DDI_ENOTSUP when
 * none does (including the AMD IOMMU driver's own DMA, which is
 * explicitly excluded from IOMMU translation).
 */
int
iommulib_nex_open(dev_info_t *dip, dev_info_t *rdip)
{
	iommulib_unit_t *unitp;
	int instance = ddi_get_instance(rdip);
	const char *driver = ddi_driver_name(rdip);
	const char *f = "iommulib_nex_open";

	ASSERT(DEVI(dip)->devi_iommulib_nex_handle != NULL);
	ASSERT(DEVI(rdip)->devi_iommulib_handle == NULL);

	/* prevent use of IOMMU for AMD IOMMU's DMA */
	if (strcmp(driver, "amd_iommu") == 0) {
		DEVI(rdip)->devi_iommulib_handle = IOMMU_HANDLE_UNUSED;
		return (DDI_ENOTSUP);
	}

	/*
	 * Use the probe entry point to determine in a hardware specific
	 * manner whether this dip is controlled by an IOMMU. If yes,
	 * return the handle corresponding to the IOMMU unit.
	 */

	mutex_enter(&iommulib_lock);
	for (unitp = iommulib_list; unitp; unitp = unitp->ilu_next) {
		if (unitp->ilu_ops->ilops_probe(unitp, rdip) == DDI_SUCCESS)
			break;
	}

	if (unitp == NULL) {
		mutex_exit(&iommulib_lock);
		if (iommulib_debug) {
			char *buf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
			cmn_err(CE_WARN, "%s: %s%d: devinfo node (%p): is not "
			    "controlled by an IOMMU: path=%s", f, driver,
			    instance, (void *)rdip, ddi_pathname(rdip, buf));
			kmem_free(buf, MAXPATHLEN);
		}
		/* Mark rdip so future DMA ops bypass the IOMMU path. */
		DEVI(rdip)->devi_iommulib_handle = IOMMU_HANDLE_UNUSED;
		return (DDI_ENOTSUP);
	}

	/* Bind rdip to the matched unit and take a reference on it. */
	mutex_enter(&unitp->ilu_lock);
	unitp->ilu_nex = DEVI(dip)->devi_iommulib_nex_handle;
	unitp->ilu_ref++;
	DEVI(rdip)->devi_iommulib_handle = unitp;
	mutex_exit(&unitp->ilu_lock);
	mutex_exit(&iommulib_lock);

	/* nex_ref is maintained atomically, outside the locks above. */
	atomic_inc_uint(&DEVI(dip)->devi_iommulib_nex_handle->nex_ref);

	return (DDI_SUCCESS);
}
629 
/*
 * Tear down rdip's binding to its IOMMU unit: clear the handle stored
 * in rdip's devinfo node and drop the unit and nexus references taken
 * in iommulib_nex_open().
 */
void
iommulib_nex_close(dev_info_t *rdip)
{
	iommulib_unit_t *unitp;
	const char *driver;
	int instance;
	uint32_t unitid;
	iommulib_nex_t *nexp;
	const char *f = "iommulib_nex_close";

	ASSERT(IOMMU_USED(rdip));

	unitp = DEVI(rdip)->devi_iommulib_handle;

	mutex_enter(&iommulib_lock);
	mutex_enter(&unitp->ilu_lock);

	/* Detach rdip from the unit before the references are dropped. */
	nexp = (iommulib_nex_t *)unitp->ilu_nex;
	DEVI(rdip)->devi_iommulib_handle = NULL;

	/* Snapshot identifying info for the debug message below. */
	unitid = unitp->ilu_unitid;
	driver = ddi_driver_name(unitp->ilu_dip);
	instance = ddi_get_instance(unitp->ilu_dip);

	unitp->ilu_ref--;
	mutex_exit(&unitp->ilu_lock);
	mutex_exit(&iommulib_lock);

	/* nex_ref is maintained atomically, outside the locks above. */
	atomic_dec_uint(&nexp->nex_ref);

	if (iommulib_debug) {
		char *buf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
		(void) ddi_pathname(rdip, buf);
		cmn_err(CE_NOTE, "%s: %s%d: closing IOMMU for dip (%p), "
		    "unitid=%u rdip path = %s", f, driver, instance,
		    (void *)rdip, unitid, buf);
		kmem_free(buf, MAXPATHLEN);
	}
}
669 
670 int
671 iommulib_nexdma_allochdl(dev_info_t *dip, dev_info_t *rdip,
672     ddi_dma_attr_t *attr, int (*waitfp)(caddr_t),
673     caddr_t arg, ddi_dma_handle_t *dma_handlep)
674 {
675 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
676 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
677 
678 	ASSERT(unitp);
679 
680 	/* No need to grab lock - the handle is reference counted */
681 	return (unitp->ilu_ops->ilops_dma_allochdl(handle, dip, rdip,
682 	    attr, waitfp, arg, dma_handlep));
683 }
684 
685 int
686 iommulib_nexdma_freehdl(dev_info_t *dip, dev_info_t *rdip,
687     ddi_dma_handle_t dma_handle)
688 {
689 	int error;
690 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
691 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
692 
693 	ASSERT(unitp);
694 
695 	/* No need to grab lock - the handle is reference counted */
696 	error = unitp->ilu_ops->ilops_dma_freehdl(handle, dip,
697 	    rdip, dma_handle);
698 
699 	return (error);
700 }
701 
702 int
703 iommulib_nexdma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
704     ddi_dma_handle_t dma_handle, struct ddi_dma_req *dmareq,
705     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
706 {
707 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
708 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
709 
710 	ASSERT(unitp);
711 
712 	/* No need to grab lock - the handle is reference counted */
713 	return (unitp->ilu_ops->ilops_dma_bindhdl(handle, dip, rdip, dma_handle,
714 	    dmareq, cookiep, ccountp));
715 }
716 
717 int
718 iommulib_nexdma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
719     ddi_dma_handle_t dma_handle)
720 {
721 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
722 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
723 
724 	ASSERT(unitp);
725 
726 	/* No need to grab lock - the handle is reference counted */
727 	return (unitp->ilu_ops->ilops_dma_unbindhdl(handle, dip, rdip,
728 	    dma_handle));
729 }
730 
731 int
732 iommulib_nexdma_sync(dev_info_t *dip, dev_info_t *rdip,
733     ddi_dma_handle_t dma_handle, off_t off, size_t len,
734     uint_t cache_flags)
735 {
736 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
737 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
738 
739 	ASSERT(unitp);
740 
741 	/* No need to grab lock - the handle is reference counted */
742 	return (unitp->ilu_ops->ilops_dma_sync(handle, dip, rdip, dma_handle,
743 	    off, len, cache_flags));
744 }
745 
746 int
747 iommulib_nexdma_win(dev_info_t *dip, dev_info_t *rdip,
748     ddi_dma_handle_t dma_handle, uint_t win, off_t *offp, size_t *lenp,
749     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
750 {
751 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
752 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
753 
754 	ASSERT(unitp);
755 
756 	/* No need to grab lock - the handle is reference counted */
757 	return (unitp->ilu_ops->ilops_dma_win(handle, dip, rdip, dma_handle,
758 	    win, offp, lenp, cookiep, ccountp));
759 }
760 
761 /* Obsolete DMA routines */
762 
763 int
764 iommulib_nexdma_map(dev_info_t *dip, dev_info_t *rdip,
765     struct ddi_dma_req *dmareq, ddi_dma_handle_t *dma_handle)
766 {
767 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
768 	iommulib_unit_t *unitp = handle;
769 
770 	ASSERT(unitp);
771 
772 	/* No need to grab lock - the handle is reference counted */
773 	return (unitp->ilu_ops->ilops_dma_map(handle, dip, rdip, dmareq,
774 	    dma_handle));
775 }
776 
777 int
778 iommulib_nexdma_mctl(dev_info_t *dip, dev_info_t *rdip,
779     ddi_dma_handle_t dma_handle, enum ddi_dma_ctlops request,
780     off_t *offp, size_t *lenp, caddr_t *objpp, uint_t cache_flags)
781 {
782 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
783 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
784 
785 	ASSERT(unitp);
786 
787 	/* No need to grab lock - the handle is reference counted */
788 	return (unitp->ilu_ops->ilops_dma_mctl(handle, dip, rdip, dma_handle,
789 	    request, offp, lenp, objpp, cache_flags));
790 }
791 
792 int
793 iommulib_nexdma_mapobject(dev_info_t *dip, dev_info_t *rdip,
794     ddi_dma_handle_t dma_handle, struct ddi_dma_req *dmareq,
795     ddi_dma_obj_t *dmao)
796 {
797 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
798 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
799 
800 	return (unitp->ilu_ops->ilops_dma_mapobject(handle, dip, rdip,
801 	    dma_handle, dmareq, dmao));
802 }
803 
804 int
805 iommulib_nexdma_unmapobject(dev_info_t *dip, dev_info_t *rdip,
806     ddi_dma_handle_t dma_handle, ddi_dma_obj_t *dmao)
807 {
808 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
809 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
810 
811 	return (unitp->ilu_ops->ilops_dma_unmapobject(handle, dip, rdip,
812 	    dma_handle, dmao));
813 }
814 
815 /* Utility routines invoked by IOMMU drivers */
816 int
817 iommulib_iommu_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
818     ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
819     ddi_dma_handle_t *handlep)
820 {
821 	iommulib_nexops_t *nexops;
822 
823 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
824 	return (nexops->nops_dma_allochdl(dip, rdip, attr, waitfp, arg,
825 	    handlep));
826 }
827 
828 int
829 iommulib_iommu_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
830     ddi_dma_handle_t handle)
831 {
832 	iommulib_nexops_t *nexops;
833 
834 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
835 	ASSERT(nexops);
836 	return (nexops->nops_dma_freehdl(dip, rdip, handle));
837 }
838 
839 int
840 iommulib_iommu_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
841     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
842     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
843 {
844 	iommulib_nexops_t *nexops;
845 
846 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
847 	return (nexops->nops_dma_bindhdl(dip, rdip, handle, dmareq,
848 	    cookiep, ccountp));
849 }
850 
851 int
852 iommulib_iommu_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
853     ddi_dma_handle_t handle)
854 {
855 	iommulib_nexops_t *nexops;
856 
857 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
858 	return (nexops->nops_dma_unbindhdl(dip, rdip, handle));
859 }
860 
861 void
862 iommulib_iommu_dma_reset_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
863 {
864 	iommulib_nexops_t *nexops;
865 
866 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
867 	nexops->nops_dma_reset_cookies(dip, handle);
868 }
869 
870 int
871 iommulib_iommu_dma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
872     ddi_dma_cookie_t **cookiepp, uint_t *ccountp)
873 {
874 	iommulib_nexops_t *nexops;
875 
876 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
877 	return (nexops->nops_dma_get_cookies(dip, handle, cookiepp, ccountp));
878 }
879 
880 int
881 iommulib_iommu_dma_set_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
882     ddi_dma_cookie_t *cookiep, uint_t ccount)
883 {
884 	iommulib_nexops_t *nexops;
885 
886 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
887 	return (nexops->nops_dma_set_cookies(dip, handle, cookiep, ccount));
888 }
889 
890 int
891 iommulib_iommu_dma_clear_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
892 {
893 	iommulib_nexops_t *nexops;
894 
895 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
896 	return (nexops->nops_dma_clear_cookies(dip, handle));
897 }
898 
899 int
900 iommulib_iommu_dma_get_sleep_flags(dev_info_t *dip, ddi_dma_handle_t handle)
901 {
902 	iommulib_nexops_t *nexops;
903 
904 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
905 	return (nexops->nops_dma_get_sleep_flags(handle));
906 }
907 
908 int
909 iommulib_iommu_dma_sync(dev_info_t *dip, dev_info_t *rdip,
910     ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags)
911 {
912 	iommulib_nexops_t *nexops;
913 
914 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
915 	return (nexops->nops_dma_sync(dip, rdip, handle, off, len,
916 	    cache_flags));
917 }
918 
919 int
920 iommulib_iommu_dma_win(dev_info_t *dip, dev_info_t *rdip,
921     ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp,
922     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
923 {
924 	iommulib_nexops_t *nexops;
925 
926 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
927 	return (nexops->nops_dma_win(dip, rdip, handle, win, offp, lenp,
928 	    cookiep, ccountp));
929 }
930 
931 int
932 iommulib_iommu_dma_map(dev_info_t *dip, dev_info_t *rdip,
933     struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep)
934 {
935 	iommulib_nexops_t *nexops;
936 
937 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
938 	return (nexops->nops_dma_map(dip, rdip, dmareq, handlep));
939 }
940 
941 int
942 iommulib_iommu_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
943     ddi_dma_handle_t handle, enum ddi_dma_ctlops request, off_t *offp,
944     size_t *lenp, caddr_t *objpp, uint_t cache_flags)
945 {
946 	iommulib_nexops_t *nexops;
947 
948 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
949 	return (nexops->nops_dma_mctl(dip, rdip, handle, request, offp, lenp,
950 	    objpp, cache_flags));
951 }
952 
953 int
954 iommulib_iommu_dmahdl_setprivate(dev_info_t *dip, dev_info_t *rdip,
955     ddi_dma_handle_t handle, void *priv)
956 {
957 	iommulib_nexops_t *nexops;
958 
959 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
960 	return (nexops->nops_dmahdl_setprivate(dip, rdip, handle, priv));
961 }
962 
963 void *
964 iommulib_iommu_dmahdl_getprivate(dev_info_t *dip, dev_info_t *rdip,
965     ddi_dma_handle_t handle)
966 {
967 	iommulib_nexops_t *nexops;
968 
969 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
970 	return (nexops->nops_dmahdl_getprivate(dip, rdip, handle));
971 }
972 
973 int
974 iommulib_iommu_getunitid(iommulib_handle_t handle, uint64_t *unitidp)
975 {
976 	iommulib_unit_t *unitp;
977 	uint64_t unitid;
978 
979 	unitp = (iommulib_unit_t *)handle;
980 
981 	ASSERT(unitp);
982 	ASSERT(unitidp);
983 
984 	mutex_enter(&unitp->ilu_lock);
985 	unitid = unitp->ilu_unitid;
986 	mutex_exit(&unitp->ilu_lock);
987 
988 	ASSERT(unitid > 0);
989 	*unitidp = (uint64_t)unitid;
990 
991 	return (DDI_SUCCESS);
992 }
993 
994 dev_info_t *
995 iommulib_iommu_getdip(iommulib_handle_t handle)
996 {
997 	iommulib_unit_t *unitp;
998 	dev_info_t *dip;
999 
1000 	unitp = (iommulib_unit_t *)handle;
1001 
1002 	ASSERT(unitp);
1003 
1004 	mutex_enter(&unitp->ilu_lock);
1005 	dip = unitp->ilu_dip;
1006 	ASSERT(dip);
1007 	ndi_hold_devi(dip);
1008 	mutex_exit(&unitp->ilu_lock);
1009 
1010 	return (dip);
1011 }
1012 
1013 iommulib_ops_t *
1014 iommulib_iommu_getops(iommulib_handle_t handle)
1015 {
1016 	iommulib_unit_t *unitp;
1017 	iommulib_ops_t *ops;
1018 
1019 	unitp = (iommulib_unit_t *)handle;
1020 
1021 	ASSERT(unitp);
1022 
1023 	mutex_enter(&unitp->ilu_lock);
1024 	ops = unitp->ilu_ops;
1025 	mutex_exit(&unitp->ilu_lock);
1026 
1027 	ASSERT(ops);
1028 
1029 	return (ops);
1030 }
1031 
1032 void *
1033 iommulib_iommu_getdata(iommulib_handle_t handle)
1034 {
1035 	iommulib_unit_t *unitp;
1036 	void *data;
1037 
1038 	unitp = (iommulib_unit_t *)handle;
1039 
1040 	ASSERT(unitp);
1041 
1042 	mutex_enter(&unitp->ilu_lock);
1043 	data = unitp->ilu_data;
1044 	mutex_exit(&unitp->ilu_lock);
1045 
1046 	ASSERT(data);
1047 
1048 	return (data);
1049 }
1050