xref: /titanic_50/usr/src/uts/intel/io/iommulib.c (revision 23a1ccea6aac035f084a7a4cdc968687d1b02daf)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 
25 #pragma ident	"@(#)iommulib.c	1.6	08/09/07 SMI"
26 
27 #include <sys/sunddi.h>
28 #include <sys/sunndi.h>
29 #include <sys/errno.h>
30 #include <sys/modctl.h>
31 #include <sys/iommulib.h>
32 
33 /* ******** Type definitions private to this file  ********************** */
34 
/*
 * 1 per IOMMU unit. There may be more than one per dip.
 * Linked into the global iommulib_list (protected by iommulib_lock).
 */
typedef struct iommulib_unit {
	kmutex_t ilu_lock;		/* protects fields of this unit */
	uint64_t ilu_ref;		/* # of devinfo nodes opened against unit */
	uint32_t ilu_unitid;		/* non-zero id assigned at registration */
	dev_info_t *ilu_dip;		/* IOMMU driver devinfo (ndi-held) */
	iommulib_ops_t *ilu_ops;	/* IOMMU driver's ops vector */
	void* ilu_data;			/* driver private data (ilops_data) */
	struct iommulib_unit *ilu_next;
	struct iommulib_unit *ilu_prev;
	iommulib_nexhandle_t ilu_nex;	/* nexus recorded by iommulib_nex_open */
} iommulib_unit_t;
47 
/*
 * Per-nexus (rootnex) registration state.  Linked into
 * iommulib_nexus_list (protected by iommulib_nexus_lock).
 */
typedef struct iommulib_nex {
	dev_info_t *nex_dip;		/* nexus devinfo node (ndi-held) */
	iommulib_nexops_t nex_ops;	/* copy of the registrant's nexops */
	struct iommulib_nex *nex_next;
	struct iommulib_nex *nex_prev;
	uint_t nex_ref;			/* updated atomically in open/close */
} iommulib_nex_t;
55 
/* *********  Globals ************************ */

/* SMBIOS handle made available to IOMMU drivers */
smbios_hdl_t *iommulib_smbios;

/* IOMMU side: following data protected by iommulib_lock */
static kmutex_t iommulib_lock;
static iommulib_unit_t   *iommulib_list;	/* list of registered units */
static uint64_t iommulib_unit_ids = 0;		/* monotonic unit-id source */
static uint64_t iommulib_num_units = 0;		/* current # of units */

/* rootnex side data: list protected by iommulib_nexus_lock */

static kmutex_t iommulib_nexus_lock;
static iommulib_nex_t *iommulib_nexus_list;

/* can be set atomically without lock; blocks new registrations in _fini */
static volatile uint32_t iommulib_fini;

/* debug flag: enables verbose open/close path messages */
static int iommulib_debug;
77 
/*
 * Module linkage information for the kernel.
 * iommulib is a misc module: no dev_ops, loaded as a library by the
 * rootnex and the IOMMU drivers.
 */
static struct modlmisc modlmisc = {
	&mod_miscops, "IOMMU library module"
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modlmisc, NULL
};
88 
/*
 * Module entry point: install the misc module.
 */
int
_init(void)
{
	return (mod_install(&modlinkage));
}
94 
95 int
96 _fini(void)
97 {
98 	mutex_enter(&iommulib_lock);
99 	if (iommulib_list != NULL || iommulib_nexus_list != NULL) {
100 		mutex_exit(&iommulib_lock);
101 		return (EBUSY);
102 	}
103 	iommulib_fini = 1;
104 
105 	mutex_exit(&iommulib_lock);
106 	return (mod_remove(&modlinkage));
107 }
108 
/*
 * Module info entry point.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
114 
115 /*
116  * Routines with iommulib_iommu_* are invoked from the
117  * IOMMU driver.
118  * Routines with iommulib_nex* are invoked from the
119  * nexus driver (typically rootnex)
120  */
121 
122 int
123 iommulib_nexus_register(dev_info_t *dip, iommulib_nexops_t *nexops,
124     iommulib_nexhandle_t *handle)
125 {
126 	iommulib_nex_t *nexp;
127 	int instance = ddi_get_instance(dip);
128 	const char *driver = ddi_driver_name(dip);
129 	dev_info_t *pdip = ddi_get_parent(dip);
130 	const char *f = "iommulib_nexus_register";
131 
132 	ASSERT(nexops);
133 	ASSERT(handle);
134 
135 	*handle = NULL;
136 
137 	/*
138 	 * Root node is never busy held
139 	 */
140 	if (dip != ddi_root_node() && (i_ddi_node_state(dip) < DS_PROBED ||
141 	    !DEVI_BUSY_OWNED(pdip))) {
142 		cmn_err(CE_WARN, "%s: NEXUS devinfo node not in DS_PROBED "
143 		    "or busy held for nexops vector (%p). Failing registration",
144 		    f, (void *)nexops);
145 		return (DDI_FAILURE);
146 	}
147 
148 	if (nexops->nops_vers != IOMMU_NEXOPS_VERSION) {
149 		cmn_err(CE_WARN, "%s: %s%d: Invalid IOMMULIB nexops version "
150 		    "in nexops vector (%p). Failing NEXUS registration",
151 		    f, driver, instance, (void *)nexops);
152 		return (DDI_FAILURE);
153 	}
154 
155 	ASSERT(nexops->nops_data == NULL);
156 
157 	if (nexops->nops_id == NULL) {
158 		cmn_err(CE_WARN, "%s: %s%d: NULL ID field. "
159 		    "Failing registration for nexops vector: %p",
160 		    f, driver, instance, (void *)nexops);
161 		return (DDI_FAILURE);
162 	}
163 
164 	if (nexops->nops_dma_allochdl == NULL) {
165 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_allochdl op. "
166 		    "Failing registration for ops vector: %p", f,
167 		    driver, instance, (void *)nexops);
168 		return (DDI_FAILURE);
169 	}
170 
171 	if (nexops->nops_dma_freehdl == NULL) {
172 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_freehdl op. "
173 		    "Failing registration for ops vector: %p", f,
174 		    driver, instance, (void *)nexops);
175 		return (DDI_FAILURE);
176 	}
177 
178 	if (nexops->nops_dma_bindhdl == NULL) {
179 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_bindhdl op. "
180 		    "Failing registration for ops vector: %p", f,
181 		    driver, instance, (void *)nexops);
182 		return (DDI_FAILURE);
183 	}
184 
185 	if (nexops->nops_dma_sync == NULL) {
186 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_sync op. "
187 		    "Failing registration for ops vector: %p", f,
188 		    driver, instance, (void *)nexops);
189 		return (DDI_FAILURE);
190 	}
191 
192 	if (nexops->nops_dma_reset_cookies == NULL) {
193 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_reset_cookies op. "
194 		    "Failing registration for ops vector: %p", f,
195 		    driver, instance, (void *)nexops);
196 		return (DDI_FAILURE);
197 	}
198 
199 	if (nexops->nops_dma_get_cookies == NULL) {
200 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_get_cookies op. "
201 		    "Failing registration for ops vector: %p", f,
202 		    driver, instance, (void *)nexops);
203 		return (DDI_FAILURE);
204 	}
205 
206 	if (nexops->nops_dma_set_cookies == NULL) {
207 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_set_cookies op. "
208 		    "Failing registration for ops vector: %p", f,
209 		    driver, instance, (void *)nexops);
210 		return (DDI_FAILURE);
211 	}
212 
213 	if (nexops->nops_dma_clear_cookies == NULL) {
214 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_clear_cookies op. "
215 		    "Failing registration for ops vector: %p", f,
216 		    driver, instance, (void *)nexops);
217 		return (DDI_FAILURE);
218 	}
219 
220 	if (nexops->nops_dma_get_sleep_flags == NULL) {
221 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_get_sleep_flags op. "
222 		    "Failing registration for ops vector: %p", f,
223 		    driver, instance, (void *)nexops);
224 		return (DDI_FAILURE);
225 	}
226 
227 	if (nexops->nops_dma_win == NULL) {
228 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_win op. "
229 		    "Failing registration for ops vector: %p", f,
230 		    driver, instance, (void *)nexops);
231 		return (DDI_FAILURE);
232 	}
233 
234 	if (nexops->nops_dmahdl_setprivate == NULL) {
235 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dmahdl_setprivate op. "
236 		    "Failing registration for ops vector: %p", f,
237 		    driver, instance, (void *)nexops);
238 		return (DDI_FAILURE);
239 	}
240 
241 	if (nexops->nops_dmahdl_getprivate == NULL) {
242 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dmahdl_getprivate op. "
243 		    "Failing registration for ops vector: %p", f,
244 		    driver, instance, (void *)nexops);
245 		return (DDI_FAILURE);
246 	}
247 
248 	/* Check for legacy ops */
249 	if (nexops->nops_dma_map == NULL) {
250 		cmn_err(CE_WARN, "%s: %s%d: NULL legacy nops_dma_map op. "
251 		    "Failing registration for ops vector: %p", f,
252 		    driver, instance, (void *)nexops);
253 		return (DDI_FAILURE);
254 	}
255 
256 	if (nexops->nops_dma_mctl == NULL) {
257 		cmn_err(CE_WARN, "%s: %s%d: NULL legacy nops_dma_mctl op. "
258 		    "Failing registration for ops vector: %p", f,
259 		    driver, instance, (void *)nexops);
260 		return (DDI_FAILURE);
261 	}
262 
263 	nexp = kmem_zalloc(sizeof (iommulib_nex_t), KM_SLEEP);
264 
265 	mutex_enter(&iommulib_lock);
266 	if (iommulib_fini == 1) {
267 		mutex_exit(&iommulib_lock);
268 		cmn_err(CE_WARN, "%s: IOMMULIB unloading. "
269 		    "Failing NEXUS register.", f);
270 		kmem_free(nexp, sizeof (iommulib_nex_t));
271 		return (DDI_FAILURE);
272 	}
273 
274 	/*
275 	 * fini/register race conditions have been handled. Now create the
276 	 * nexus struct
277 	 */
278 	ndi_hold_devi(dip);
279 	nexp->nex_dip = dip;
280 	nexp->nex_ops = *nexops;
281 
282 	mutex_enter(&iommulib_nexus_lock);
283 	nexp->nex_next = iommulib_nexus_list;
284 	iommulib_nexus_list = nexp;
285 	nexp->nex_prev = NULL;
286 
287 	if (nexp->nex_next != NULL)
288 		nexp->nex_next->nex_prev = nexp;
289 
290 	nexp->nex_ref = 0;
291 
292 	/*
293 	 * The nexus device won't be controlled by an IOMMU.
294 	 */
295 	DEVI(dip)->devi_iommulib_handle = IOMMU_HANDLE_UNUSED;
296 
297 	DEVI(dip)->devi_iommulib_nex_handle = nexp;
298 
299 	mutex_exit(&iommulib_nexus_lock);
300 	mutex_exit(&iommulib_lock);
301 
302 	cmn_err(CE_NOTE, "!%s: %s%d: Succesfully registered NEXUS %s "
303 	    "nexops=%p", f, driver, instance, ddi_node_name(dip),
304 	    (void *)nexops);
305 
306 	*handle = nexp;
307 
308 	return (DDI_SUCCESS);
309 }
310 
311 int
312 iommulib_nexus_unregister(iommulib_nexhandle_t handle)
313 {
314 	dev_info_t *dip;
315 	int instance;
316 	const char *driver;
317 	iommulib_nex_t *nexp = (iommulib_nex_t *)handle;
318 	const char *f = "iommulib_nexus_unregister";
319 
320 	ASSERT(nexp);
321 
322 	if (nexp->nex_ref != 0)
323 		return (DDI_FAILURE);
324 
325 	mutex_enter(&iommulib_nexus_lock);
326 
327 	dip = nexp->nex_dip;
328 	driver = ddi_driver_name(dip);
329 	instance = ddi_get_instance(dip);
330 
331 	/* A future enhancement would be to add ref-counts */
332 
333 	if (nexp->nex_prev == NULL) {
334 		iommulib_nexus_list = nexp->nex_next;
335 	} else {
336 		nexp->nex_prev->nex_next = nexp->nex_next;
337 	}
338 
339 	if (nexp->nex_next != NULL)
340 		nexp->nex_next->nex_prev = nexp->nex_prev;
341 
342 	mutex_exit(&iommulib_nexus_lock);
343 
344 	kmem_free(nexp, sizeof (iommulib_nex_t));
345 
346 	cmn_err(CE_NOTE, "!%s: %s%d: NEXUS (%s) handle successfully "
347 	    "unregistered from IOMMULIB", f, driver, instance,
348 	    ddi_node_name(dip));
349 
350 	ndi_rele_devi(dip);
351 
352 	return (DDI_SUCCESS);
353 }
354 
355 int
356 iommulib_iommu_register(dev_info_t *dip, iommulib_ops_t *ops,
357     iommulib_handle_t *handle)
358 {
359 	const char *vendor;
360 	iommulib_unit_t *unitp;
361 	int instance = ddi_get_instance(dip);
362 	const char *driver = ddi_driver_name(dip);
363 	const char *f = "iommulib_register";
364 
365 	ASSERT(ops);
366 	ASSERT(handle);
367 
368 	if (ops->ilops_vers != IOMMU_OPS_VERSION) {
369 		cmn_err(CE_WARN, "%s: %s%d: Invalid IOMMULIB ops version "
370 		    "in ops vector (%p). Failing registration", f, driver,
371 		    instance, (void *)ops);
372 		return (DDI_FAILURE);
373 	}
374 
375 	switch (ops->ilops_vendor) {
376 	case AMD_IOMMU:
377 		vendor = "AMD";
378 		break;
379 	case INTEL_IOMMU:
380 		vendor = "Intel";
381 		break;
382 	case INVALID_VENDOR:
383 		cmn_err(CE_WARN, "%s: %s%d: vendor field (%x) not initialized. "
384 		    "Failing registration for ops vector: %p", f,
385 		    driver, instance, ops->ilops_vendor, (void *)ops);
386 		return (DDI_FAILURE);
387 	default:
388 		cmn_err(CE_WARN, "%s: %s%d: Invalid vendor field (%x). "
389 		    "Failing registration for ops vector: %p", f,
390 		    driver, instance, ops->ilops_vendor, (void *)ops);
391 		return (DDI_FAILURE);
392 	}
393 
394 	cmn_err(CE_NOTE, "!%s: %s%d: Detected IOMMU registration from vendor"
395 	    " %s", f, driver, instance, vendor);
396 
397 	if (ops->ilops_data == NULL) {
398 		cmn_err(CE_WARN, "%s: %s%d: NULL IOMMU data field. "
399 		    "Failing registration for ops vector: %p", f,
400 		    driver, instance, (void *)ops);
401 		return (DDI_FAILURE);
402 	}
403 
404 	if (ops->ilops_id == NULL) {
405 		cmn_err(CE_WARN, "%s: %s%d: NULL ID field. "
406 		    "Failing registration for ops vector: %p", f,
407 		    driver, instance, (void *)ops);
408 		return (DDI_FAILURE);
409 	}
410 
411 	if (ops->ilops_probe == NULL) {
412 		cmn_err(CE_WARN, "%s: %s%d: NULL probe op. "
413 		    "Failing registration for ops vector: %p", f,
414 		    driver, instance, (void *)ops);
415 		return (DDI_FAILURE);
416 	}
417 
418 	if (ops->ilops_dma_allochdl == NULL) {
419 		cmn_err(CE_WARN, "%s: %s%d: NULL dma_allochdl op. "
420 		    "Failing registration for ops vector: %p", f,
421 		    driver, instance, (void *)ops);
422 		return (DDI_FAILURE);
423 	}
424 
425 	if (ops->ilops_dma_freehdl == NULL) {
426 		cmn_err(CE_WARN, "%s: %s%d: NULL dma_freehdl op. "
427 		    "Failing registration for ops vector: %p", f,
428 		    driver, instance, (void *)ops);
429 		return (DDI_FAILURE);
430 	}
431 
432 	if (ops->ilops_dma_bindhdl == NULL) {
433 		cmn_err(CE_WARN, "%s: %s%d: NULL dma_bindhdl op. "
434 		    "Failing registration for ops vector: %p", f,
435 		    driver, instance, (void *)ops);
436 		return (DDI_FAILURE);
437 	}
438 
439 	if (ops->ilops_dma_sync == NULL) {
440 		cmn_err(CE_WARN, "%s: %s%d: NULL dma_sync op. "
441 		    "Failing registration for ops vector: %p", f,
442 		    driver, instance, (void *)ops);
443 		return (DDI_FAILURE);
444 	}
445 
446 	if (ops->ilops_dma_win == NULL) {
447 		cmn_err(CE_WARN, "%s: %s%d: NULL dma_win op. "
448 		    "Failing registration for ops vector: %p", f,
449 		    driver, instance, (void *)ops);
450 		return (DDI_FAILURE);
451 	}
452 
453 	/* Check for legacy ops */
454 	if (ops->ilops_dma_map == NULL) {
455 		cmn_err(CE_WARN, "%s: %s%d: NULL legacy dma_map op. "
456 		    "Failing registration for ops vector: %p", f,
457 		    driver, instance, (void *)ops);
458 		return (DDI_FAILURE);
459 	}
460 
461 	if (ops->ilops_dma_mctl == NULL) {
462 		cmn_err(CE_WARN, "%s: %s%d: NULL legacy dma_mctl op. "
463 		    "Failing registration for ops vector: %p", f,
464 		    driver, instance, (void *)ops);
465 		return (DDI_FAILURE);
466 	}
467 
468 	unitp = kmem_zalloc(sizeof (iommulib_unit_t), KM_SLEEP);
469 	mutex_enter(&iommulib_lock);
470 	if (iommulib_fini == 1) {
471 		mutex_exit(&iommulib_lock);
472 		cmn_err(CE_WARN, "%s: IOMMULIB unloading. Failing register.",
473 		    f);
474 		kmem_free(unitp, sizeof (iommulib_unit_t));
475 		return (DDI_FAILURE);
476 	}
477 
478 	/*
479 	 * fini/register race conditions have been handled. Now create the
480 	 * IOMMU unit
481 	 */
482 	mutex_init(&unitp->ilu_lock, NULL, MUTEX_DEFAULT, NULL);
483 
484 	mutex_enter(&unitp->ilu_lock);
485 	unitp->ilu_unitid = ++iommulib_unit_ids;
486 	unitp->ilu_ref = 0;
487 	ndi_hold_devi(dip);
488 	unitp->ilu_dip = dip;
489 	unitp->ilu_ops = ops;
490 	unitp->ilu_data = ops->ilops_data;
491 
492 	unitp->ilu_next = iommulib_list;
493 	iommulib_list = unitp;
494 	unitp->ilu_prev = NULL;
495 	if (unitp->ilu_next)
496 		unitp->ilu_next->ilu_prev = unitp;
497 
498 	/*
499 	 * The IOMMU device itself is not controlled by an IOMMU.
500 	 */
501 	DEVI(dip)->devi_iommulib_handle = IOMMU_HANDLE_UNUSED;
502 
503 	mutex_exit(&unitp->ilu_lock);
504 
505 	iommulib_num_units++;
506 
507 	*handle = unitp;
508 
509 	mutex_exit(&iommulib_lock);
510 
511 	cmn_err(CE_NOTE, "!%s: %s%d: Succesfully registered IOMMU unit "
512 	    "from vendor=%s, ops=%p, data=%p, IOMMULIB unitid=%u",
513 	    f, driver, instance, vendor, (void *)ops, (void *)unitp->ilu_data,
514 	    unitp->ilu_unitid);
515 
516 	return (DDI_SUCCESS);
517 }
518 
519 int
520 iommulib_iommu_unregister(iommulib_handle_t handle)
521 {
522 	uint32_t unitid;
523 	dev_info_t *dip;
524 	int instance;
525 	const char *driver;
526 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
527 	const char *f = "iommulib_unregister";
528 
529 	ASSERT(unitp);
530 
531 	mutex_enter(&iommulib_lock);
532 	mutex_enter(&unitp->ilu_lock);
533 
534 	unitid = unitp->ilu_unitid;
535 	dip = unitp->ilu_dip;
536 	driver = ddi_driver_name(dip);
537 	instance = ddi_get_instance(dip);
538 
539 	if (unitp->ilu_ref != 0) {
540 		mutex_exit(&unitp->ilu_lock);
541 		mutex_exit(&iommulib_lock);
542 		cmn_err(CE_WARN, "%s: %s%d: IOMMULIB handle is busy. Cannot "
543 		    "unregister IOMMULIB unitid %u",
544 		    f, driver, instance, unitid);
545 		return (DDI_FAILURE);
546 	}
547 	unitp->ilu_unitid = 0;
548 	ASSERT(unitp->ilu_ref == 0);
549 
550 	if (unitp->ilu_prev == NULL) {
551 		iommulib_list = unitp->ilu_next;
552 		unitp->ilu_next->ilu_prev = NULL;
553 	} else {
554 		unitp->ilu_prev->ilu_next = unitp->ilu_next;
555 		unitp->ilu_next->ilu_prev = unitp->ilu_prev;
556 	}
557 
558 	iommulib_num_units--;
559 
560 	mutex_exit(&unitp->ilu_lock);
561 
562 	mutex_destroy(&unitp->ilu_lock);
563 	kmem_free(unitp, sizeof (iommulib_unit_t));
564 
565 	mutex_exit(&iommulib_lock);
566 
567 	cmn_err(CE_WARN, "%s: %s%d: IOMMULIB handle (unitid=%u) successfully "
568 	    "unregistered", f, driver, instance, unitid);
569 
570 	ndi_rele_devi(dip);
571 
572 	return (DDI_SUCCESS);
573 }
574 
/*
 * iommulib_nex_open()
 *
 * Called for a device (rdip) under nexus dip.  Polls every registered
 * IOMMU unit's probe op to find the unit (if any) that controls rdip.
 * On success, records the unit in rdip's devi_iommulib_handle, bumps the
 * unit's ilu_ref and the nexus' nex_ref, and returns DDI_SUCCESS.
 * Otherwise marks rdip as IOMMU_HANDLE_UNUSED and returns DDI_ENOTSUP.
 */
int
iommulib_nex_open(dev_info_t *dip, dev_info_t *rdip)
{
	iommulib_unit_t *unitp;
	int instance = ddi_get_instance(rdip);
	const char *driver = ddi_driver_name(rdip);
	const char *f = "iommulib_nex_open";

	ASSERT(DEVI(dip)->devi_iommulib_nex_handle != NULL);
	ASSERT(DEVI(rdip)->devi_iommulib_handle == NULL);

	/* prevent use of IOMMU for AMD IOMMU's DMA */
	if (strcmp(driver, "amd_iommu") == 0) {
		DEVI(rdip)->devi_iommulib_handle = IOMMU_HANDLE_UNUSED;
		return (DDI_ENOTSUP);
	}

	/*
	 * Use the probe entry point to determine in a hardware specific
	 * manner whether this dip is controlled by an IOMMU. If yes,
	 * return the handle corresponding to the IOMMU unit.
	 */

	mutex_enter(&iommulib_lock);
	for (unitp = iommulib_list; unitp; unitp = unitp->ilu_next) {
		if (unitp->ilu_ops->ilops_probe(unitp, rdip) == DDI_SUCCESS)
			break;
	}

	if (unitp == NULL) {
		/* No unit claimed rdip; not an error unless debugging */
		mutex_exit(&iommulib_lock);
		if (iommulib_debug) {
			char *buf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
			cmn_err(CE_WARN, "%s: %s%d: devinfo node (%p): is not "
			    "controlled by an IOMMU: path=%s", f, driver,
			    instance, (void *)rdip, ddi_pathname(rdip, buf));
			kmem_free(buf, MAXPATHLEN);
		}
		DEVI(rdip)->devi_iommulib_handle = IOMMU_HANDLE_UNUSED;
		return (DDI_ENOTSUP);
	}

	/* Unit found: take a reference and publish it on rdip */
	mutex_enter(&unitp->ilu_lock);
	unitp->ilu_nex = DEVI(dip)->devi_iommulib_nex_handle;
	unitp->ilu_ref++;
	DEVI(rdip)->devi_iommulib_handle = unitp;
	mutex_exit(&unitp->ilu_lock);
	mutex_exit(&iommulib_lock);

	/* nex_ref is maintained atomically, outside the locks */
	atomic_inc_uint(&DEVI(dip)->devi_iommulib_nex_handle->nex_ref);

	return (DDI_SUCCESS);
}
628 
/*
 * iommulib_nex_close()
 *
 * Undo iommulib_nex_open() for rdip: clear devi_iommulib_handle, drop
 * the unit's ilu_ref and the nexus' nex_ref.  Caller must have
 * previously opened rdip (asserted via IOMMU_USED).
 */
void
iommulib_nex_close(dev_info_t *rdip)
{
	iommulib_unit_t *unitp;
	const char *driver;
	int instance;
	uint32_t unitid;
	iommulib_nex_t *nexp;
	const char *f = "iommulib_nex_close";

	ASSERT(IOMMU_USED(rdip));

	unitp = DEVI(rdip)->devi_iommulib_handle;

	mutex_enter(&iommulib_lock);
	mutex_enter(&unitp->ilu_lock);

	/* Capture identifying info before the unit can go away */
	nexp = (iommulib_nex_t *)unitp->ilu_nex;
	DEVI(rdip)->devi_iommulib_handle = NULL;

	unitid = unitp->ilu_unitid;
	driver = ddi_driver_name(unitp->ilu_dip);
	instance = ddi_get_instance(unitp->ilu_dip);

	unitp->ilu_ref--;
	mutex_exit(&unitp->ilu_lock);
	mutex_exit(&iommulib_lock);

	/* nex_ref is maintained atomically, outside the locks */
	atomic_dec_uint(&nexp->nex_ref);

	if (iommulib_debug) {
		char *buf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
		(void) ddi_pathname(rdip, buf);
		cmn_err(CE_NOTE, "%s: %s%d: closing IOMMU for dip (%p), "
		    "unitid=%u rdip path = %s", f, driver, instance,
		    (void *)rdip, unitid, buf);
		kmem_free(buf, MAXPATHLEN);
	}
}
668 
669 int
670 iommulib_nexdma_allochdl(dev_info_t *dip, dev_info_t *rdip,
671     ddi_dma_attr_t *attr, int (*waitfp)(caddr_t),
672     caddr_t arg, ddi_dma_handle_t *dma_handlep)
673 {
674 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
675 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
676 
677 	ASSERT(unitp);
678 
679 	/* No need to grab lock - the handle is reference counted */
680 	return (unitp->ilu_ops->ilops_dma_allochdl(handle, dip, rdip,
681 	    attr, waitfp, arg, dma_handlep));
682 }
683 
684 int
685 iommulib_nexdma_freehdl(dev_info_t *dip, dev_info_t *rdip,
686     ddi_dma_handle_t dma_handle)
687 {
688 	int error;
689 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
690 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
691 
692 	ASSERT(unitp);
693 
694 	/* No need to grab lock - the handle is reference counted */
695 	error = unitp->ilu_ops->ilops_dma_freehdl(handle, dip,
696 	    rdip, dma_handle);
697 
698 	return (error);
699 }
700 
701 int
702 iommulib_nexdma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
703     ddi_dma_handle_t dma_handle, struct ddi_dma_req *dmareq,
704     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
705 {
706 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
707 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
708 
709 	ASSERT(unitp);
710 
711 	/* No need to grab lock - the handle is reference counted */
712 	return (unitp->ilu_ops->ilops_dma_bindhdl(handle, dip, rdip, dma_handle,
713 	    dmareq, cookiep, ccountp));
714 }
715 
716 int
717 iommulib_nexdma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
718     ddi_dma_handle_t dma_handle)
719 {
720 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
721 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
722 
723 	ASSERT(unitp);
724 
725 	/* No need to grab lock - the handle is reference counted */
726 	return (unitp->ilu_ops->ilops_dma_unbindhdl(handle, dip, rdip,
727 	    dma_handle));
728 }
729 
730 int
731 iommulib_nexdma_sync(dev_info_t *dip, dev_info_t *rdip,
732     ddi_dma_handle_t dma_handle, off_t off, size_t len,
733     uint_t cache_flags)
734 {
735 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
736 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
737 
738 	ASSERT(unitp);
739 
740 	/* No need to grab lock - the handle is reference counted */
741 	return (unitp->ilu_ops->ilops_dma_sync(handle, dip, rdip, dma_handle,
742 	    off, len, cache_flags));
743 }
744 
745 int
746 iommulib_nexdma_win(dev_info_t *dip, dev_info_t *rdip,
747     ddi_dma_handle_t dma_handle, uint_t win, off_t *offp, size_t *lenp,
748     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
749 {
750 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
751 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
752 
753 	ASSERT(unitp);
754 
755 	/* No need to grab lock - the handle is reference counted */
756 	return (unitp->ilu_ops->ilops_dma_win(handle, dip, rdip, dma_handle,
757 	    win, offp, lenp, cookiep, ccountp));
758 }
759 
760 /* Obsolete DMA routines */
761 
762 int
763 iommulib_nexdma_map(dev_info_t *dip, dev_info_t *rdip,
764     struct ddi_dma_req *dmareq, ddi_dma_handle_t *dma_handle)
765 {
766 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
767 	iommulib_unit_t *unitp = handle;
768 
769 	ASSERT(unitp);
770 
771 	/* No need to grab lock - the handle is reference counted */
772 	return (unitp->ilu_ops->ilops_dma_map(handle, dip, rdip, dmareq,
773 	    dma_handle));
774 }
775 
776 int
777 iommulib_nexdma_mctl(dev_info_t *dip, dev_info_t *rdip,
778     ddi_dma_handle_t dma_handle, enum ddi_dma_ctlops request,
779     off_t *offp, size_t *lenp, caddr_t *objpp, uint_t cache_flags)
780 {
781 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
782 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
783 
784 	ASSERT(unitp);
785 
786 	/* No need to grab lock - the handle is reference counted */
787 	return (unitp->ilu_ops->ilops_dma_mctl(handle, dip, rdip, dma_handle,
788 	    request, offp, lenp, objpp, cache_flags));
789 }
790 
791 int
792 iommulib_nexdma_mapobject(dev_info_t *dip, dev_info_t *rdip,
793     ddi_dma_handle_t dma_handle, struct ddi_dma_req *dmareq,
794     ddi_dma_obj_t *dmao)
795 {
796 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
797 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
798 
799 	return (unitp->ilu_ops->ilops_dma_mapobject(handle, dip, rdip,
800 	    dma_handle, dmareq, dmao));
801 }
802 
803 int
804 iommulib_nexdma_unmapobject(dev_info_t *dip, dev_info_t *rdip,
805     ddi_dma_handle_t dma_handle, ddi_dma_obj_t *dmao)
806 {
807 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
808 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
809 
810 	return (unitp->ilu_ops->ilops_dma_unmapobject(handle, dip, rdip,
811 	    dma_handle, dmao));
812 }
813 
814 /* Utility routines invoked by IOMMU drivers */
815 int
816 iommulib_iommu_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
817     ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
818     ddi_dma_handle_t *handlep)
819 {
820 	iommulib_nexops_t *nexops;
821 
822 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
823 	return (nexops->nops_dma_allochdl(dip, rdip, attr, waitfp, arg,
824 	    handlep));
825 }
826 
827 int
828 iommulib_iommu_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
829     ddi_dma_handle_t handle)
830 {
831 	iommulib_nexops_t *nexops;
832 
833 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
834 	ASSERT(nexops);
835 	return (nexops->nops_dma_freehdl(dip, rdip, handle));
836 }
837 
838 int
839 iommulib_iommu_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
840     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
841     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
842 {
843 	iommulib_nexops_t *nexops;
844 
845 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
846 	return (nexops->nops_dma_bindhdl(dip, rdip, handle, dmareq,
847 	    cookiep, ccountp));
848 }
849 
850 int
851 iommulib_iommu_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
852     ddi_dma_handle_t handle)
853 {
854 	iommulib_nexops_t *nexops;
855 
856 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
857 	return (nexops->nops_dma_unbindhdl(dip, rdip, handle));
858 }
859 
860 void
861 iommulib_iommu_dma_reset_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
862 {
863 	iommulib_nexops_t *nexops;
864 
865 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
866 	nexops->nops_dma_reset_cookies(dip, handle);
867 }
868 
869 int
870 iommulib_iommu_dma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
871     ddi_dma_cookie_t **cookiepp, uint_t *ccountp)
872 {
873 	iommulib_nexops_t *nexops;
874 
875 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
876 	return (nexops->nops_dma_get_cookies(dip, handle, cookiepp, ccountp));
877 }
878 
879 int
880 iommulib_iommu_dma_set_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
881     ddi_dma_cookie_t *cookiep, uint_t ccount)
882 {
883 	iommulib_nexops_t *nexops;
884 
885 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
886 	return (nexops->nops_dma_set_cookies(dip, handle, cookiep, ccount));
887 }
888 
889 int
890 iommulib_iommu_dma_clear_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
891 {
892 	iommulib_nexops_t *nexops;
893 
894 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
895 	return (nexops->nops_dma_clear_cookies(dip, handle));
896 }
897 
898 int
899 iommulib_iommu_dma_get_sleep_flags(dev_info_t *dip, ddi_dma_handle_t handle)
900 {
901 	iommulib_nexops_t *nexops;
902 
903 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
904 	return (nexops->nops_dma_get_sleep_flags(handle));
905 }
906 
907 int
908 iommulib_iommu_dma_sync(dev_info_t *dip, dev_info_t *rdip,
909     ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags)
910 {
911 	iommulib_nexops_t *nexops;
912 
913 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
914 	return (nexops->nops_dma_sync(dip, rdip, handle, off, len,
915 	    cache_flags));
916 }
917 
918 int
919 iommulib_iommu_dma_win(dev_info_t *dip, dev_info_t *rdip,
920     ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp,
921     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
922 {
923 	iommulib_nexops_t *nexops;
924 
925 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
926 	return (nexops->nops_dma_win(dip, rdip, handle, win, offp, lenp,
927 	    cookiep, ccountp));
928 }
929 
930 int
931 iommulib_iommu_dma_map(dev_info_t *dip, dev_info_t *rdip,
932     struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep)
933 {
934 	iommulib_nexops_t *nexops;
935 
936 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
937 	return (nexops->nops_dma_map(dip, rdip, dmareq, handlep));
938 }
939 
940 int
941 iommulib_iommu_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
942     ddi_dma_handle_t handle, enum ddi_dma_ctlops request, off_t *offp,
943     size_t *lenp, caddr_t *objpp, uint_t cache_flags)
944 {
945 	iommulib_nexops_t *nexops;
946 
947 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
948 	return (nexops->nops_dma_mctl(dip, rdip, handle, request, offp, lenp,
949 	    objpp, cache_flags));
950 }
951 
952 int
953 iommulib_iommu_dmahdl_setprivate(dev_info_t *dip, dev_info_t *rdip,
954     ddi_dma_handle_t handle, void *priv)
955 {
956 	iommulib_nexops_t *nexops;
957 
958 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
959 	return (nexops->nops_dmahdl_setprivate(dip, rdip, handle, priv));
960 }
961 
962 void *
963 iommulib_iommu_dmahdl_getprivate(dev_info_t *dip, dev_info_t *rdip,
964     ddi_dma_handle_t handle)
965 {
966 	iommulib_nexops_t *nexops;
967 
968 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
969 	return (nexops->nops_dmahdl_getprivate(dip, rdip, handle));
970 }
971 
972 int
973 iommulib_iommu_getunitid(iommulib_handle_t handle, uint64_t *unitidp)
974 {
975 	iommulib_unit_t *unitp;
976 	uint64_t unitid;
977 
978 	unitp = (iommulib_unit_t *)handle;
979 
980 	ASSERT(unitp);
981 	ASSERT(unitidp);
982 
983 	mutex_enter(&unitp->ilu_lock);
984 	unitid = unitp->ilu_unitid;
985 	mutex_exit(&unitp->ilu_lock);
986 
987 	ASSERT(unitid > 0);
988 	*unitidp = (uint64_t)unitid;
989 
990 	return (DDI_SUCCESS);
991 }
992 
993 dev_info_t *
994 iommulib_iommu_getdip(iommulib_handle_t handle)
995 {
996 	iommulib_unit_t *unitp;
997 	dev_info_t *dip;
998 
999 	unitp = (iommulib_unit_t *)handle;
1000 
1001 	ASSERT(unitp);
1002 
1003 	mutex_enter(&unitp->ilu_lock);
1004 	dip = unitp->ilu_dip;
1005 	ASSERT(dip);
1006 	ndi_hold_devi(dip);
1007 	mutex_exit(&unitp->ilu_lock);
1008 
1009 	return (dip);
1010 }
1011 
1012 iommulib_ops_t *
1013 iommulib_iommu_getops(iommulib_handle_t handle)
1014 {
1015 	iommulib_unit_t *unitp;
1016 	iommulib_ops_t *ops;
1017 
1018 	unitp = (iommulib_unit_t *)handle;
1019 
1020 	ASSERT(unitp);
1021 
1022 	mutex_enter(&unitp->ilu_lock);
1023 	ops = unitp->ilu_ops;
1024 	mutex_exit(&unitp->ilu_lock);
1025 
1026 	ASSERT(ops);
1027 
1028 	return (ops);
1029 }
1030 
1031 void *
1032 iommulib_iommu_getdata(iommulib_handle_t handle)
1033 {
1034 	iommulib_unit_t *unitp;
1035 	void *data;
1036 
1037 	unitp = (iommulib_unit_t *)handle;
1038 
1039 	ASSERT(unitp);
1040 
1041 	mutex_enter(&unitp->ilu_lock);
1042 	data = unitp->ilu_data;
1043 	mutex_exit(&unitp->ilu_lock);
1044 
1045 	ASSERT(data);
1046 
1047 	return (data);
1048 }
1049