/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
 */

#pragma ident	"@(#)iommulib.c	1.6	08/09/07 SMI"

#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/errno.h>
#include <sys/modctl.h>
#include <sys/iommulib.h>

/* ******** Type definitions private to this file ********************** */

/* 1 per IOMMU unit. There may be more than one per dip */
typedef struct iommulib_unit {
	kmutex_t ilu_lock;
	uint64_t ilu_ref;
	uint32_t ilu_unitid;
	dev_info_t *ilu_dip;
	iommulib_ops_t *ilu_ops;
	void *ilu_data;
	struct iommulib_unit *ilu_next;
	struct iommulib_unit *ilu_prev;
	iommulib_nexhandle_t ilu_nex;
} iommulib_unit_t;

typedef struct iommulib_nex {
	dev_info_t *nex_dip;
	iommulib_nexops_t nex_ops;
	struct iommulib_nex *nex_next;
	struct iommulib_nex *nex_prev;
	uint_t nex_ref;
} iommulib_nex_t;

/* ********* Globals ************************ */

/* For IOMMU drivers */
smbios_hdl_t *iommulib_smbios;

/* IOMMU side: Following data protected by lock */
static kmutex_t iommulib_lock;
static iommulib_unit_t *iommulib_list;
static uint64_t iommulib_unit_ids = 0;
static uint64_t iommulib_num_units = 0;

/* rootnex side data */

static kmutex_t iommulib_nexus_lock;
static iommulib_nex_t *iommulib_nexus_list;

/* can be set atomically without lock */
static volatile uint32_t iommulib_fini;

/* debug flag */
static int iommulib_debug;

/*
 * Module linkage information for the kernel.
 */
static struct modlmisc modlmisc = {
	&mod_miscops, "IOMMU library module"
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modlmisc, NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_fini(void)
{
	mutex_enter(&iommulib_lock);
	if (iommulib_list != NULL || iommulib_nexus_list != NULL) {
		mutex_exit(&iommulib_lock);
		return (EBUSY);
	}
	iommulib_fini = 1;

	mutex_exit(&iommulib_lock);
	return (mod_remove(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Routines prefixed with iommulib_iommu_* are invoked from the
 * IOMMU driver.
 * Routines prefixed with iommulib_nex* are invoked from the
 * nexus driver (typically rootnex).
 */
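/*
 * Illustrative sketch (not part of this module): an IOMMU driver would
 * typically register with IOMMULIB from its attach(9E) entry point and
 * unregister from detach(9E). The my_* names below are hypothetical and
 * the ops vector is abbreviated; iommulib_iommu_register() below lists
 * the fields it validates.
 *
 *	static iommulib_handle_t my_handle;
 *
 *	static iommulib_ops_t my_ops = {
 *		.ilops_vers = IOMMU_OPS_VERSION,
 *		.ilops_vendor = AMD_IOMMU,
 *		.ilops_id = "my_iommu",
 *		.ilops_data = &my_soft_state,
 *		.ilops_probe = my_probe,
 *		(remaining ilops_dma_* entries omitted for brevity)
 *	};
 *
 *	static int
 *	my_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
 *	{
 *		...
 *		if (iommulib_iommu_register(dip, &my_ops, &my_handle) !=
 *		    DDI_SUCCESS)
 *			return (DDI_FAILURE);
 *		...
 *	}
 */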

int
iommulib_nexus_register(dev_info_t *dip, iommulib_nexops_t *nexops,
    iommulib_nexhandle_t *handle)
{
	iommulib_nex_t *nexp;
	int instance = ddi_get_instance(dip);
	const char *driver = ddi_driver_name(dip);
	dev_info_t *pdip = ddi_get_parent(dip);
	const char *f = "iommulib_nexus_register";

	ASSERT(nexops);
	ASSERT(handle);

	*handle = NULL;

	/*
	 * Root node is never busy held
	 */
	if (dip != ddi_root_node() && (i_ddi_node_state(dip) < DS_PROBED ||
	    !DEVI_BUSY_OWNED(pdip))) {
		cmn_err(CE_WARN, "%s: NEXUS devinfo node not in DS_PROBED "
		    "or busy held for nexops vector (%p). Failing registration",
		    f, (void *)nexops);
		return (DDI_FAILURE);
	}

	if (nexops->nops_vers != IOMMU_NEXOPS_VERSION) {
		cmn_err(CE_WARN, "%s: %s%d: Invalid IOMMULIB nexops version "
		    "in nexops vector (%p). Failing NEXUS registration",
		    f, driver, instance, (void *)nexops);
		return (DDI_FAILURE);
	}

	ASSERT(nexops->nops_data == NULL);

	if (nexops->nops_id == NULL) {
		cmn_err(CE_WARN, "%s: %s%d: NULL ID field. "
		    "Failing registration for nexops vector: %p",
		    f, driver, instance, (void *)nexops);
		return (DDI_FAILURE);
	}

	if (nexops->nops_dma_allochdl == NULL) {
		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_allochdl op. "
		    "Failing registration for ops vector: %p", f,
		    driver, instance, (void *)nexops);
		return (DDI_FAILURE);
	}

	if (nexops->nops_dma_freehdl == NULL) {
		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_freehdl op. "
		    "Failing registration for ops vector: %p", f,
		    driver, instance, (void *)nexops);
		return (DDI_FAILURE);
	}

	if (nexops->nops_dma_bindhdl == NULL) {
		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_bindhdl op. "
		    "Failing registration for ops vector: %p", f,
		    driver, instance, (void *)nexops);
		return (DDI_FAILURE);
	}

	if (nexops->nops_dma_sync == NULL) {
		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_sync op. "
		    "Failing registration for ops vector: %p", f,
		    driver, instance, (void *)nexops);
		return (DDI_FAILURE);
	}

	if (nexops->nops_dma_reset_cookies == NULL) {
		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_reset_cookies op. "
		    "Failing registration for ops vector: %p", f,
		    driver, instance, (void *)nexops);
		return (DDI_FAILURE);
	}

	if (nexops->nops_dma_get_cookies == NULL) {
		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_get_cookies op. "
		    "Failing registration for ops vector: %p", f,
		    driver, instance, (void *)nexops);
		return (DDI_FAILURE);
	}

	if (nexops->nops_dma_set_cookies == NULL) {
		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_set_cookies op. "
		    "Failing registration for ops vector: %p", f,
		    driver, instance, (void *)nexops);
		return (DDI_FAILURE);
	}

	if (nexops->nops_dma_clear_cookies == NULL) {
		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_clear_cookies op. "
		    "Failing registration for ops vector: %p", f,
		    driver, instance, (void *)nexops);
		return (DDI_FAILURE);
	}

	if (nexops->nops_dma_get_sleep_flags == NULL) {
		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_get_sleep_flags op. "
		    "Failing registration for ops vector: %p", f,
		    driver, instance, (void *)nexops);
		return (DDI_FAILURE);
	}

	if (nexops->nops_dma_win == NULL) {
		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_win op. "
		    "Failing registration for ops vector: %p", f,
		    driver, instance, (void *)nexops);
		return (DDI_FAILURE);
	}

	if (nexops->nops_dmahdl_setprivate == NULL) {
		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dmahdl_setprivate op. "
		    "Failing registration for ops vector: %p", f,
		    driver, instance, (void *)nexops);
		return (DDI_FAILURE);
	}

	if (nexops->nops_dmahdl_getprivate == NULL) {
		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dmahdl_getprivate op. "
		    "Failing registration for ops vector: %p", f,
		    driver, instance, (void *)nexops);
		return (DDI_FAILURE);
	}

	nexp = kmem_zalloc(sizeof (iommulib_nex_t), KM_SLEEP);

	mutex_enter(&iommulib_lock);
	if (iommulib_fini == 1) {
		mutex_exit(&iommulib_lock);
		cmn_err(CE_WARN, "%s: IOMMULIB unloading. "
		    "Failing NEXUS register.", f);
		kmem_free(nexp, sizeof (iommulib_nex_t));
		return (DDI_FAILURE);
	}

	/*
	 * fini/register race conditions have been handled. Now create the
	 * nexus struct
	 */
	ndi_hold_devi(dip);
	nexp->nex_dip = dip;
	nexp->nex_ops = *nexops;

	mutex_enter(&iommulib_nexus_lock);
	nexp->nex_next = iommulib_nexus_list;
	iommulib_nexus_list = nexp;
	nexp->nex_prev = NULL;

	if (nexp->nex_next != NULL)
		nexp->nex_next->nex_prev = nexp;

	nexp->nex_ref = 0;

	/*
	 * The nexus device won't be controlled by an IOMMU.
	 */
	DEVI(dip)->devi_iommulib_handle = IOMMU_HANDLE_UNUSED;

	DEVI(dip)->devi_iommulib_nex_handle = nexp;

	mutex_exit(&iommulib_nexus_lock);
	mutex_exit(&iommulib_lock);

	cmn_err(CE_NOTE, "!%s: %s%d: Successfully registered NEXUS %s "
289 "nexops=%p", f, driver, instance, ddi_node_name(dip),
290 (void *)nexops);
291
292 *handle = nexp;
293
294 return (DDI_SUCCESS);
295 }
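/*
 * Illustrative sketch (not part of this module): a nexus driver such as
 * rootnex would typically fill in an iommulib_nexops_t and register it
 * while attaching, before any of its children set up DMA. The my_* names
 * are hypothetical; every nops_* entry point checked above must be
 * supplied.
 *
 *	static iommulib_nexhandle_t my_nexhandle;
 *
 *	static iommulib_nexops_t my_nexops = {
 *		.nops_vers = IOMMU_NEXOPS_VERSION,
 *		.nops_id = "my_nexus",
 *		.nops_dma_allochdl = my_coredma_allochdl,
 *		(remaining nops_* entries omitted for brevity)
 *	};
 *
 *	if (iommulib_nexus_register(dip, &my_nexops, &my_nexhandle) !=
 *	    DDI_SUCCESS)
 *		return (DDI_FAILURE);
 */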

int
iommulib_nexus_unregister(iommulib_nexhandle_t handle)
{
	dev_info_t *dip;
	int instance;
	const char *driver;
	iommulib_nex_t *nexp = (iommulib_nex_t *)handle;
	const char *f = "iommulib_nexus_unregister";

	ASSERT(nexp);

	if (nexp->nex_ref != 0)
		return (DDI_FAILURE);

	mutex_enter(&iommulib_nexus_lock);

	dip = nexp->nex_dip;
	driver = ddi_driver_name(dip);
	instance = ddi_get_instance(dip);

	/* A future enhancement would be to add ref-counts */

	if (nexp->nex_prev == NULL) {
		iommulib_nexus_list = nexp->nex_next;
	} else {
		nexp->nex_prev->nex_next = nexp->nex_next;
	}

	if (nexp->nex_next != NULL)
		nexp->nex_next->nex_prev = nexp->nex_prev;

	mutex_exit(&iommulib_nexus_lock);

	kmem_free(nexp, sizeof (iommulib_nex_t));

	cmn_err(CE_NOTE, "!%s: %s%d: NEXUS (%s) handle successfully "
	    "unregistered from IOMMULIB", f, driver, instance,
	    ddi_node_name(dip));

	ndi_rele_devi(dip);

	return (DDI_SUCCESS);
}

int
iommulib_iommu_register(dev_info_t *dip, iommulib_ops_t *ops,
    iommulib_handle_t *handle)
{
	const char *vendor;
	iommulib_unit_t *unitp;
	int instance = ddi_get_instance(dip);
	const char *driver = ddi_driver_name(dip);
	const char *f = "iommulib_register";

	ASSERT(ops);
	ASSERT(handle);

	if (ops->ilops_vers != IOMMU_OPS_VERSION) {
		cmn_err(CE_WARN, "%s: %s%d: Invalid IOMMULIB ops version "
		    "in ops vector (%p). Failing registration", f, driver,
		    instance, (void *)ops);
		return (DDI_FAILURE);
	}

	switch (ops->ilops_vendor) {
	case AMD_IOMMU:
		vendor = "AMD";
		break;
	case INTEL_IOMMU:
		vendor = "Intel";
		break;
	case INVALID_VENDOR:
		cmn_err(CE_WARN, "%s: %s%d: vendor field (%x) not initialized. "
		    "Failing registration for ops vector: %p", f,
		    driver, instance, ops->ilops_vendor, (void *)ops);
		return (DDI_FAILURE);
	default:
		cmn_err(CE_WARN, "%s: %s%d: Invalid vendor field (%x). "
		    "Failing registration for ops vector: %p", f,
		    driver, instance, ops->ilops_vendor, (void *)ops);
		return (DDI_FAILURE);
	}

	cmn_err(CE_NOTE, "!%s: %s%d: Detected IOMMU registration from vendor"
	    " %s", f, driver, instance, vendor);

	if (ops->ilops_data == NULL) {
		cmn_err(CE_WARN, "%s: %s%d: NULL IOMMU data field. "
		    "Failing registration for ops vector: %p", f,
		    driver, instance, (void *)ops);
		return (DDI_FAILURE);
	}

	if (ops->ilops_id == NULL) {
		cmn_err(CE_WARN, "%s: %s%d: NULL ID field. "
		    "Failing registration for ops vector: %p", f,
		    driver, instance, (void *)ops);
		return (DDI_FAILURE);
	}

	if (ops->ilops_probe == NULL) {
		cmn_err(CE_WARN, "%s: %s%d: NULL probe op. "
		    "Failing registration for ops vector: %p", f,
		    driver, instance, (void *)ops);
		return (DDI_FAILURE);
	}

	if (ops->ilops_dma_allochdl == NULL) {
		cmn_err(CE_WARN, "%s: %s%d: NULL dma_allochdl op. "
		    "Failing registration for ops vector: %p", f,
		    driver, instance, (void *)ops);
		return (DDI_FAILURE);
	}

	if (ops->ilops_dma_freehdl == NULL) {
		cmn_err(CE_WARN, "%s: %s%d: NULL dma_freehdl op. "
		    "Failing registration for ops vector: %p", f,
		    driver, instance, (void *)ops);
		return (DDI_FAILURE);
	}

	if (ops->ilops_dma_bindhdl == NULL) {
		cmn_err(CE_WARN, "%s: %s%d: NULL dma_bindhdl op. "
		    "Failing registration for ops vector: %p", f,
		    driver, instance, (void *)ops);
		return (DDI_FAILURE);
	}

	if (ops->ilops_dma_sync == NULL) {
		cmn_err(CE_WARN, "%s: %s%d: NULL dma_sync op. "
		    "Failing registration for ops vector: %p", f,
		    driver, instance, (void *)ops);
		return (DDI_FAILURE);
	}

	if (ops->ilops_dma_win == NULL) {
		cmn_err(CE_WARN, "%s: %s%d: NULL dma_win op. "
		    "Failing registration for ops vector: %p", f,
		    driver, instance, (void *)ops);
		return (DDI_FAILURE);
	}

	unitp = kmem_zalloc(sizeof (iommulib_unit_t), KM_SLEEP);
	mutex_enter(&iommulib_lock);
	if (iommulib_fini == 1) {
		mutex_exit(&iommulib_lock);
		cmn_err(CE_WARN, "%s: IOMMULIB unloading. Failing register.",
		    f);
		kmem_free(unitp, sizeof (iommulib_unit_t));
		return (DDI_FAILURE);
	}

	/*
	 * fini/register race conditions have been handled. Now create the
	 * IOMMU unit
	 */
	mutex_init(&unitp->ilu_lock, NULL, MUTEX_DEFAULT, NULL);

	mutex_enter(&unitp->ilu_lock);
	unitp->ilu_unitid = ++iommulib_unit_ids;
	unitp->ilu_ref = 0;
	ndi_hold_devi(dip);
	unitp->ilu_dip = dip;
	unitp->ilu_ops = ops;
	unitp->ilu_data = ops->ilops_data;

	unitp->ilu_next = iommulib_list;
	iommulib_list = unitp;
	unitp->ilu_prev = NULL;
	if (unitp->ilu_next)
		unitp->ilu_next->ilu_prev = unitp;

	/*
	 * The IOMMU device itself is not controlled by an IOMMU.
	 */
	DEVI(dip)->devi_iommulib_handle = IOMMU_HANDLE_UNUSED;

	mutex_exit(&unitp->ilu_lock);

	iommulib_num_units++;

	*handle = unitp;

	mutex_exit(&iommulib_lock);

	cmn_err(CE_NOTE, "!%s: %s%d: Successfully registered IOMMU unit "
483 "from vendor=%s, ops=%p, data=%p, IOMMULIB unitid=%u",
484 f, driver, instance, vendor, (void *)ops, (void *)unitp->ilu_data,
485 unitp->ilu_unitid);
486
487 return (DDI_SUCCESS);
488 }
489
490 int
iommulib_iommu_unregister(iommulib_handle_t handle)491 iommulib_iommu_unregister(iommulib_handle_t handle)
492 {
493 uint32_t unitid;
494 dev_info_t *dip;
495 int instance;
496 const char *driver;
497 iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
498 const char *f = "iommulib_unregister";
499
500 ASSERT(unitp);
501
502 mutex_enter(&iommulib_lock);
503 mutex_enter(&unitp->ilu_lock);
504
505 unitid = unitp->ilu_unitid;
506 dip = unitp->ilu_dip;
507 driver = ddi_driver_name(dip);
508 instance = ddi_get_instance(dip);
509
510 if (unitp->ilu_ref != 0) {
511 mutex_exit(&unitp->ilu_lock);
512 mutex_exit(&iommulib_lock);
513 cmn_err(CE_WARN, "%s: %s%d: IOMMULIB handle is busy. Cannot "
514 "unregister IOMMULIB unitid %u",
515 f, driver, instance, unitid);
516 return (DDI_FAILURE);
517 }
518 unitp->ilu_unitid = 0;
519 ASSERT(unitp->ilu_ref == 0);
520
	/*
	 * Unlink the unit; ilu_next may be NULL if this is the only
	 * (or last) unit on the list.
	 */
	if (unitp->ilu_prev == NULL) {
		iommulib_list = unitp->ilu_next;
	} else {
		unitp->ilu_prev->ilu_next = unitp->ilu_next;
	}

	if (unitp->ilu_next != NULL)
		unitp->ilu_next->ilu_prev = unitp->ilu_prev;

	iommulib_num_units--;

	mutex_exit(&unitp->ilu_lock);

	mutex_destroy(&unitp->ilu_lock);
	kmem_free(unitp, sizeof (iommulib_unit_t));

	mutex_exit(&iommulib_lock);

	cmn_err(CE_NOTE, "!%s: %s%d: IOMMULIB handle (unitid=%u) successfully "
	    "unregistered", f, driver, instance, unitid);

	ndi_rele_devi(dip);

	return (DDI_SUCCESS);
}

int
iommulib_nex_open(dev_info_t *dip, dev_info_t *rdip)
{
	iommulib_unit_t *unitp;
	int instance = ddi_get_instance(rdip);
	const char *driver = ddi_driver_name(rdip);
	const char *f = "iommulib_nex_open";

	ASSERT(DEVI(dip)->devi_iommulib_nex_handle != NULL);
	ASSERT(DEVI(rdip)->devi_iommulib_handle == NULL);

	/* prevent use of IOMMU for AMD IOMMU's DMA */
	if (strcmp(driver, "amd_iommu") == 0) {
		DEVI(rdip)->devi_iommulib_handle = IOMMU_HANDLE_UNUSED;
		return (DDI_ENOTSUP);
	}

	/*
	 * Use the probe entry point to determine in a hardware specific
	 * manner whether this dip is controlled by an IOMMU. If yes,
	 * return the handle corresponding to the IOMMU unit.
	 */

	mutex_enter(&iommulib_lock);
	for (unitp = iommulib_list; unitp; unitp = unitp->ilu_next) {
		if (unitp->ilu_ops->ilops_probe(unitp, rdip) == DDI_SUCCESS)
			break;
	}

	if (unitp == NULL) {
		mutex_exit(&iommulib_lock);
		if (iommulib_debug) {
			char *buf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
			cmn_err(CE_WARN, "%s: %s%d: devinfo node (%p): is not "
			    "controlled by an IOMMU: path=%s", f, driver,
			    instance, (void *)rdip, ddi_pathname(rdip, buf));
			kmem_free(buf, MAXPATHLEN);
		}
		DEVI(rdip)->devi_iommulib_handle = IOMMU_HANDLE_UNUSED;
		return (DDI_ENOTSUP);
	}

	mutex_enter(&unitp->ilu_lock);
	unitp->ilu_nex = DEVI(dip)->devi_iommulib_nex_handle;
	unitp->ilu_ref++;
	DEVI(rdip)->devi_iommulib_handle = unitp;
	mutex_exit(&unitp->ilu_lock);
	mutex_exit(&iommulib_lock);

	atomic_inc_uint(&DEVI(dip)->devi_iommulib_nex_handle->nex_ref);

	return (DDI_SUCCESS);
}
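/*
 * Illustrative sketch (hypothetical nexus-side code, not the actual
 * rootnex implementation): a nexus driver would typically call
 * iommulib_nex_open() once per child device while setting up DMA and,
 * when it succeeds, dispatch that child's DMA operations through the
 * iommulib_nexdma_* wrappers below. The attr/waitfp/arg/handlep
 * parameters are assumed to come from the caller's allochdl context.
 *
 *	if (DEVI(rdip)->devi_iommulib_handle == NULL &&
 *	    iommulib_nex_open(dip, rdip) != DDI_SUCCESS) {
 *		... fall back to the default, non-IOMMU DMA path ...
 *	}
 *	if (IOMMU_USED(rdip))
 *		return (iommulib_nexdma_allochdl(dip, rdip, attr,
 *		    waitfp, arg, handlep));
 */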

void
iommulib_nex_close(dev_info_t *rdip)
{
	iommulib_unit_t *unitp;
	const char *driver;
	int instance;
	uint32_t unitid;
	iommulib_nex_t *nexp;
	const char *f = "iommulib_nex_close";

	ASSERT(IOMMU_USED(rdip));

	unitp = DEVI(rdip)->devi_iommulib_handle;

	mutex_enter(&iommulib_lock);
	mutex_enter(&unitp->ilu_lock);

	nexp = (iommulib_nex_t *)unitp->ilu_nex;
	DEVI(rdip)->devi_iommulib_handle = NULL;

	unitid = unitp->ilu_unitid;
	driver = ddi_driver_name(unitp->ilu_dip);
	instance = ddi_get_instance(unitp->ilu_dip);

	unitp->ilu_ref--;
	mutex_exit(&unitp->ilu_lock);
	mutex_exit(&iommulib_lock);

	atomic_dec_uint(&nexp->nex_ref);

	if (iommulib_debug) {
		char *buf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
		(void) ddi_pathname(rdip, buf);
		cmn_err(CE_NOTE, "%s: %s%d: closing IOMMU for dip (%p), "
		    "unitid=%u rdip path = %s", f, driver, instance,
		    (void *)rdip, unitid, buf);
		kmem_free(buf, MAXPATHLEN);
	}
}

int
iommulib_nexdma_allochdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_attr_t *attr, int (*waitfp)(caddr_t),
    caddr_t arg, ddi_dma_handle_t *dma_handlep)
{
	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;

	ASSERT(unitp);

	/* No need to grab lock - the handle is reference counted */
	return (unitp->ilu_ops->ilops_dma_allochdl(handle, dip, rdip,
	    attr, waitfp, arg, dma_handlep));
}

int
iommulib_nexdma_freehdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t dma_handle)
{
	int error;
	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;

	ASSERT(unitp);

	/* No need to grab lock - the handle is reference counted */
	error = unitp->ilu_ops->ilops_dma_freehdl(handle, dip,
	    rdip, dma_handle);

	return (error);
}

int
iommulib_nexdma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t dma_handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;

	ASSERT(unitp);

	/* No need to grab lock - the handle is reference counted */
	return (unitp->ilu_ops->ilops_dma_bindhdl(handle, dip, rdip, dma_handle,
	    dmareq, cookiep, ccountp));
}

int
iommulib_nexdma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t dma_handle)
{
	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;

	ASSERT(unitp);

	/* No need to grab lock - the handle is reference counted */
	return (unitp->ilu_ops->ilops_dma_unbindhdl(handle, dip, rdip,
	    dma_handle));
}

int
iommulib_nexdma_sync(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t dma_handle, off_t off, size_t len,
    uint_t cache_flags)
{
	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;

	ASSERT(unitp);

	/* No need to grab lock - the handle is reference counted */
	return (unitp->ilu_ops->ilops_dma_sync(handle, dip, rdip, dma_handle,
	    off, len, cache_flags));
}

int
iommulib_nexdma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t dma_handle, uint_t win, off_t *offp, size_t *lenp,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;

	ASSERT(unitp);

	/* No need to grab lock - the handle is reference counted */
	return (unitp->ilu_ops->ilops_dma_win(handle, dip, rdip, dma_handle,
	    win, offp, lenp, cookiep, ccountp));
}

int
iommulib_nexdma_mapobject(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t dma_handle, struct ddi_dma_req *dmareq,
    ddi_dma_obj_t *dmao)
{
	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;

	return (unitp->ilu_ops->ilops_dma_mapobject(handle, dip, rdip,
	    dma_handle, dmareq, dmao));
}

int
iommulib_nexdma_unmapobject(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t dma_handle, ddi_dma_obj_t *dmao)
{
	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;

	return (unitp->ilu_ops->ilops_dma_unmapobject(handle, dip, rdip,
	    dma_handle, dmao));
}

/* Utility routines invoked by IOMMU drivers */
int
iommulib_iommu_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
    ddi_dma_handle_t *handlep)
{
	iommulib_nexops_t *nexops;

	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
	return (nexops->nops_dma_allochdl(dip, rdip, attr, waitfp, arg,
	    handlep));
}
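/*
 * Illustrative sketch (hypothetical IOMMU-driver code): an ilops_dma_*
 * entry point typically performs its IOMMU-specific work and then hands
 * the request back to the nexus through these iommulib_iommu_dma_*
 * utilities. The my_allochdl name is made up; the argument list matches
 * how ilops_dma_allochdl is invoked by iommulib_nexdma_allochdl() above.
 *
 *	static int
 *	my_allochdl(iommulib_handle_t handle, dev_info_t *dip,
 *	    dev_info_t *rdip, ddi_dma_attr_t *attr,
 *	    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
 *	{
 *		return (iommulib_iommu_dma_allochdl(dip, rdip, attr,
 *		    waitfp, arg, handlep));
 *	}
 */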

int
iommulib_iommu_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	iommulib_nexops_t *nexops;

	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
	ASSERT(nexops);
	return (nexops->nops_dma_freehdl(dip, rdip, handle));
}

int
iommulib_iommu_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	iommulib_nexops_t *nexops;

	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
	return (nexops->nops_dma_bindhdl(dip, rdip, handle, dmareq,
	    cookiep, ccountp));
}

int
iommulib_iommu_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	iommulib_nexops_t *nexops;

	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
	return (nexops->nops_dma_unbindhdl(dip, rdip, handle));
}

void
iommulib_iommu_dma_reset_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
{
	iommulib_nexops_t *nexops;

	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
	nexops->nops_dma_reset_cookies(dip, handle);
}

int
iommulib_iommu_dma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
    ddi_dma_cookie_t **cookiepp, uint_t *ccountp)
{
	iommulib_nexops_t *nexops;

	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
	return (nexops->nops_dma_get_cookies(dip, handle, cookiepp, ccountp));
}

int
iommulib_iommu_dma_set_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
    ddi_dma_cookie_t *cookiep, uint_t ccount)
{
	iommulib_nexops_t *nexops;

	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
	return (nexops->nops_dma_set_cookies(dip, handle, cookiep, ccount));
}

int
iommulib_iommu_dma_clear_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
{
	iommulib_nexops_t *nexops;

	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
	return (nexops->nops_dma_clear_cookies(dip, handle));
}

int
iommulib_iommu_dma_get_sleep_flags(dev_info_t *dip, ddi_dma_handle_t handle)
{
	iommulib_nexops_t *nexops;

	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
	return (nexops->nops_dma_get_sleep_flags(handle));
}

int
iommulib_iommu_dma_sync(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags)
{
	iommulib_nexops_t *nexops;

	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
	return (nexops->nops_dma_sync(dip, rdip, handle, off, len,
	    cache_flags));
}

int
iommulib_iommu_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	iommulib_nexops_t *nexops;

	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
	return (nexops->nops_dma_win(dip, rdip, handle, win, offp, lenp,
	    cookiep, ccountp));
}

int
iommulib_iommu_dmahdl_setprivate(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, void *priv)
{
	iommulib_nexops_t *nexops;

	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
	return (nexops->nops_dmahdl_setprivate(dip, rdip, handle, priv));
}

void *
iommulib_iommu_dmahdl_getprivate(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	iommulib_nexops_t *nexops;

	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
	return (nexops->nops_dmahdl_getprivate(dip, rdip, handle));
}

int
iommulib_iommu_getunitid(iommulib_handle_t handle, uint64_t *unitidp)
{
	iommulib_unit_t *unitp;
	uint64_t unitid;

	unitp = (iommulib_unit_t *)handle;

	ASSERT(unitp);
	ASSERT(unitidp);

	mutex_enter(&unitp->ilu_lock);
	unitid = unitp->ilu_unitid;
	mutex_exit(&unitp->ilu_lock);

	ASSERT(unitid > 0);
	*unitidp = (uint64_t)unitid;

	return (DDI_SUCCESS);
}

dev_info_t *
iommulib_iommu_getdip(iommulib_handle_t handle)
{
	iommulib_unit_t *unitp;
	dev_info_t *dip;

	unitp = (iommulib_unit_t *)handle;

	ASSERT(unitp);

	mutex_enter(&unitp->ilu_lock);
	dip = unitp->ilu_dip;
	ASSERT(dip);
	ndi_hold_devi(dip);
	mutex_exit(&unitp->ilu_lock);

	return (dip);
}

iommulib_ops_t *
iommulib_iommu_getops(iommulib_handle_t handle)
{
	iommulib_unit_t *unitp;
	iommulib_ops_t *ops;

	unitp = (iommulib_unit_t *)handle;

	ASSERT(unitp);

	mutex_enter(&unitp->ilu_lock);
	ops = unitp->ilu_ops;
	mutex_exit(&unitp->ilu_lock);

	ASSERT(ops);

	return (ops);
}

void *
iommulib_iommu_getdata(iommulib_handle_t handle)
{
	iommulib_unit_t *unitp;
	void *data;

	unitp = (iommulib_unit_t *)handle;

	ASSERT(unitp);

	mutex_enter(&unitp->ilu_lock);
	data = unitp->ilu_data;
	mutex_exit(&unitp->ilu_lock);

	ASSERT(data);

	return (data);
}