1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "@(#)iommulib.c 1.6 08/09/07 SMI" 27 28 #include <sys/sunddi.h> 29 #include <sys/sunndi.h> 30 #include <sys/errno.h> 31 #include <sys/modctl.h> 32 #include <sys/iommulib.h> 33 34 /* ******** Type definitions private to this file ********************** */ 35 36 /* 1 per IOMMU unit. There may be more than one per dip */ 37 typedef struct iommulib_unit { 38 kmutex_t ilu_lock; 39 uint64_t ilu_ref; 40 uint32_t ilu_unitid; 41 dev_info_t *ilu_dip; 42 iommulib_ops_t *ilu_ops; 43 void* ilu_data; 44 struct iommulib_unit *ilu_next; 45 struct iommulib_unit *ilu_prev; 46 } iommulib_unit_t; 47 48 typedef struct iommulib_cache { 49 dev_info_t *cache_rdip; 50 iommulib_unit_t *cache_unit; 51 struct iommulib_cache *cache_next; 52 struct iommulib_cache *cache_prev; 53 } iommulib_cache_t; 54 55 typedef struct iommulib_nex { 56 dev_info_t *nex_dip; 57 iommulib_nexops_t nex_ops; 58 struct iommulib_nex *nex_next; 59 struct iommulib_nex *nex_prev; 60 } iommulib_nex_t; 61 62 /* ********* Function prototypes ********************* */ 63 static int lookup_cache(dev_info_t *rdip, iommulib_unit_t **unitpp); 64 static void insert_cache(dev_info_t *rdip, iommulib_unit_t *unitp); 65 66 67 /* ********* Globals ************************ */ 68 69 /* IOMMU side: Following data protected by lock */ 70 static kmutex_t iommulib_lock; 71 static iommulib_unit_t *iommulib_list; 72 static uint64_t iommulib_unit_ids = 0; 73 static uint64_t iommulib_num_units = 0; 74 75 /* rootnex side data */ 76 77 static kmutex_t iommulib_nexus_lock; 78 static iommulib_nex_t *iommulib_nexus_list; 79 80 #define IOMMULIB_CACHE_SIZE 256 81 static kmutex_t iommulib_cache_lock; 82 static iommulib_cache_t **iommulib_cache; 83 84 /* tunable via /etc/system */ 85 static uint_t iommulib_cache_size = IOMMULIB_CACHE_SIZE; 86 87 /* can be set atomically without lock */ 88 static volatile uint32_t iommulib_fini; 89 90 /* debug flag */ 91 static int iommulib_debug; 92 93 /* 94 * Module linkage information for the kernel. 95 */ 96 static struct modlmisc modlmisc = { 97 &mod_miscops, "IOMMU library module" 98 }; 99 100 static struct modlinkage modlinkage = { 101 MODREV_1, (void *)&modlmisc, NULL 102 }; 103 104 int 105 _init(void) 106 { 107 /* 108 * static mutexes automagically initialized 109 * by being allocated in zeroed memory 110 */ 111 mutex_enter(&iommulib_cache_lock); 112 iommulib_cache = kmem_zalloc( 113 sizeof (iommulib_cache_t *) * iommulib_cache_size, KM_SLEEP); 114 mutex_exit(&iommulib_cache_lock); 115 116 return (mod_install(&modlinkage)); 117 } 118 119 int 120 _fini(void) 121 { 122 mutex_enter(&iommulib_lock); 123 if (iommulib_list != NULL || iommulib_nexus_list != NULL) { 124 mutex_exit(&iommulib_lock); 125 return (EBUSY); 126 } 127 iommulib_fini = 1; 128 129 mutex_enter(&iommulib_cache_lock); 130 kmem_free(iommulib_cache, 131 sizeof (iommulib_cache_t *) * iommulib_cache_size); 132 iommulib_cache = NULL; 133 mutex_exit(&iommulib_cache_lock); 134 135 mutex_exit(&iommulib_lock); 136 return (mod_remove(&modlinkage)); 137 } 138 139 int 140 _info(struct modinfo *modinfop) 141 { 142 return (mod_info(&modlinkage, modinfop)); 143 } 144 145 /* 146 * Routines with iommulib_iommu_* are invoked from the 147 * IOMMU driver. 148 * Routines with iommulib_nex* are invoked from the 149 * nexus driver (typically rootnex) 150 */ 151 152 int 153 iommulib_nexus_register(dev_info_t *dip, iommulib_nexops_t *nexops, 154 iommulib_nexhandle_t *handle) 155 { 156 iommulib_nex_t *nexp; 157 int instance = ddi_get_instance(dip); 158 const char *driver = ddi_driver_name(dip); 159 dev_info_t *pdip = ddi_get_parent(dip); 160 const char *f = "iommulib_nexus_register"; 161 162 ASSERT(nexops); 163 ASSERT(handle); 164 165 *handle = NULL; 166 167 /* 168 * Root node is never busy held 169 */ 170 if (dip != ddi_root_node() && (i_ddi_node_state(dip) < DS_PROBED || 171 !DEVI_BUSY_OWNED(pdip))) { 172 cmn_err(CE_WARN, "%s: NEXUS devinfo node not in DS_PROBED " 173 "or busy held for nexops vector (%p). Failing registration", 174 f, (void *)nexops); 175 return (DDI_FAILURE); 176 } 177 178 if (nexops->nops_vers != IOMMU_NEXOPS_VERSION) { 179 cmn_err(CE_WARN, "%s: %s%d: Invalid IOMMULIB nexops version " 180 "in nexops vector (%p). Failing NEXUS registration", 181 f, driver, instance, (void *)nexops); 182 return (DDI_FAILURE); 183 } 184 185 ASSERT(nexops->nops_data == NULL); 186 187 if (nexops->nops_id == NULL) { 188 cmn_err(CE_WARN, "%s: %s%d: NULL ID field. " 189 "Failing registration for nexops vector: %p", 190 f, driver, instance, (void *)nexops); 191 return (DDI_FAILURE); 192 } 193 194 if (nexops->nops_dma_allochdl == NULL) { 195 cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_allochdl op. " 196 "Failing registration for ops vector: %p", f, 197 driver, instance, (void *)nexops); 198 return (DDI_FAILURE); 199 } 200 201 if (nexops->nops_dma_freehdl == NULL) { 202 cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_freehdl op. " 203 "Failing registration for ops vector: %p", f, 204 driver, instance, (void *)nexops); 205 return (DDI_FAILURE); 206 } 207 208 if (nexops->nops_dma_bindhdl == NULL) { 209 cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_bindhdl op. " 210 "Failing registration for ops vector: %p", f, 211 driver, instance, (void *)nexops); 212 return (DDI_FAILURE); 213 } 214 215 if (nexops->nops_dma_sync == NULL) { 216 cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_sync op. " 217 "Failing registration for ops vector: %p", f, 218 driver, instance, (void *)nexops); 219 return (DDI_FAILURE); 220 } 221 222 223 if (nexops->nops_dma_reset_cookies == NULL) { 224 cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_reset_cookies op. " 225 "Failing registration for ops vector: %p", f, 226 driver, instance, (void *)nexops); 227 return (DDI_FAILURE); 228 } 229 230 if (nexops->nops_dma_get_cookies == NULL) { 231 cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_get_cookies op. " 232 "Failing registration for ops vector: %p", f, 233 driver, instance, (void *)nexops); 234 return (DDI_FAILURE); 235 } 236 237 if (nexops->nops_dma_win == NULL) { 238 cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_win op. " 239 "Failing registration for ops vector: %p", f, 240 driver, instance, (void *)nexops); 241 return (DDI_FAILURE); 242 } 243 244 /* Check for legacy ops */ 245 if (nexops->nops_dma_map == NULL) { 246 cmn_err(CE_WARN, "%s: %s%d: NULL legacy nops_dma_map op. " 247 "Failing registration for ops vector: %p", f, 248 driver, instance, (void *)nexops); 249 return (DDI_FAILURE); 250 } 251 252 if (nexops->nops_dma_mctl == NULL) { 253 cmn_err(CE_WARN, "%s: %s%d: NULL legacy nops_dma_mctl op. " 254 "Failing registration for ops vector: %p", f, 255 driver, instance, (void *)nexops); 256 return (DDI_FAILURE); 257 } 258 259 nexp = kmem_zalloc(sizeof (iommulib_nex_t), KM_SLEEP); 260 261 mutex_enter(&iommulib_lock); 262 if (iommulib_fini == 1) { 263 mutex_exit(&iommulib_lock); 264 cmn_err(CE_WARN, "%s: IOMMULIB unloading. " 265 "Failing NEXUS register.", f); 266 kmem_free(nexp, sizeof (iommulib_nex_t)); 267 return (DDI_FAILURE); 268 } 269 270 /* 271 * fini/register race conditions have been handled. Now create the 272 * nexus struct 273 */ 274 ndi_hold_devi(dip); 275 nexp->nex_dip = dip; 276 nexp->nex_ops = *nexops; 277 278 mutex_enter(&iommulib_nexus_lock); 279 nexp->nex_next = iommulib_nexus_list; 280 iommulib_nexus_list = nexp; 281 nexp->nex_prev = NULL; 282 283 if (nexp->nex_next != NULL) 284 nexp->nex_next->nex_prev = nexp; 285 286 mutex_exit(&iommulib_nexus_lock); 287 mutex_exit(&iommulib_lock); 288 289 cmn_err(CE_NOTE, "!%s: %s%d: Succesfully registered NEXUS %s " 290 "nexops=%p", f, driver, instance, ddi_node_name(dip), 291 (void *)nexops); 292 293 *handle = nexp; 294 295 return (DDI_SUCCESS); 296 } 297 298 int 299 iommulib_nexus_unregister(iommulib_nexhandle_t handle) 300 { 301 dev_info_t *dip; 302 int instance; 303 const char *driver; 304 iommulib_nex_t *nexp = (iommulib_nex_t *)handle; 305 const char *f = "iommulib_nexus_unregister"; 306 307 ASSERT(nexp); 308 309 mutex_enter(&iommulib_nexus_lock); 310 311 dip = nexp->nex_dip; 312 driver = ddi_driver_name(dip); 313 instance = ddi_get_instance(dip); 314 315 /* A future enhancement would be to add ref-counts */ 316 317 if (nexp->nex_prev == NULL) { 318 iommulib_nexus_list = nexp->nex_next; 319 } else { 320 nexp->nex_prev->nex_next = nexp->nex_next; 321 } 322 323 if (nexp->nex_next != NULL) 324 nexp->nex_next->nex_prev = nexp->nex_prev; 325 326 mutex_exit(&iommulib_nexus_lock); 327 328 kmem_free(nexp, sizeof (iommulib_nex_t)); 329 330 cmn_err(CE_WARN, "%s: %s%d: NEXUS (%s) handle successfully " 331 "unregistered from IOMMULIB", f, driver, instance, 332 ddi_node_name(dip)); 333 334 ndi_rele_devi(dip); 335 336 return (DDI_SUCCESS); 337 } 338 339 static iommulib_nexops_t * 340 lookup_nexops(dev_info_t *dip) 341 { 342 iommulib_nex_t *nexp; 343 344 mutex_enter(&iommulib_nexus_lock); 345 nexp = iommulib_nexus_list; 346 while (nexp) { 347 if (nexp->nex_dip == dip) 348 break; 349 nexp = nexp->nex_next; 350 } 351 mutex_exit(&iommulib_nexus_lock); 352 353 return (nexp ? &nexp->nex_ops : NULL); 354 } 355 356 int 357 iommulib_iommu_register(dev_info_t *dip, iommulib_ops_t *ops, 358 iommulib_handle_t *handle) 359 { 360 const char *vendor; 361 iommulib_unit_t *unitp; 362 int instance = ddi_get_instance(dip); 363 const char *driver = ddi_driver_name(dip); 364 dev_info_t *pdip = ddi_get_parent(dip); 365 const char *f = "iommulib_register"; 366 367 ASSERT(ops); 368 ASSERT(handle); 369 370 if (i_ddi_node_state(dip) < DS_PROBED || !DEVI_BUSY_OWNED(pdip)) { 371 cmn_err(CE_WARN, "%s: devinfo node not in DS_PROBED or " 372 "busy held for ops vector (%p). Failing registration", 373 f, (void *)ops); 374 return (DDI_FAILURE); 375 } 376 377 378 if (ops->ilops_vers != IOMMU_OPS_VERSION) { 379 cmn_err(CE_WARN, "%s: %s%d: Invalid IOMMULIB ops version " 380 "in ops vector (%p). Failing registration", f, driver, 381 instance, (void *)ops); 382 return (DDI_FAILURE); 383 } 384 385 switch (ops->ilops_vendor) { 386 case AMD_IOMMU: 387 vendor = "AMD"; 388 break; 389 case INTEL_IOMMU: 390 vendor = "Intel"; 391 break; 392 case INVALID_VENDOR: 393 cmn_err(CE_WARN, "%s: %s%d: vendor field (%x) not initialized. " 394 "Failing registration for ops vector: %p", f, 395 driver, instance, ops->ilops_vendor, (void *)ops); 396 return (DDI_FAILURE); 397 default: 398 cmn_err(CE_WARN, "%s: %s%d: Invalid vendor field (%x). " 399 "Failing registration for ops vector: %p", f, 400 driver, instance, ops->ilops_vendor, (void *)ops); 401 return (DDI_FAILURE); 402 } 403 404 cmn_err(CE_NOTE, "%s: %s%d: Detected IOMMU registration from vendor %s", 405 f, driver, instance, vendor); 406 407 if (ops->ilops_data == NULL) { 408 cmn_err(CE_WARN, "%s: %s%d: NULL IOMMU data field. " 409 "Failing registration for ops vector: %p", f, 410 driver, instance, (void *)ops); 411 return (DDI_FAILURE); 412 } 413 414 if (ops->ilops_id == NULL) { 415 cmn_err(CE_WARN, "%s: %s%d: NULL ID field. " 416 "Failing registration for ops vector: %p", f, 417 driver, instance, (void *)ops); 418 return (DDI_FAILURE); 419 } 420 421 if (ops->ilops_probe == NULL) { 422 cmn_err(CE_WARN, "%s: %s%d: NULL probe op. " 423 "Failing registration for ops vector: %p", f, 424 driver, instance, (void *)ops); 425 return (DDI_FAILURE); 426 } 427 428 if (ops->ilops_dma_allochdl == NULL) { 429 cmn_err(CE_WARN, "%s: %s%d: NULL dma_allochdl op. " 430 "Failing registration for ops vector: %p", f, 431 driver, instance, (void *)ops); 432 return (DDI_FAILURE); 433 } 434 435 if (ops->ilops_dma_freehdl == NULL) { 436 cmn_err(CE_WARN, "%s: %s%d: NULL dma_freehdl op. " 437 "Failing registration for ops vector: %p", f, 438 driver, instance, (void *)ops); 439 return (DDI_FAILURE); 440 } 441 442 if (ops->ilops_dma_bindhdl == NULL) { 443 cmn_err(CE_WARN, "%s: %s%d: NULL dma_bindhdl op. " 444 "Failing registration for ops vector: %p", f, 445 driver, instance, (void *)ops); 446 return (DDI_FAILURE); 447 } 448 449 if (ops->ilops_dma_sync == NULL) { 450 cmn_err(CE_WARN, "%s: %s%d: NULL dma_sync op. " 451 "Failing registration for ops vector: %p", f, 452 driver, instance, (void *)ops); 453 return (DDI_FAILURE); 454 } 455 456 if (ops->ilops_dma_win == NULL) { 457 cmn_err(CE_WARN, "%s: %s%d: NULL dma_win op. " 458 "Failing registration for ops vector: %p", f, 459 driver, instance, (void *)ops); 460 return (DDI_FAILURE); 461 } 462 463 /* Check for legacy ops */ 464 if (ops->ilops_dma_map == NULL) { 465 cmn_err(CE_WARN, "%s: %s%d: NULL legacy dma_map op. " 466 "Failing registration for ops vector: %p", f, 467 driver, instance, (void *)ops); 468 return (DDI_FAILURE); 469 } 470 471 if (ops->ilops_dma_mctl == NULL) { 472 cmn_err(CE_WARN, "%s: %s%d: NULL legacy dma_mctl op. " 473 "Failing registration for ops vector: %p", f, 474 driver, instance, (void *)ops); 475 return (DDI_FAILURE); 476 } 477 478 unitp = kmem_zalloc(sizeof (iommulib_unit_t), KM_SLEEP); 479 mutex_enter(&iommulib_lock); 480 if (iommulib_fini == 1) { 481 mutex_exit(&iommulib_lock); 482 cmn_err(CE_WARN, "%s: IOMMULIB unloading. Failing register.", 483 f); 484 kmem_free(unitp, sizeof (iommulib_unit_t)); 485 return (DDI_FAILURE); 486 } 487 488 /* 489 * fini/register race conditions have been handled. Now create the 490 * IOMMU unit 491 */ 492 mutex_init(&unitp->ilu_lock, NULL, MUTEX_DEFAULT, NULL); 493 494 mutex_enter(&unitp->ilu_lock); 495 unitp->ilu_unitid = ++iommulib_unit_ids; 496 unitp->ilu_ref = 0; 497 ndi_hold_devi(dip); 498 unitp->ilu_dip = dip; 499 unitp->ilu_ops = ops; 500 unitp->ilu_data = ops->ilops_data; 501 502 unitp->ilu_next = iommulib_list; 503 unitp->ilu_prev = NULL; 504 iommulib_list->ilu_prev = unitp; 505 iommulib_list = unitp; 506 507 mutex_exit(&unitp->ilu_lock); 508 509 iommulib_num_units++; 510 511 *handle = unitp; 512 513 mutex_exit(&iommulib_lock); 514 515 cmn_err(CE_NOTE, "%s: %s%d: Succesfully registered IOMMU unit " 516 "from vendor=%s, ops=%p, data=%p, IOMMULIB unitid=%u", 517 f, driver, instance, vendor, (void *)ops, (void *)unitp->ilu_data, 518 unitp->ilu_unitid); 519 520 return (DDI_SUCCESS); 521 } 522 523 int 524 iommulib_iommu_unregister(iommulib_handle_t handle) 525 { 526 uint32_t unitid; 527 dev_info_t *dip; 528 int instance; 529 const char *driver; 530 iommulib_unit_t *unitp = (iommulib_unit_t *)handle; 531 const char *f = "iommulib_unregister"; 532 533 ASSERT(unitp); 534 535 mutex_enter(&iommulib_lock); 536 mutex_enter(&unitp->ilu_lock); 537 538 unitid = unitp->ilu_unitid; 539 dip = unitp->ilu_dip; 540 driver = ddi_driver_name(dip); 541 instance = ddi_get_instance(dip); 542 543 if (unitp->ilu_ref != 0) { 544 mutex_exit(&unitp->ilu_lock); 545 mutex_exit(&iommulib_lock); 546 cmn_err(CE_WARN, "%s: %s%d: IOMMULIB handle is busy. Cannot " 547 "unregister IOMMULIB unitid %u", 548 f, driver, instance, unitid); 549 return (DDI_FAILURE); 550 } 551 unitp->ilu_unitid = 0; 552 ASSERT(unitp->ilu_ref == 0); 553 554 if (unitp->ilu_prev == NULL) { 555 iommulib_list = unitp->ilu_next; 556 unitp->ilu_next->ilu_prev = NULL; 557 } else { 558 unitp->ilu_prev->ilu_next = unitp->ilu_next; 559 unitp->ilu_next->ilu_prev = unitp->ilu_prev; 560 } 561 562 iommulib_num_units--; 563 564 mutex_exit(&unitp->ilu_lock); 565 566 mutex_destroy(&unitp->ilu_lock); 567 kmem_free(unitp, sizeof (iommulib_unit_t)); 568 569 mutex_exit(&iommulib_lock); 570 571 cmn_err(CE_WARN, "%s: %s%d: IOMMULIB handle (unitid=%u) successfully " 572 "unregistered", f, driver, instance, unitid); 573 574 ndi_rele_devi(dip); 575 576 return (DDI_SUCCESS); 577 } 578 579 int 580 iommulib_nex_open(dev_info_t *rdip, uint_t *errorp) 581 { 582 iommulib_unit_t *unitp; 583 int instance = ddi_get_instance(rdip); 584 const char *driver = ddi_driver_name(rdip); 585 const char *f = "iommulib_nex_open"; 586 587 *errorp = 0; 588 DEVI(rdip)->devi_iommulib_handle = NULL; 589 590 /* prevent use of IOMMU for AMD IOMMU's DMA */ 591 if (strcmp(driver, "amd_iommu") == 0) { 592 *errorp = ENOTSUP; 593 return (DDI_FAILURE); 594 } 595 596 if (lookup_cache(rdip, &unitp) == DDI_SUCCESS) { 597 DEVI(rdip)->devi_iommulib_handle = 598 (iommulib_handle_t)unitp; 599 return (DDI_SUCCESS); 600 } 601 602 603 /* 604 * Ok this dip is not in the cache. Use the probe entry point 605 * to determine in a hardware specific manner whether this 606 * dip is controlled by an IOMMU. If yes, insert it into the 607 * cache and return the handle corresponding to the IOMMU unit. 608 */ 609 610 mutex_enter(&iommulib_lock); 611 for (unitp = iommulib_list; unitp; unitp = unitp->ilu_next) { 612 if (unitp->ilu_ops->ilops_probe(rdip) == DDI_SUCCESS) 613 break; 614 } 615 616 if (unitp == NULL) { 617 mutex_exit(&iommulib_lock); 618 if (iommulib_debug) { 619 char *buf = kmem_alloc(MAXPATHLEN, KM_SLEEP); 620 cmn_err(CE_WARN, "%s: %s%d: devinfo node (%p): is not " 621 "controlled by an IOMMU: path=%s", f, driver, 622 instance, (void *)rdip, ddi_pathname(rdip, buf)); 623 kmem_free(buf, MAXPATHLEN); 624 } 625 *errorp = ENOTSUP; 626 return (DDI_FAILURE); 627 } 628 629 mutex_enter(&unitp->ilu_lock); 630 unitp->ilu_ref++; 631 mutex_exit(&unitp->ilu_lock); 632 mutex_exit(&iommulib_lock); 633 634 insert_cache(rdip, unitp); 635 636 DEVI(rdip)->devi_iommulib_handle = unitp; 637 638 return (DDI_SUCCESS); 639 } 640 641 void 642 iommulib_nex_close(dev_info_t *rdip) 643 { 644 iommulib_unit_t *unitp; 645 const char *driver; 646 int instance; 647 uint32_t unitid; 648 const char *f = "iommulib_nex_close"; 649 650 unitp = (iommulib_unit_t *)DEVI(rdip)->devi_iommulib_handle; 651 if (unitp == NULL) 652 return; 653 654 DEVI(rdip)->devi_iommulib_handle = NULL; 655 656 /* 657 * Assume we don't support DR of IOMMUs. The mapping of 658 * dips to IOMMU units should not change. Let the mapping 659 * persist in the cache. 660 */ 661 662 mutex_enter(&iommulib_lock); 663 mutex_enter(&unitp->ilu_lock); 664 unitid = unitp->ilu_unitid; 665 driver = ddi_driver_name(unitp->ilu_dip); 666 instance = ddi_get_instance(unitp->ilu_dip); 667 unitp->ilu_ref--; 668 mutex_exit(&unitp->ilu_lock); 669 mutex_exit(&iommulib_lock); 670 671 if (iommulib_debug) { 672 char *buf = kmem_alloc(MAXPATHLEN, KM_SLEEP); 673 (void) ddi_pathname(rdip, buf); 674 cmn_err(CE_NOTE, "%s: %s%d: closing IOMMU for dip (%p), " 675 "unitid=%u rdip path = %s", f, driver, instance, 676 (void *)rdip, unitid, buf); 677 kmem_free(buf, MAXPATHLEN); 678 } 679 } 680 681 int 682 iommulib_nexdma_allochdl(dev_info_t *dip, dev_info_t *rdip, 683 ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), 684 caddr_t arg, ddi_dma_handle_t *dma_handlep) 685 { 686 iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle; 687 iommulib_unit_t *unitp = (iommulib_unit_t *)handle; 688 689 ASSERT(unitp); 690 691 /* No need to grab lock - the handle is reference counted */ 692 return (unitp->ilu_ops->ilops_dma_allochdl(handle, dip, rdip, 693 attr, waitfp, arg, dma_handlep)); 694 } 695 696 int 697 iommulib_nexdma_freehdl(dev_info_t *dip, dev_info_t *rdip, 698 ddi_dma_handle_t dma_handle) 699 { 700 int error; 701 iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle; 702 iommulib_unit_t *unitp = (iommulib_unit_t *)handle; 703 704 ASSERT(unitp); 705 706 /* No need to grab lock - the handle is reference counted */ 707 error = unitp->ilu_ops->ilops_dma_freehdl(handle, dip, 708 rdip, dma_handle); 709 710 iommulib_nex_close(rdip); 711 712 return (error); 713 } 714 715 int 716 iommulib_nexdma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 717 ddi_dma_handle_t dma_handle, struct ddi_dma_req *dmareq, 718 ddi_dma_cookie_t *cookiep, uint_t *ccountp) 719 { 720 iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle; 721 iommulib_unit_t *unitp = (iommulib_unit_t *)handle; 722 723 ASSERT(unitp); 724 725 /* No need to grab lock - the handle is reference counted */ 726 return (unitp->ilu_ops->ilops_dma_bindhdl(handle, dip, rdip, dma_handle, 727 dmareq, cookiep, ccountp)); 728 } 729 730 int 731 iommulib_nexdma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, 732 ddi_dma_handle_t dma_handle) 733 { 734 iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle; 735 iommulib_unit_t *unitp = (iommulib_unit_t *)handle; 736 737 ASSERT(unitp); 738 739 /* No need to grab lock - the handle is reference counted */ 740 return (unitp->ilu_ops->ilops_dma_unbindhdl(handle, dip, rdip, 741 dma_handle)); 742 } 743 744 int 745 iommulib_nexdma_sync(dev_info_t *dip, dev_info_t *rdip, 746 ddi_dma_handle_t dma_handle, off_t off, size_t len, 747 uint_t cache_flags) 748 { 749 iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle; 750 iommulib_unit_t *unitp = (iommulib_unit_t *)handle; 751 752 ASSERT(unitp); 753 754 /* No need to grab lock - the handle is reference counted */ 755 return (unitp->ilu_ops->ilops_dma_sync(handle, dip, rdip, dma_handle, 756 off, len, cache_flags)); 757 } 758 759 int 760 iommulib_nexdma_win(dev_info_t *dip, dev_info_t *rdip, 761 ddi_dma_handle_t dma_handle, uint_t win, off_t *offp, size_t *lenp, 762 ddi_dma_cookie_t *cookiep, uint_t *ccountp) 763 { 764 iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle; 765 iommulib_unit_t *unitp = (iommulib_unit_t *)handle; 766 767 ASSERT(unitp); 768 769 /* No need to grab lock - the handle is reference counted */ 770 return (unitp->ilu_ops->ilops_dma_win(handle, dip, rdip, dma_handle, 771 win, offp, lenp, cookiep, ccountp)); 772 } 773 774 /* Obsolete DMA routines */ 775 776 int 777 iommulib_nexdma_map(dev_info_t *dip, dev_info_t *rdip, 778 struct ddi_dma_req *dmareq, ddi_dma_handle_t *dma_handle) 779 { 780 iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle; 781 iommulib_unit_t *unitp = handle; 782 783 ASSERT(unitp); 784 785 /* No need to grab lock - the handle is reference counted */ 786 return (unitp->ilu_ops->ilops_dma_map(handle, dip, rdip, dmareq, 787 dma_handle)); 788 } 789 790 int 791 iommulib_nexdma_mctl(dev_info_t *dip, dev_info_t *rdip, 792 ddi_dma_handle_t dma_handle, enum ddi_dma_ctlops request, 793 off_t *offp, size_t *lenp, caddr_t *objpp, uint_t cache_flags) 794 { 795 iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle; 796 iommulib_unit_t *unitp = (iommulib_unit_t *)handle; 797 798 ASSERT(unitp); 799 800 /* No need to grab lock - the handle is reference counted */ 801 return (unitp->ilu_ops->ilops_dma_mctl(handle, dip, rdip, dma_handle, 802 request, offp, lenp, objpp, cache_flags)); 803 } 804 805 /* Utility routines invoked by IOMMU drivers */ 806 int 807 iommulib_iommu_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, 808 ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg, 809 ddi_dma_handle_t *handlep) 810 { 811 iommulib_nexops_t *nexops = lookup_nexops(dip); 812 if (nexops == NULL) 813 return (DDI_FAILURE); 814 return (nexops->nops_dma_allochdl(dip, rdip, attr, waitfp, arg, 815 handlep)); 816 } 817 818 int 819 iommulib_iommu_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, 820 ddi_dma_handle_t handle) 821 { 822 iommulib_nexops_t *nexops = lookup_nexops(dip); 823 if (nexops == NULL) 824 return (DDI_FAILURE); 825 return (nexops->nops_dma_freehdl(dip, rdip, handle)); 826 } 827 828 int 829 iommulib_iommu_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 830 ddi_dma_handle_t handle, struct ddi_dma_req *dmareq, 831 ddi_dma_cookie_t *cookiep, uint_t *ccountp) 832 { 833 iommulib_nexops_t *nexops = lookup_nexops(dip); 834 if (nexops == NULL) 835 return (DDI_FAILURE); 836 return (nexops->nops_dma_bindhdl(dip, rdip, handle, dmareq, 837 cookiep, ccountp)); 838 } 839 840 int 841 iommulib_iommu_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, 842 ddi_dma_handle_t handle) 843 { 844 iommulib_nexops_t *nexops = lookup_nexops(dip); 845 if (nexops == NULL) 846 return (DDI_FAILURE); 847 return (nexops->nops_dma_unbindhdl(dip, rdip, handle)); 848 } 849 850 void 851 iommulib_iommu_dma_reset_cookies(dev_info_t *dip, ddi_dma_handle_t handle) 852 { 853 iommulib_nexops_t *nexops = lookup_nexops(dip); 854 nexops->nops_dma_reset_cookies(dip, handle); 855 } 856 857 int 858 iommulib_iommu_dma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle, 859 ddi_dma_cookie_t *cookiep, uint_t *ccountp) 860 { 861 iommulib_nexops_t *nexops = lookup_nexops(dip); 862 if (nexops == NULL) 863 return (DDI_FAILURE); 864 return (nexops->nops_dma_get_cookies(dip, handle, cookiep, ccountp)); 865 } 866 867 int 868 iommulib_iommu_dma_sync(dev_info_t *dip, dev_info_t *rdip, 869 ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags) 870 { 871 iommulib_nexops_t *nexops = lookup_nexops(dip); 872 if (nexops == NULL) 873 return (DDI_FAILURE); 874 return (nexops->nops_dma_sync(dip, rdip, handle, off, len, 875 cache_flags)); 876 } 877 878 int 879 iommulib_iommu_dma_win(dev_info_t *dip, dev_info_t *rdip, 880 ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp, 881 ddi_dma_cookie_t *cookiep, uint_t *ccountp) 882 { 883 iommulib_nexops_t *nexops = lookup_nexops(dip); 884 if (nexops == NULL) 885 return (DDI_FAILURE); 886 return (nexops->nops_dma_win(dip, rdip, handle, win, offp, lenp, 887 cookiep, ccountp)); 888 } 889 890 int 891 iommulib_iommu_dma_map(dev_info_t *dip, dev_info_t *rdip, 892 struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep) 893 { 894 iommulib_nexops_t *nexops = lookup_nexops(dip); 895 if (nexops == NULL) 896 return (DDI_FAILURE); 897 return (nexops->nops_dma_map(dip, rdip, dmareq, handlep)); 898 } 899 900 int 901 iommulib_iommu_dma_mctl(dev_info_t *dip, dev_info_t *rdip, 902 ddi_dma_handle_t handle, enum ddi_dma_ctlops request, off_t *offp, 903 size_t *lenp, caddr_t *objpp, uint_t cache_flags) 904 { 905 iommulib_nexops_t *nexops = lookup_nexops(dip); 906 if (nexops == NULL) 907 return (DDI_FAILURE); 908 return (nexops->nops_dma_mctl(dip, rdip, handle, request, offp, lenp, 909 objpp, cache_flags)); 910 } 911 912 int 913 iommulib_iommu_getunitid(iommulib_handle_t handle, uint64_t *unitidp) 914 { 915 iommulib_unit_t *unitp; 916 uint64_t unitid; 917 918 unitp = (iommulib_unit_t *)handle; 919 920 ASSERT(unitp); 921 ASSERT(unitidp); 922 923 mutex_enter(&unitp->ilu_lock); 924 unitid = unitp->ilu_unitid; 925 mutex_exit(&unitp->ilu_lock); 926 927 ASSERT(unitid > 0); 928 *unitidp = (uint64_t)unitid; 929 930 return (DDI_SUCCESS); 931 } 932 933 dev_info_t * 934 iommulib_iommu_getdip(iommulib_handle_t handle) 935 { 936 iommulib_unit_t *unitp; 937 dev_info_t *dip; 938 939 unitp = (iommulib_unit_t *)handle; 940 941 ASSERT(unitp); 942 943 mutex_enter(&unitp->ilu_lock); 944 dip = unitp->ilu_dip; 945 ASSERT(dip); 946 ndi_hold_devi(dip); 947 mutex_exit(&unitp->ilu_lock); 948 949 return (dip); 950 } 951 952 iommulib_ops_t * 953 iommulib_iommu_getops(iommulib_handle_t handle) 954 { 955 iommulib_unit_t *unitp; 956 iommulib_ops_t *ops; 957 958 unitp = (iommulib_unit_t *)handle; 959 960 ASSERT(unitp); 961 962 mutex_enter(&unitp->ilu_lock); 963 ops = unitp->ilu_ops; 964 mutex_exit(&unitp->ilu_lock); 965 966 ASSERT(ops); 967 968 return (ops); 969 } 970 971 void * 972 iommulib_iommu_getdata(iommulib_handle_t handle) 973 { 974 iommulib_unit_t *unitp; 975 void *data; 976 977 unitp = (iommulib_unit_t *)handle; 978 979 ASSERT(unitp); 980 981 mutex_enter(&unitp->ilu_lock); 982 data = unitp->ilu_data; 983 mutex_exit(&unitp->ilu_lock); 984 985 ASSERT(data); 986 987 return (data); 988 } 989 990 /* 991 * Internal routines 992 */ 993 994 static uint32_t 995 hashfn(uint64_t ptr) 996 { 997 return (ptr % iommulib_cache_size); 998 } 999 1000 static int 1001 lookup_cache(dev_info_t *rdip, iommulib_unit_t **unitpp) 1002 { 1003 uint64_t idx; 1004 iommulib_cache_t *cachep; 1005 iommulib_unit_t *unitp; 1006 int retval = DDI_FAILURE; 1007 1008 *unitpp = NULL; 1009 1010 mutex_enter(&iommulib_lock); 1011 mutex_enter(&iommulib_cache_lock); 1012 1013 ASSERT(iommulib_cache); 1014 1015 idx = hashfn((uint64_t)(uintptr_t)rdip); 1016 1017 ASSERT(idx < iommulib_cache_size); 1018 1019 for (cachep = iommulib_cache[idx]; cachep; 1020 cachep = cachep->cache_next) { 1021 if (cachep->cache_rdip == rdip) 1022 break; 1023 } 1024 1025 if (cachep != NULL) { 1026 unitp = cachep->cache_unit; 1027 mutex_enter(&unitp->ilu_lock); 1028 unitp->ilu_ref++; 1029 mutex_exit(&unitp->ilu_lock); 1030 *unitpp = unitp; 1031 retval = DDI_SUCCESS; 1032 } 1033 1034 mutex_exit(&iommulib_cache_lock); 1035 mutex_exit(&iommulib_lock); 1036 return (retval); 1037 } 1038 1039 static void 1040 insert_cache(dev_info_t *rdip, iommulib_unit_t *unitp) 1041 { 1042 uint32_t idx; 1043 iommulib_cache_t *cachep; 1044 1045 mutex_enter(&iommulib_lock); 1046 mutex_enter(&iommulib_cache_lock); 1047 1048 ASSERT(iommulib_cache); 1049 1050 idx = hashfn((uint64_t)(uintptr_t)rdip); 1051 1052 ASSERT(idx < iommulib_cache_size); 1053 1054 for (cachep = iommulib_cache[idx]; cachep; 1055 cachep = cachep->cache_next) { 1056 if (cachep->cache_rdip == rdip) 1057 break; 1058 } 1059 1060 if (cachep == NULL) { 1061 cachep = kmem_zalloc(sizeof (iommulib_cache_t), KM_SLEEP); 1062 cachep->cache_rdip = rdip; 1063 cachep->cache_unit = unitp; /* ref-count set by caller */ 1064 cachep->cache_prev = NULL; 1065 cachep->cache_next = iommulib_cache[idx]; 1066 if (cachep->cache_next) 1067 cachep->cache_next->cache_prev = cachep; 1068 iommulib_cache[idx] = cachep; 1069 } 1070 1071 mutex_exit(&iommulib_cache_lock); 1072 mutex_exit(&iommulib_lock); 1073 } 1074