1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "@(#)iommulib.c 1.6 08/09/07 SMI" 27 28 #include <sys/sunddi.h> 29 #include <sys/sunndi.h> 30 #include <sys/errno.h> 31 #include <sys/modctl.h> 32 #include <sys/iommulib.h> 33 34 /* ******** Type definitions private to this file ********************** */ 35 36 /* 1 per IOMMU unit. There may be more than one per dip */ 37 typedef struct iommulib_unit { 38 kmutex_t ilu_lock; 39 uint64_t ilu_ref; 40 uint32_t ilu_unitid; 41 dev_info_t *ilu_dip; 42 iommulib_ops_t *ilu_ops; 43 void* ilu_data; 44 struct iommulib_unit *ilu_next; 45 struct iommulib_unit *ilu_prev; 46 } iommulib_unit_t; 47 48 typedef struct iommulib_cache { 49 dev_info_t *cache_rdip; 50 iommulib_unit_t *cache_unit; 51 struct iommulib_cache *cache_next; 52 struct iommulib_cache *cache_prev; 53 } iommulib_cache_t; 54 55 typedef struct iommulib_nex { 56 dev_info_t *nex_dip; 57 iommulib_nexops_t nex_ops; 58 struct iommulib_nex *nex_next; 59 struct iommulib_nex *nex_prev; 60 } iommulib_nex_t; 61 62 /* ********* Function prototypes ********************* */ 63 static int lookup_cache(dev_info_t *rdip, iommulib_unit_t **unitpp); 64 static void insert_cache(dev_info_t *rdip, iommulib_unit_t *unitp); 65 66 67 /* ********* Globals ************************ */ 68 69 /* IOMMU side: Following data protected by lock */ 70 static kmutex_t iommulib_lock; 71 static iommulib_unit_t *iommulib_list; 72 static uint64_t iommulib_unit_ids = 0; 73 static uint64_t iommulib_num_units = 0; 74 75 /* rootnex side data */ 76 77 static kmutex_t iommulib_nexus_lock; 78 static iommulib_nex_t *iommulib_nexus_list; 79 80 #define IOMMULIB_CACHE_SIZE 256 81 static kmutex_t iommulib_cache_lock; 82 static iommulib_cache_t **iommulib_cache; 83 84 /* tunable via /etc/system */ 85 static uint_t iommulib_cache_size = IOMMULIB_CACHE_SIZE; 86 87 /* can be set atomically without lock */ 88 static volatile uint32_t iommulib_fini; 89 90 /* debug flag */ 91 static int iommulib_debug; 92 93 /* 94 * Module linkage information for the kernel. 95 */ 96 static struct modlmisc modlmisc = { 97 &mod_miscops, "IOMMU library module" 98 }; 99 100 static struct modlinkage modlinkage = { 101 MODREV_1, (void *)&modlmisc, NULL 102 }; 103 104 int 105 _init(void) 106 { 107 /* 108 * static mutexes automagically initialized 109 * by being allocated in zeroed memory 110 */ 111 mutex_enter(&iommulib_cache_lock); 112 iommulib_cache = kmem_zalloc( 113 sizeof (iommulib_cache_t *) * iommulib_cache_size, KM_SLEEP); 114 mutex_exit(&iommulib_cache_lock); 115 116 return (mod_install(&modlinkage)); 117 } 118 119 int 120 _fini(void) 121 { 122 mutex_enter(&iommulib_lock); 123 if (iommulib_list != NULL || iommulib_nexus_list != NULL) { 124 mutex_exit(&iommulib_lock); 125 return (EBUSY); 126 } 127 iommulib_fini = 1; 128 129 mutex_enter(&iommulib_cache_lock); 130 kmem_free(iommulib_cache, 131 sizeof (iommulib_cache_t *) * iommulib_cache_size); 132 iommulib_cache = NULL; 133 mutex_exit(&iommulib_cache_lock); 134 135 mutex_exit(&iommulib_lock); 136 return (mod_remove(&modlinkage)); 137 } 138 139 int 140 _info(struct modinfo *modinfop) 141 { 142 return (mod_info(&modlinkage, modinfop)); 143 } 144 145 /* 146 * Routines with iommulib_iommu_* are invoked from the 147 * IOMMU driver. 148 * Routines with iommulib_nex* are invoked from the 149 * nexus driver (typically rootnex) 150 */ 151 152 int 153 iommulib_nexus_register(dev_info_t *dip, iommulib_nexops_t *nexops, 154 iommulib_nexhandle_t *handle) 155 { 156 iommulib_nex_t *nexp; 157 int instance = ddi_get_instance(dip); 158 const char *driver = ddi_driver_name(dip); 159 dev_info_t *pdip = ddi_get_parent(dip); 160 const char *f = "iommulib_nexus_register"; 161 162 ASSERT(nexops); 163 ASSERT(handle); 164 165 *handle = NULL; 166 167 /* 168 * Root node is never busy held 169 */ 170 if (dip != ddi_root_node() && (i_ddi_node_state(dip) < DS_PROBED || 171 !DEVI_BUSY_OWNED(pdip))) { 172 cmn_err(CE_WARN, "%s: NEXUS devinfo node not in DS_PROBED " 173 "or busy held for nexops vector (%p). Failing registration", 174 f, (void *)nexops); 175 return (DDI_FAILURE); 176 } 177 178 if (nexops->nops_vers != IOMMU_NEXOPS_VERSION) { 179 cmn_err(CE_WARN, "%s: %s%d: Invalid IOMMULIB nexops version " 180 "in nexops vector (%p). Failing NEXUS registration", 181 f, driver, instance, (void *)nexops); 182 return (DDI_FAILURE); 183 } 184 185 ASSERT(nexops->nops_data == NULL); 186 187 if (nexops->nops_id == NULL) { 188 cmn_err(CE_WARN, "%s: %s%d: NULL ID field. " 189 "Failing registration for nexops vector: %p", 190 f, driver, instance, (void *)nexops); 191 return (DDI_FAILURE); 192 } 193 194 if (nexops->nops_dma_allochdl == NULL) { 195 cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_allochdl op. " 196 "Failing registration for ops vector: %p", f, 197 driver, instance, (void *)nexops); 198 return (DDI_FAILURE); 199 } 200 201 if (nexops->nops_dma_freehdl == NULL) { 202 cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_freehdl op. " 203 "Failing registration for ops vector: %p", f, 204 driver, instance, (void *)nexops); 205 return (DDI_FAILURE); 206 } 207 208 if (nexops->nops_dma_bindhdl == NULL) { 209 cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_bindhdl op. " 210 "Failing registration for ops vector: %p", f, 211 driver, instance, (void *)nexops); 212 return (DDI_FAILURE); 213 } 214 215 if (nexops->nops_dma_sync == NULL) { 216 cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_sync op. " 217 "Failing registration for ops vector: %p", f, 218 driver, instance, (void *)nexops); 219 return (DDI_FAILURE); 220 } 221 222 223 if (nexops->nops_dma_reset_cookies == NULL) { 224 cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_reset_cookies op. " 225 "Failing registration for ops vector: %p", f, 226 driver, instance, (void *)nexops); 227 return (DDI_FAILURE); 228 } 229 230 if (nexops->nops_dma_get_cookies == NULL) { 231 cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_get_cookies op. " 232 "Failing registration for ops vector: %p", f, 233 driver, instance, (void *)nexops); 234 return (DDI_FAILURE); 235 } 236 237 if (nexops->nops_dma_win == NULL) { 238 cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_win op. " 239 "Failing registration for ops vector: %p", f, 240 driver, instance, (void *)nexops); 241 return (DDI_FAILURE); 242 } 243 244 /* Check for legacy ops */ 245 if (nexops->nops_dma_map == NULL) { 246 cmn_err(CE_WARN, "%s: %s%d: NULL legacy nops_dma_map op. " 247 "Failing registration for ops vector: %p", f, 248 driver, instance, (void *)nexops); 249 return (DDI_FAILURE); 250 } 251 252 if (nexops->nops_dma_mctl == NULL) { 253 cmn_err(CE_WARN, "%s: %s%d: NULL legacy nops_dma_mctl op. " 254 "Failing registration for ops vector: %p", f, 255 driver, instance, (void *)nexops); 256 return (DDI_FAILURE); 257 } 258 259 nexp = kmem_zalloc(sizeof (iommulib_nex_t), KM_SLEEP); 260 261 mutex_enter(&iommulib_lock); 262 if (iommulib_fini == 1) { 263 mutex_exit(&iommulib_lock); 264 cmn_err(CE_WARN, "%s: IOMMULIB unloading. " 265 "Failing NEXUS register.", f); 266 kmem_free(nexp, sizeof (iommulib_nex_t)); 267 return (DDI_FAILURE); 268 } 269 270 /* 271 * fini/register race conditions have been handled. Now create the 272 * nexus struct 273 */ 274 ndi_hold_devi(dip); 275 nexp->nex_dip = dip; 276 nexp->nex_ops = *nexops; 277 278 mutex_enter(&iommulib_nexus_lock); 279 nexp->nex_next = iommulib_nexus_list; 280 iommulib_nexus_list = nexp; 281 nexp->nex_prev = NULL; 282 283 if (nexp->nex_next != NULL) 284 nexp->nex_next->nex_prev = nexp; 285 286 mutex_exit(&iommulib_nexus_lock); 287 mutex_exit(&iommulib_lock); 288 289 cmn_err(CE_NOTE, "!%s: %s%d: Succesfully registered NEXUS %s " 290 "nexops=%p", f, driver, instance, ddi_node_name(dip), 291 (void *)nexops); 292 293 *handle = nexp; 294 295 return (DDI_SUCCESS); 296 } 297 298 int 299 iommulib_nexus_unregister(iommulib_nexhandle_t handle) 300 { 301 dev_info_t *dip; 302 int instance; 303 const char *driver; 304 iommulib_nex_t *nexp = (iommulib_nex_t *)handle; 305 const char *f = "iommulib_nexus_unregister"; 306 307 ASSERT(nexp); 308 309 mutex_enter(&iommulib_nexus_lock); 310 311 dip = nexp->nex_dip; 312 driver = ddi_driver_name(dip); 313 instance = ddi_get_instance(dip); 314 315 /* A future enhancement would be to add ref-counts */ 316 317 if (nexp->nex_prev == NULL) { 318 iommulib_nexus_list = nexp->nex_next; 319 } else { 320 nexp->nex_prev->nex_next = nexp->nex_next; 321 } 322 323 if (nexp->nex_next != NULL) 324 nexp->nex_next->nex_prev = nexp->nex_prev; 325 326 mutex_exit(&iommulib_nexus_lock); 327 328 kmem_free(nexp, sizeof (iommulib_nex_t)); 329 330 cmn_err(CE_WARN, "%s: %s%d: NEXUS (%s) handle successfully " 331 "unregistered from IOMMULIB", f, driver, instance, 332 ddi_node_name(dip)); 333 334 ndi_rele_devi(dip); 335 336 return (DDI_SUCCESS); 337 } 338 339 static iommulib_nexops_t * 340 lookup_nexops(dev_info_t *dip) 341 { 342 iommulib_nex_t *nexp; 343 344 mutex_enter(&iommulib_nexus_lock); 345 nexp = iommulib_nexus_list; 346 while (nexp) { 347 if (nexp->nex_dip == dip) 348 break; 349 nexp = nexp->nex_next; 350 } 351 mutex_exit(&iommulib_nexus_lock); 352 353 return (nexp ? &nexp->nex_ops : NULL); 354 } 355 356 int 357 iommulib_iommu_register(dev_info_t *dip, iommulib_ops_t *ops, 358 iommulib_handle_t *handle) 359 { 360 const char *vendor; 361 iommulib_unit_t *unitp; 362 int instance = ddi_get_instance(dip); 363 const char *driver = ddi_driver_name(dip); 364 dev_info_t *pdip = ddi_get_parent(dip); 365 const char *f = "iommulib_register"; 366 367 ASSERT(ops); 368 ASSERT(handle); 369 370 if (i_ddi_node_state(dip) < DS_PROBED || !DEVI_BUSY_OWNED(pdip)) { 371 cmn_err(CE_WARN, "%s: devinfo node not in DS_PROBED or " 372 "busy held for ops vector (%p). Failing registration", 373 f, (void *)ops); 374 return (DDI_FAILURE); 375 } 376 377 378 if (ops->ilops_vers != IOMMU_OPS_VERSION) { 379 cmn_err(CE_WARN, "%s: %s%d: Invalid IOMMULIB ops version " 380 "in ops vector (%p). Failing registration", f, driver, 381 instance, (void *)ops); 382 return (DDI_FAILURE); 383 } 384 385 switch (ops->ilops_vendor) { 386 case AMD_IOMMU: 387 vendor = "AMD"; 388 break; 389 case INTEL_IOMMU: 390 vendor = "Intel"; 391 break; 392 case INVALID_VENDOR: 393 cmn_err(CE_WARN, "%s: %s%d: vendor field (%x) not initialized. " 394 "Failing registration for ops vector: %p", f, 395 driver, instance, ops->ilops_vendor, (void *)ops); 396 return (DDI_FAILURE); 397 default: 398 cmn_err(CE_WARN, "%s: %s%d: Invalid vendor field (%x). " 399 "Failing registration for ops vector: %p", f, 400 driver, instance, ops->ilops_vendor, (void *)ops); 401 return (DDI_FAILURE); 402 } 403 404 cmn_err(CE_NOTE, "%s: %s%d: Detected IOMMU registration from vendor %s", 405 f, driver, instance, vendor); 406 407 if (ops->ilops_data == NULL) { 408 cmn_err(CE_WARN, "%s: %s%d: NULL IOMMU data field. " 409 "Failing registration for ops vector: %p", f, 410 driver, instance, (void *)ops); 411 return (DDI_FAILURE); 412 } 413 414 if (ops->ilops_id == NULL) { 415 cmn_err(CE_WARN, "%s: %s%d: NULL ID field. " 416 "Failing registration for ops vector: %p", f, 417 driver, instance, (void *)ops); 418 return (DDI_FAILURE); 419 } 420 421 if (ops->ilops_probe == NULL) { 422 cmn_err(CE_WARN, "%s: %s%d: NULL probe op. " 423 "Failing registration for ops vector: %p", f, 424 driver, instance, (void *)ops); 425 return (DDI_FAILURE); 426 } 427 428 if (ops->ilops_dma_allochdl == NULL) { 429 cmn_err(CE_WARN, "%s: %s%d: NULL dma_allochdl op. " 430 "Failing registration for ops vector: %p", f, 431 driver, instance, (void *)ops); 432 return (DDI_FAILURE); 433 } 434 435 if (ops->ilops_dma_freehdl == NULL) { 436 cmn_err(CE_WARN, "%s: %s%d: NULL dma_freehdl op. " 437 "Failing registration for ops vector: %p", f, 438 driver, instance, (void *)ops); 439 return (DDI_FAILURE); 440 } 441 442 if (ops->ilops_dma_bindhdl == NULL) { 443 cmn_err(CE_WARN, "%s: %s%d: NULL dma_bindhdl op. " 444 "Failing registration for ops vector: %p", f, 445 driver, instance, (void *)ops); 446 return (DDI_FAILURE); 447 } 448 449 if (ops->ilops_dma_sync == NULL) { 450 cmn_err(CE_WARN, "%s: %s%d: NULL dma_sync op. " 451 "Failing registration for ops vector: %p", f, 452 driver, instance, (void *)ops); 453 return (DDI_FAILURE); 454 } 455 456 if (ops->ilops_dma_win == NULL) { 457 cmn_err(CE_WARN, "%s: %s%d: NULL dma_win op. " 458 "Failing registration for ops vector: %p", f, 459 driver, instance, (void *)ops); 460 return (DDI_FAILURE); 461 } 462 463 /* Check for legacy ops */ 464 if (ops->ilops_dma_map == NULL) { 465 cmn_err(CE_WARN, "%s: %s%d: NULL legacy dma_map op. " 466 "Failing registration for ops vector: %p", f, 467 driver, instance, (void *)ops); 468 return (DDI_FAILURE); 469 } 470 471 if (ops->ilops_dma_mctl == NULL) { 472 cmn_err(CE_WARN, "%s: %s%d: NULL legacy dma_mctl op. " 473 "Failing registration for ops vector: %p", f, 474 driver, instance, (void *)ops); 475 return (DDI_FAILURE); 476 } 477 478 unitp = kmem_zalloc(sizeof (iommulib_unit_t), KM_SLEEP); 479 mutex_enter(&iommulib_lock); 480 if (iommulib_fini == 1) { 481 mutex_exit(&iommulib_lock); 482 cmn_err(CE_WARN, "%s: IOMMULIB unloading. Failing register.", 483 f); 484 kmem_free(unitp, sizeof (iommulib_unit_t)); 485 return (DDI_FAILURE); 486 } 487 488 /* 489 * fini/register race conditions have been handled. Now create the 490 * IOMMU unit 491 */ 492 mutex_init(&unitp->ilu_lock, NULL, MUTEX_DEFAULT, NULL); 493 494 mutex_enter(&unitp->ilu_lock); 495 unitp->ilu_unitid = ++iommulib_unit_ids; 496 unitp->ilu_ref = 0; 497 ndi_hold_devi(dip); 498 unitp->ilu_dip = dip; 499 unitp->ilu_ops = ops; 500 unitp->ilu_data = ops->ilops_data; 501 502 unitp->ilu_next = iommulib_list; 503 unitp->ilu_prev = NULL; 504 iommulib_list->ilu_prev = unitp; 505 iommulib_list = unitp; 506 507 mutex_exit(&unitp->ilu_lock); 508 509 iommulib_num_units++; 510 511 *handle = unitp; 512 513 mutex_exit(&iommulib_lock); 514 515 cmn_err(CE_NOTE, "%s: %s%d: Succesfully registered IOMMU unit " 516 "from vendor=%s, ops=%p, data=%p, IOMMULIB unitid=%u", 517 f, driver, instance, vendor, (void *)ops, (void *)unitp->ilu_data, 518 unitp->ilu_unitid); 519 520 return (DDI_SUCCESS); 521 } 522 523 int 524 iommulib_iommu_unregister(iommulib_handle_t handle) 525 { 526 uint32_t unitid; 527 dev_info_t *dip; 528 int instance; 529 const char *driver; 530 iommulib_unit_t *unitp = (iommulib_unit_t *)handle; 531 const char *f = "iommulib_unregister"; 532 533 ASSERT(unitp); 534 535 mutex_enter(&iommulib_lock); 536 mutex_enter(&unitp->ilu_lock); 537 538 unitid = unitp->ilu_unitid; 539 dip = unitp->ilu_dip; 540 driver = ddi_driver_name(dip); 541 instance = ddi_get_instance(dip); 542 543 if (unitp->ilu_ref != 0) { 544 mutex_exit(&unitp->ilu_lock); 545 mutex_exit(&iommulib_lock); 546 cmn_err(CE_WARN, "%s: %s%d: IOMMULIB handle is busy. Cannot " 547 "unregister IOMMULIB unitid %u", 548 f, driver, instance, unitid); 549 return (DDI_FAILURE); 550 } 551 unitp->ilu_unitid = 0; 552 ASSERT(unitp->ilu_ref == 0); 553 554 if (unitp->ilu_prev == NULL) { 555 iommulib_list = unitp->ilu_next; 556 unitp->ilu_next->ilu_prev = NULL; 557 } else { 558 unitp->ilu_prev->ilu_next = unitp->ilu_next; 559 unitp->ilu_next->ilu_prev = unitp->ilu_prev; 560 } 561 562 iommulib_num_units--; 563 564 mutex_exit(&unitp->ilu_lock); 565 566 mutex_destroy(&unitp->ilu_lock); 567 kmem_free(unitp, sizeof (iommulib_unit_t)); 568 569 mutex_exit(&iommulib_lock); 570 571 cmn_err(CE_WARN, "%s: %s%d: IOMMULIB handle (unitid=%u) successfully " 572 "unregistered", f, driver, instance, unitid); 573 574 ndi_rele_devi(dip); 575 576 return (DDI_SUCCESS); 577 } 578 579 int 580 iommulib_nex_open(dev_info_t *rdip, uint_t *errorp) 581 { 582 iommulib_unit_t *unitp; 583 int instance = ddi_get_instance(rdip); 584 const char *driver = ddi_driver_name(rdip); 585 const char *f = "iommulib_nex_open"; 586 587 *errorp = 0; 588 DEVI(rdip)->devi_iommulib_handle = NULL; 589 590 /* prevent use of IOMMU for AMD IOMMU's DMA */ 591 if (strcmp(driver, "amd_iommu") == 0) { 592 *errorp = ENOTSUP; 593 return (DDI_FAILURE); 594 } 595 596 if (lookup_cache(rdip, &unitp) == DDI_SUCCESS) { 597 DEVI(rdip)->devi_iommulib_handle = 598 (iommulib_handle_t)unitp; 599 return (DDI_SUCCESS); 600 } 601 602 603 /* 604 * Ok this dip is not in the cache. Use the probe entry point 605 * to determine in a hardware specific manner whether this 606 * dip is controlled by an IOMMU. If yes, insert it into the 607 * cache and return the handle corresponding to the IOMMU unit. 608 */ 609 610 mutex_enter(&iommulib_lock); 611 for (unitp = iommulib_list; unitp; unitp = unitp->ilu_next) { 612 if (unitp->ilu_ops->ilops_probe(rdip) == DDI_SUCCESS) 613 break; 614 } 615 616 if (unitp == NULL) { 617 mutex_exit(&iommulib_lock); 618 if (iommulib_debug) { 619 char buf[MAXPATHLEN]; 620 cmn_err(CE_WARN, "%s: %s%d: devinfo node (%p): is not " 621 "controlled by an IOMMU: path=%s", f, driver, 622 instance, (void *)rdip, ddi_pathname(rdip, buf)); 623 } 624 *errorp = ENOTSUP; 625 return (DDI_FAILURE); 626 } 627 628 mutex_enter(&unitp->ilu_lock); 629 unitp->ilu_ref++; 630 mutex_exit(&unitp->ilu_lock); 631 mutex_exit(&iommulib_lock); 632 633 insert_cache(rdip, unitp); 634 635 DEVI(rdip)->devi_iommulib_handle = unitp; 636 637 return (DDI_SUCCESS); 638 } 639 640 void 641 iommulib_nex_close(dev_info_t *rdip) 642 { 643 char buf[MAXPATHLEN]; 644 iommulib_unit_t *unitp; 645 const char *driver; 646 int instance; 647 uint32_t unitid; 648 const char *f = "iommulib_nex_close"; 649 650 unitp = (iommulib_unit_t *)DEVI(rdip)->devi_iommulib_handle; 651 if (unitp == NULL) 652 return; 653 654 DEVI(rdip)->devi_iommulib_handle = NULL; 655 656 /* 657 * Assume we don't support DR of IOMMUs. The mapping of 658 * dips to IOMMU units should not change. Let the mapping 659 * persist in the cache. 660 */ 661 662 mutex_enter(&iommulib_lock); 663 mutex_enter(&unitp->ilu_lock); 664 unitid = unitp->ilu_unitid; 665 driver = ddi_driver_name(unitp->ilu_dip); 666 instance = ddi_get_instance(unitp->ilu_dip); 667 (void) ddi_pathname(rdip, buf); 668 unitp->ilu_ref--; 669 mutex_exit(&unitp->ilu_lock); 670 mutex_exit(&iommulib_lock); 671 672 if (iommulib_debug) { 673 cmn_err(CE_NOTE, "%s: %s%d: closing IOMMU for dip (%p), " 674 "unitid=%u rdip path = %s", f, driver, instance, 675 (void *)rdip, unitid, buf); 676 } 677 } 678 679 int 680 iommulib_nexdma_allochdl(dev_info_t *dip, dev_info_t *rdip, 681 ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), 682 caddr_t arg, ddi_dma_handle_t *dma_handlep) 683 { 684 iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle; 685 iommulib_unit_t *unitp = (iommulib_unit_t *)handle; 686 687 ASSERT(unitp); 688 689 /* No need to grab lock - the handle is reference counted */ 690 return (unitp->ilu_ops->ilops_dma_allochdl(handle, dip, rdip, 691 attr, waitfp, arg, dma_handlep)); 692 } 693 694 int 695 iommulib_nexdma_freehdl(dev_info_t *dip, dev_info_t *rdip, 696 ddi_dma_handle_t dma_handle) 697 { 698 int error; 699 iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle; 700 iommulib_unit_t *unitp = (iommulib_unit_t *)handle; 701 702 ASSERT(unitp); 703 704 /* No need to grab lock - the handle is reference counted */ 705 error = unitp->ilu_ops->ilops_dma_freehdl(handle, dip, 706 rdip, dma_handle); 707 708 iommulib_nex_close(rdip); 709 710 return (error); 711 } 712 713 int 714 iommulib_nexdma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 715 ddi_dma_handle_t dma_handle, struct ddi_dma_req *dmareq, 716 ddi_dma_cookie_t *cookiep, uint_t *ccountp) 717 { 718 iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle; 719 iommulib_unit_t *unitp = (iommulib_unit_t *)handle; 720 721 ASSERT(unitp); 722 723 /* No need to grab lock - the handle is reference counted */ 724 return (unitp->ilu_ops->ilops_dma_bindhdl(handle, dip, rdip, dma_handle, 725 dmareq, cookiep, ccountp)); 726 } 727 728 int 729 iommulib_nexdma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, 730 ddi_dma_handle_t dma_handle) 731 { 732 iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle; 733 iommulib_unit_t *unitp = (iommulib_unit_t *)handle; 734 735 ASSERT(unitp); 736 737 /* No need to grab lock - the handle is reference counted */ 738 return (unitp->ilu_ops->ilops_dma_unbindhdl(handle, dip, rdip, 739 dma_handle)); 740 } 741 742 int 743 iommulib_nexdma_sync(dev_info_t *dip, dev_info_t *rdip, 744 ddi_dma_handle_t dma_handle, off_t off, size_t len, 745 uint_t cache_flags) 746 { 747 iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle; 748 iommulib_unit_t *unitp = (iommulib_unit_t *)handle; 749 750 ASSERT(unitp); 751 752 /* No need to grab lock - the handle is reference counted */ 753 return (unitp->ilu_ops->ilops_dma_sync(handle, dip, rdip, dma_handle, 754 off, len, cache_flags)); 755 } 756 757 int 758 iommulib_nexdma_win(dev_info_t *dip, dev_info_t *rdip, 759 ddi_dma_handle_t dma_handle, uint_t win, off_t *offp, size_t *lenp, 760 ddi_dma_cookie_t *cookiep, uint_t *ccountp) 761 { 762 iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle; 763 iommulib_unit_t *unitp = (iommulib_unit_t *)handle; 764 765 ASSERT(unitp); 766 767 /* No need to grab lock - the handle is reference counted */ 768 return (unitp->ilu_ops->ilops_dma_win(handle, dip, rdip, dma_handle, 769 win, offp, lenp, cookiep, ccountp)); 770 } 771 772 /* Obsolete DMA routines */ 773 774 int 775 iommulib_nexdma_map(dev_info_t *dip, dev_info_t *rdip, 776 struct ddi_dma_req *dmareq, ddi_dma_handle_t *dma_handle) 777 { 778 iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle; 779 iommulib_unit_t *unitp = handle; 780 781 ASSERT(unitp); 782 783 /* No need to grab lock - the handle is reference counted */ 784 return (unitp->ilu_ops->ilops_dma_map(handle, dip, rdip, dmareq, 785 dma_handle)); 786 } 787 788 int 789 iommulib_nexdma_mctl(dev_info_t *dip, dev_info_t *rdip, 790 ddi_dma_handle_t dma_handle, enum ddi_dma_ctlops request, 791 off_t *offp, size_t *lenp, caddr_t *objpp, uint_t cache_flags) 792 { 793 iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle; 794 iommulib_unit_t *unitp = (iommulib_unit_t *)handle; 795 796 ASSERT(unitp); 797 798 /* No need to grab lock - the handle is reference counted */ 799 return (unitp->ilu_ops->ilops_dma_mctl(handle, dip, rdip, dma_handle, 800 request, offp, lenp, objpp, cache_flags)); 801 } 802 803 /* Utility routines invoked by IOMMU drivers */ 804 int 805 iommulib_iommu_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, 806 ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg, 807 ddi_dma_handle_t *handlep) 808 { 809 iommulib_nexops_t *nexops = lookup_nexops(dip); 810 if (nexops == NULL) 811 return (DDI_FAILURE); 812 return (nexops->nops_dma_allochdl(dip, rdip, attr, waitfp, arg, 813 handlep)); 814 } 815 816 int 817 iommulib_iommu_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, 818 ddi_dma_handle_t handle) 819 { 820 iommulib_nexops_t *nexops = lookup_nexops(dip); 821 if (nexops == NULL) 822 return (DDI_FAILURE); 823 return (nexops->nops_dma_freehdl(dip, rdip, handle)); 824 } 825 826 int 827 iommulib_iommu_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 828 ddi_dma_handle_t handle, struct ddi_dma_req *dmareq, 829 ddi_dma_cookie_t *cookiep, uint_t *ccountp) 830 { 831 iommulib_nexops_t *nexops = lookup_nexops(dip); 832 if (nexops == NULL) 833 return (DDI_FAILURE); 834 return (nexops->nops_dma_bindhdl(dip, rdip, handle, dmareq, 835 cookiep, ccountp)); 836 } 837 838 int 839 iommulib_iommu_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, 840 ddi_dma_handle_t handle) 841 { 842 iommulib_nexops_t *nexops = lookup_nexops(dip); 843 if (nexops == NULL) 844 return (DDI_FAILURE); 845 return (nexops->nops_dma_unbindhdl(dip, rdip, handle)); 846 } 847 848 void 849 iommulib_iommu_dma_reset_cookies(dev_info_t *dip, ddi_dma_handle_t handle) 850 { 851 iommulib_nexops_t *nexops = lookup_nexops(dip); 852 nexops->nops_dma_reset_cookies(dip, handle); 853 } 854 855 int 856 iommulib_iommu_dma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle, 857 ddi_dma_cookie_t *cookiep, uint_t *ccountp) 858 { 859 iommulib_nexops_t *nexops = lookup_nexops(dip); 860 if (nexops == NULL) 861 return (DDI_FAILURE); 862 return (nexops->nops_dma_get_cookies(dip, handle, cookiep, ccountp)); 863 } 864 865 int 866 iommulib_iommu_dma_sync(dev_info_t *dip, dev_info_t *rdip, 867 ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags) 868 { 869 iommulib_nexops_t *nexops = lookup_nexops(dip); 870 if (nexops == NULL) 871 return (DDI_FAILURE); 872 return (nexops->nops_dma_sync(dip, rdip, handle, off, len, 873 cache_flags)); 874 } 875 876 int 877 iommulib_iommu_dma_win(dev_info_t *dip, dev_info_t *rdip, 878 ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp, 879 ddi_dma_cookie_t *cookiep, uint_t *ccountp) 880 { 881 iommulib_nexops_t *nexops = lookup_nexops(dip); 882 if (nexops == NULL) 883 return (DDI_FAILURE); 884 return (nexops->nops_dma_win(dip, rdip, handle, win, offp, lenp, 885 cookiep, ccountp)); 886 } 887 888 int 889 iommulib_iommu_dma_map(dev_info_t *dip, dev_info_t *rdip, 890 struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep) 891 { 892 iommulib_nexops_t *nexops = lookup_nexops(dip); 893 if (nexops == NULL) 894 return (DDI_FAILURE); 895 return (nexops->nops_dma_map(dip, rdip, dmareq, handlep)); 896 } 897 898 int 899 iommulib_iommu_dma_mctl(dev_info_t *dip, dev_info_t *rdip, 900 ddi_dma_handle_t handle, enum ddi_dma_ctlops request, off_t *offp, 901 size_t *lenp, caddr_t *objpp, uint_t cache_flags) 902 { 903 iommulib_nexops_t *nexops = lookup_nexops(dip); 904 if (nexops == NULL) 905 return (DDI_FAILURE); 906 return (nexops->nops_dma_mctl(dip, rdip, handle, request, offp, lenp, 907 objpp, cache_flags)); 908 } 909 910 int 911 iommulib_iommu_getunitid(iommulib_handle_t handle, uint64_t *unitidp) 912 { 913 iommulib_unit_t *unitp; 914 uint64_t unitid; 915 916 unitp = (iommulib_unit_t *)handle; 917 918 ASSERT(unitp); 919 ASSERT(unitidp); 920 921 mutex_enter(&unitp->ilu_lock); 922 unitid = unitp->ilu_unitid; 923 mutex_exit(&unitp->ilu_lock); 924 925 ASSERT(unitid > 0); 926 *unitidp = (uint64_t)unitid; 927 928 return (DDI_SUCCESS); 929 } 930 931 dev_info_t * 932 iommulib_iommu_getdip(iommulib_handle_t handle) 933 { 934 iommulib_unit_t *unitp; 935 dev_info_t *dip; 936 937 unitp = (iommulib_unit_t *)handle; 938 939 ASSERT(unitp); 940 941 mutex_enter(&unitp->ilu_lock); 942 dip = unitp->ilu_dip; 943 ASSERT(dip); 944 ndi_hold_devi(dip); 945 mutex_exit(&unitp->ilu_lock); 946 947 return (dip); 948 } 949 950 iommulib_ops_t * 951 iommulib_iommu_getops(iommulib_handle_t handle) 952 { 953 iommulib_unit_t *unitp; 954 iommulib_ops_t *ops; 955 956 unitp = (iommulib_unit_t *)handle; 957 958 ASSERT(unitp); 959 960 mutex_enter(&unitp->ilu_lock); 961 ops = unitp->ilu_ops; 962 mutex_exit(&unitp->ilu_lock); 963 964 ASSERT(ops); 965 966 return (ops); 967 } 968 969 void * 970 iommulib_iommu_getdata(iommulib_handle_t handle) 971 { 972 iommulib_unit_t *unitp; 973 void *data; 974 975 unitp = (iommulib_unit_t *)handle; 976 977 ASSERT(unitp); 978 979 mutex_enter(&unitp->ilu_lock); 980 data = unitp->ilu_data; 981 mutex_exit(&unitp->ilu_lock); 982 983 ASSERT(data); 984 985 return (data); 986 } 987 988 /* 989 * Internal routines 990 */ 991 992 static uint32_t 993 hashfn(uint64_t ptr) 994 { 995 return (ptr % iommulib_cache_size); 996 } 997 998 static int 999 lookup_cache(dev_info_t *rdip, iommulib_unit_t **unitpp) 1000 { 1001 uint64_t idx; 1002 iommulib_cache_t *cachep; 1003 iommulib_unit_t *unitp; 1004 int retval = DDI_FAILURE; 1005 1006 *unitpp = NULL; 1007 1008 mutex_enter(&iommulib_lock); 1009 mutex_enter(&iommulib_cache_lock); 1010 1011 ASSERT(iommulib_cache); 1012 1013 idx = hashfn((uint64_t)(uintptr_t)rdip); 1014 1015 ASSERT(idx < iommulib_cache_size); 1016 1017 for (cachep = iommulib_cache[idx]; cachep; 1018 cachep = cachep->cache_next) { 1019 if (cachep->cache_rdip == rdip) 1020 break; 1021 } 1022 1023 if (cachep != NULL) { 1024 unitp = cachep->cache_unit; 1025 mutex_enter(&unitp->ilu_lock); 1026 unitp->ilu_ref++; 1027 mutex_exit(&unitp->ilu_lock); 1028 *unitpp = unitp; 1029 retval = DDI_SUCCESS; 1030 } 1031 1032 mutex_exit(&iommulib_cache_lock); 1033 mutex_exit(&iommulib_lock); 1034 return (retval); 1035 } 1036 1037 static void 1038 insert_cache(dev_info_t *rdip, iommulib_unit_t *unitp) 1039 { 1040 uint32_t idx; 1041 iommulib_cache_t *cachep; 1042 1043 mutex_enter(&iommulib_lock); 1044 mutex_enter(&iommulib_cache_lock); 1045 1046 ASSERT(iommulib_cache); 1047 1048 idx = hashfn((uint64_t)(uintptr_t)rdip); 1049 1050 ASSERT(idx < iommulib_cache_size); 1051 1052 for (cachep = iommulib_cache[idx]; cachep; 1053 cachep = cachep->cache_next) { 1054 if (cachep->cache_rdip == rdip) 1055 break; 1056 } 1057 1058 if (cachep == NULL) { 1059 cachep = kmem_zalloc(sizeof (iommulib_cache_t), KM_SLEEP); 1060 cachep->cache_rdip = rdip; 1061 cachep->cache_unit = unitp; /* ref-count set by caller */ 1062 cachep->cache_prev = NULL; 1063 cachep->cache_next = iommulib_cache[idx]; 1064 if (cachep->cache_next) 1065 cachep->cache_next->cache_prev = cachep; 1066 iommulib_cache[idx] = cachep; 1067 } 1068 1069 mutex_exit(&iommulib_cache_lock); 1070 mutex_exit(&iommulib_lock); 1071 } 1072