// SPDX-License-Identifier: GPL-2.0
/*
 * intel-pasid.c - PASID idr, table and entry manipulation
 *
 * Copyright (C) 2018 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#define pr_fmt(fmt)	"DMAR: " fmt

#include <linux/bitops.h>
#include <linux/cpufeature.h>
#include <linux/dmar.h>
#include <linux/iommu.h>
#include <linux/memory.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/spinlock.h>

#include "iommu.h"
#include "pasid.h"
#include "../iommu-pages.h"

/*
 * Intel IOMMU system wide PASID name space:
 */
u32 intel_pasid_max_id = PASID_MAX;

/*
 * Per device pasid table management:
 */

/*
 * Allocate a pasid table for @dev. It should be called in a
 * single-thread context.
 */
int intel_pasid_alloc_table(struct device *dev)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_dir_entry *dir;
	u32 max_pasid = 0;
	int order, size;

	might_sleep();
	info = dev_iommu_priv_get(dev);
	if (WARN_ON(!info || !dev_is_pci(dev)))
		return -ENODEV;
	if (WARN_ON(info->pasid_table))
		return -EEXIST;

	pasid_table = kzalloc(sizeof(*pasid_table), GFP_KERNEL);
	if (!pasid_table)
		return -ENOMEM;

	if (info->pasid_supported)
		max_pasid = min_t(u32, pci_max_pasids(to_pci_dev(dev)),
				  intel_pasid_max_id);

	size = max_pasid >> (PASID_PDE_SHIFT - 3);
	order = size ? get_order(size) : 0;
	dir = iommu_alloc_pages_node(info->iommu->node, GFP_KERNEL, order);
	if (!dir) {
		kfree(pasid_table);
		return -ENOMEM;
	}

	pasid_table->table = dir;
	pasid_table->order = order;
	pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3);
	info->pasid_table = pasid_table;

	if (!ecap_coherent(info->iommu->ecap))
		clflush_cache_range(pasid_table->table, (1 << order) * PAGE_SIZE);

	return 0;
}

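/*
 * Sizing note, as a rough worked example (assuming PASID_PDE_SHIFT is 6
 * and 4 KiB pages): each directory entry is 8 bytes and covers 64 PASIDs,
 * so a full 20-bit PASID space needs (1 << 20) >> 3 = 128 KiB of
 * directory, i.e. order 5. The max_pasid stored above is then recomputed
 * from the order actually allocated: 1 << (5 + 12 + 3) = 1 << 20.
 */
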
void intel_pasid_free_table(struct device *dev)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_dir_entry *dir;
	struct pasid_entry *table;
	int i, max_pde;

	info = dev_iommu_priv_get(dev);
	if (!info || !dev_is_pci(dev) || !info->pasid_table)
		return;

	pasid_table = info->pasid_table;
	info->pasid_table = NULL;

	/* Free scalable mode PASID directory tables: */
	dir = pasid_table->table;
	max_pde = pasid_table->max_pasid >> PASID_PDE_SHIFT;
	for (i = 0; i < max_pde; i++) {
		table = get_pasid_table_from_pde(&dir[i]);
		iommu_free_page(table);
	}

	iommu_free_pages(pasid_table->table, pasid_table->order);
	kfree(pasid_table);
}

struct pasid_table *intel_pasid_get_table(struct device *dev)
{
	struct device_domain_info *info;

	info = dev_iommu_priv_get(dev);
	if (!info)
		return NULL;

	return info->pasid_table;
}

static int intel_pasid_get_dev_max_id(struct device *dev)
{
	struct device_domain_info *info;

	info = dev_iommu_priv_get(dev);
	if (!info || !info->pasid_table)
		return 0;

	return info->pasid_table->max_pasid;
}

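/*
 * The PASID table is two-level: @pasid indexes a directory entry first
 * and a leaf entry second. As a hedged illustration (assuming
 * PASID_PDE_SHIFT is 6 and PASID_PTE_MASK is 0x3f, as defined in
 * pasid.h), PASID 0x1042 resolves to directory index 0x41 and table
 * index 0x2.
 */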
static struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_dir_entry *dir;
	struct pasid_entry *entries;
	int dir_index, index;

	pasid_table = intel_pasid_get_table(dev);
	if (WARN_ON(!pasid_table || pasid >= intel_pasid_get_dev_max_id(dev)))
		return NULL;

	dir = pasid_table->table;
	info = dev_iommu_priv_get(dev);
	dir_index = pasid >> PASID_PDE_SHIFT;
	index = pasid & PASID_PTE_MASK;

retry:
	entries = get_pasid_table_from_pde(&dir[dir_index]);
	if (!entries) {
		u64 tmp;

		entries = iommu_alloc_page_node(info->iommu->node, GFP_ATOMIC);
		if (!entries)
			return NULL;

		/*
		 * The pasid directory table entry won't be freed after
		 * allocation, so there is no race with free and clear.
		 * However, this entry might be populated by others while
		 * we are preparing it. If that happens, drop our page and
		 * retry so that we use theirs.
		 */
		tmp = 0ULL;
		if (!try_cmpxchg64(&dir[dir_index].val, &tmp,
				   (u64)virt_to_phys(entries) | PASID_PTE_PRESENT)) {
			iommu_free_page(entries);
			goto retry;
		}
		if (!ecap_coherent(info->iommu->ecap)) {
			clflush_cache_range(entries, VTD_PAGE_SIZE);
			clflush_cache_range(&dir[dir_index].val, sizeof(*dir));
		}
	}

	return &entries[index];
}

/*
 * Interfaces for PASID table entry manipulation:
 */
static void
intel_pasid_clear_entry(struct device *dev, u32 pasid, bool fault_ignore)
{
	struct pasid_entry *pe;

	pe = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pe))
		return;

	if (fault_ignore && pasid_pte_is_present(pe))
		pasid_clear_entry_with_fpd(pe);
	else
		pasid_clear_entry(pe);
}

static void
pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
				    u16 did, u32 pasid)
{
	struct qi_desc desc;

	desc.qw0 = QI_PC_DID(did) | QI_PC_GRAN(QI_PC_PASID_SEL) |
		QI_PC_PASID(pasid) | QI_PC_TYPE;
	desc.qw1 = 0;
	desc.qw2 = 0;
	desc.qw3 = 0;

	qi_submit_sync(iommu, &desc, 1, 0);
}

static void
devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
			       struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	u16 sid, qdep, pfsid;

	info = dev_iommu_priv_get(dev);
	if (!info || !info->ats_enabled)
		return;

	if (pci_dev_is_disconnected(to_pci_dev(dev)))
		return;

	sid = info->bus << 8 | info->devfn;
	qdep = info->ats_qdep;
	pfsid = info->pfsid;

	/*
	 * PASID 0 indicates RID2PASID, i.e. a DMA request without PASID,
	 * so a devTLB flush without PASID should be used. For a non-zero
	 * PASID under SVA usage, the device could do DMA with multiple
	 * PASIDs; it is more efficient to flush the devTLB entries
	 * specific to the PASID.
	 */
	if (pasid == IOMMU_NO_PASID)
		qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
	else
		qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0, 64 - VTD_PAGE_SHIFT);
}

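/*
 * Tear down the PASID entry and flush the relevant caches. The order
 * below (clear the entry, invalidate the PASID cache, then the IOTLB or
 * PASID-based IOTLB depending on the translation type, and finally the
 * device TLB) follows the invalidation guidance from the VT-d spec that
 * is quoted elsewhere in this file.
 */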
void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
				 u32 pasid, bool fault_ignore)
{
	struct pasid_entry *pte;
	u16 did, pgtt;

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte) || !pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return;
	}

	did = pasid_get_domain_id(pte);
	pgtt = pasid_pte_get_pgtt(pte);
	intel_pasid_clear_entry(dev, pasid, fault_ignore);
	spin_unlock(&iommu->lock);

	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	pasid_cache_invalidation_with_pasid(iommu, did, pasid);

	if (pgtt == PASID_ENTRY_PGTT_PT || pgtt == PASID_ENTRY_PGTT_FL_ONLY)
		qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
	else
		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);

	/* Device IOTLB doesn't need to be flushed in caching mode. */
	if (!cap_caching_mode(iommu->cap))
		devtlb_invalidation_with_pasid(iommu, dev, pasid);
}

/*
 * This function flushes the caches for a newly set up pasid table entry.
 * Callers must not use it on pasid table entries that are already in use.
 */
static void pasid_flush_caches(struct intel_iommu *iommu,
			       struct pasid_entry *pte,
			       u32 pasid, u16 did)
{
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	if (cap_caching_mode(iommu->cap)) {
		pasid_cache_invalidation_with_pasid(iommu, did, pasid);
		qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
	} else {
		iommu_flush_write_buffer(iommu);
	}
}

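/*
 * Rationale for the helper above, as far as the surrounding code
 * suggests: with Caching Mode set, hardware may cache even not-present
 * entries, so installing a fresh entry must be followed by explicit
 * PASID-cache and PASID-based IOTLB invalidation. Without Caching Mode,
 * flushing the write buffer (if the hardware requires it) is enough to
 * make the new entry visible.
 */
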
/*
 * Set up the scalable mode pasid table entry for first-level only
 * translation type.
 */
int intel_pasid_setup_first_level(struct intel_iommu *iommu,
				  struct device *dev, pgd_t *pgd,
				  u32 pasid, u16 did, int flags)
{
	struct pasid_entry *pte;

	if (!ecap_flts(iommu->ecap)) {
		pr_err("No first level translation support on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	if ((flags & PASID_FLAG_FL5LP) && !cap_fl5lp_support(iommu->cap)) {
		pr_err("No 5-level paging support for first-level on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		spin_unlock(&iommu->lock);
		return -ENODEV;
	}

	if (pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return -EBUSY;
	}

	pasid_clear_entry(pte);

	/* Setup the first level page table pointer: */
	pasid_set_flptr(pte, (u64)__pa(pgd));

	if (flags & PASID_FLAG_FL5LP)
		pasid_set_flpm(pte, 1);

	if (flags & PASID_FLAG_PAGE_SNOOP)
		pasid_set_pgsnp(pte);

	pasid_set_domain_id(pte, did);
	pasid_set_address_width(pte, iommu->agaw);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	/* Setup Present and PASID Granular Transfer Type: */
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY);
	pasid_set_present(pte);
	spin_unlock(&iommu->lock);

	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}

/*
 * Skip the top levels of the page tables for an iommu whose agaw is
 * smaller than the domain's default. Unnecessary for PT mode.
 */
static int iommu_skip_agaw(struct dmar_domain *domain,
			   struct intel_iommu *iommu,
			   struct dma_pte **pgd)
{
	int agaw;

	for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
		*pgd = phys_to_virt(dma_pte_addr(*pgd));
		if (!dma_pte_present(*pgd))
			return -EINVAL;
	}

	return agaw;
}

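/*
 * A hedged illustration of the walk above: agaw encodes the address
 * width as roughly 30 + 9 * agaw bits, so a domain built with 4-level
 * tables (agaw 2, 48-bit) attached through an iommu that only handles
 * 3-level tables (agaw 1, 39-bit) steps down one top level before the
 * second-level pointer is programmed.
 */
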
/*
 * Set up the scalable mode pasid entry for second-level only translation
 * type.
 */
int intel_pasid_setup_second_level(struct intel_iommu *iommu,
				   struct dmar_domain *domain,
				   struct device *dev, u32 pasid)
{
	struct pasid_entry *pte;
	struct dma_pte *pgd;
	u64 pgd_val;
	int agaw;
	u16 did;

	/*
	 * If hardware advertises no support for second level
	 * translation, return directly.
	 */
	if (!ecap_slts(iommu->ecap)) {
		pr_err("No second level translation support on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	pgd = domain->pgd;
	agaw = iommu_skip_agaw(domain, iommu, &pgd);
	if (agaw < 0) {
		dev_err(dev, "Invalid domain page table\n");
		return -EINVAL;
	}

	pgd_val = virt_to_phys(pgd);
	did = domain_id_iommu(domain, iommu);

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		spin_unlock(&iommu->lock);
		return -ENODEV;
	}

	if (pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return -EBUSY;
	}

	pasid_clear_entry(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_slptr(pte, pgd_val);
	pasid_set_address_width(pte, agaw);
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_SL_ONLY);
	pasid_set_fault_enable(pte);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
	if (domain->dirty_tracking)
		pasid_set_ssade(pte);

	pasid_set_present(pte);
	spin_unlock(&iommu->lock);

	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}

/*
 * Set up dirty tracking on a second-level only or nested translation type.
 */
int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
				     struct device *dev, u32 pasid,
				     bool enabled)
{
	struct pasid_entry *pte;
	u16 did, pgtt;

	spin_lock(&iommu->lock);

	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		spin_unlock(&iommu->lock);
		dev_err_ratelimited(
			dev, "Failed to get pasid entry of PASID %d\n", pasid);
		return -ENODEV;
	}

	did = pasid_get_domain_id(pte);
	pgtt = pasid_pte_get_pgtt(pte);
	if (pgtt != PASID_ENTRY_PGTT_SL_ONLY &&
	    pgtt != PASID_ENTRY_PGTT_NESTED) {
		spin_unlock(&iommu->lock);
		dev_err_ratelimited(
			dev,
			"Dirty tracking not supported on translation type %d\n",
			pgtt);
		return -EOPNOTSUPP;
	}

	if (pasid_get_ssade(pte) == enabled) {
		spin_unlock(&iommu->lock);
		return 0;
	}

	if (enabled)
		pasid_set_ssade(pte);
	else
		pasid_clear_ssade(pte);
	spin_unlock(&iommu->lock);

	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	/*
	 * From VT-d spec table 25 "Guidance to Software for Invalidations":
	 *
	 * - PASID-selective-within-Domain PASID-cache invalidation
	 *   If (PGTT=SS or Nested)
	 *    - Domain-selective IOTLB invalidation
	 *   Else
	 *    - PASID-selective PASID-based IOTLB invalidation
	 * - If (pasid is RID_PASID)
	 *    - Global Device-TLB invalidation to affected functions
	 *   Else
	 *    - PASID-based Device-TLB invalidation (with S=1 and
	 *      Addr[63:12]=0x7FFFFFFF_FFFFF) to affected functions
	 */
	pasid_cache_invalidation_with_pasid(iommu, did, pasid);

	iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);

	/* Device IOTLB doesn't need to be flushed in caching mode. */
	if (!cap_caching_mode(iommu->cap))
		devtlb_invalidation_with_pasid(iommu, dev, pasid);

	return 0;
}

/*
 * Set up the scalable mode pasid entry for passthrough translation type.
 */
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
				   struct device *dev, u32 pasid)
{
	u16 did = FLPT_DEFAULT_DID;
	struct pasid_entry *pte;

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		spin_unlock(&iommu->lock);
		return -ENODEV;
	}

	if (pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return -EBUSY;
	}

	pasid_clear_entry(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_address_width(pte, iommu->agaw);
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_PT);
	pasid_set_fault_enable(pte);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
	pasid_set_present(pte);
	spin_unlock(&iommu->lock);

	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}

/*
 * Set the page snoop control for a pasid entry which has been set up.
 */
void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
					  struct device *dev, u32 pasid)
{
	struct pasid_entry *pte;
	u16 did;

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte || !pasid_pte_is_present(pte))) {
		spin_unlock(&iommu->lock);
		return;
	}

	pasid_set_pgsnp(pte);
	did = pasid_get_domain_id(pte);
	spin_unlock(&iommu->lock);

	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	/*
	 * VT-d spec 3.4, table 23, gives the following guidance for cache
	 * invalidation:
	 *
	 * - PASID-selective-within-Domain PASID-cache invalidation
	 * - PASID-selective PASID-based IOTLB invalidation
	 * - If (pasid is RID_PASID)
	 *    - Global Device-TLB invalidation to affected functions
	 *   Else
	 *    - PASID-based Device-TLB invalidation (with S=1 and
	 *      Addr[63:12]=0x7FFFFFFF_FFFFF) to affected functions
	 */
	pasid_cache_invalidation_with_pasid(iommu, did, pasid);
	qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);

	/* Device IOTLB doesn't need to be flushed in caching mode. */
	if (!cap_caching_mode(iommu->cap))
		devtlb_invalidation_with_pasid(iommu, dev, pasid);
}

/**
 * intel_pasid_setup_nested() - Set up PASID entry for nested translation.
 * @iommu: IOMMU which the device belongs to
 * @dev: Device to be set up for translation
 * @pasid: PASID to be programmed in the device PASID table
 * @domain: User stage-1 domain nested on a stage-2 domain
 *
 * This is used for nested translation. The input domain should be of
 * nested type, nested on a parent domain with the 'is_nested_parent'
 * flag set.
 */
int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
			     u32 pasid, struct dmar_domain *domain)
{
	struct iommu_hwpt_vtd_s1 *s1_cfg = &domain->s1_cfg;
	pgd_t *s1_gpgd = (pgd_t *)(uintptr_t)domain->s1_pgtbl;
	struct dmar_domain *s2_domain = domain->s2_domain;
	u16 did = domain_id_iommu(domain, iommu);
	struct dma_pte *pgd = s2_domain->pgd;
	struct pasid_entry *pte;

	/* Address width should match the address width supported by hardware */
	switch (s1_cfg->addr_width) {
	case ADDR_WIDTH_4LEVEL:
		break;
	case ADDR_WIDTH_5LEVEL:
		if (!cap_fl5lp_support(iommu->cap)) {
			dev_err_ratelimited(dev,
					    "5-level paging not supported\n");
			return -EINVAL;
		}
		break;
	default:
		dev_err_ratelimited(dev, "Invalid stage-1 address width %d\n",
				    s1_cfg->addr_width);
		return -EINVAL;
	}

	if ((s1_cfg->flags & IOMMU_VTD_S1_SRE) && !ecap_srs(iommu->ecap)) {
		pr_err_ratelimited("No supervisor request support on %s\n",
				   iommu->name);
		return -EINVAL;
	}

	if ((s1_cfg->flags & IOMMU_VTD_S1_EAFE) && !ecap_eafs(iommu->ecap)) {
		pr_err_ratelimited("No extended access flag support on %s\n",
				   iommu->name);
		return -EINVAL;
	}

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		spin_unlock(&iommu->lock);
		return -ENODEV;
	}
	if (pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return -EBUSY;
	}

	pasid_clear_entry(pte);

	if (s1_cfg->addr_width == ADDR_WIDTH_5LEVEL)
		pasid_set_flpm(pte, 1);

	pasid_set_flptr(pte, (uintptr_t)s1_gpgd);

	if (s1_cfg->flags & IOMMU_VTD_S1_SRE) {
		pasid_set_sre(pte);
		if (s1_cfg->flags & IOMMU_VTD_S1_WPE)
			pasid_set_wpe(pte);
	}

	if (s1_cfg->flags & IOMMU_VTD_S1_EAFE)
		pasid_set_eafe(pte);

	if (s2_domain->force_snooping)
		pasid_set_pgsnp(pte);

	pasid_set_slptr(pte, virt_to_phys(pgd));
	pasid_set_fault_enable(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_address_width(pte, s2_domain->agaw);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
	if (s2_domain->dirty_tracking)
		pasid_set_ssade(pte);
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_NESTED);
	pasid_set_present(pte);
	spin_unlock(&iommu->lock);

	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}

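/*
 * Note on the nested setup above, as the code suggests: the first-level
 * pointer taken from @s1_gpgd is a guest-physical address that hardware
 * translates through the second-level tables, which is why it is stored
 * as-is while the second-level pointer goes through virt_to_phys().
 */
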
/*
 * Interfaces to set up or tear down a pasid table to the scalable-mode
 * context table entry:
 */

static void device_pasid_table_teardown(struct device *dev, u8 bus, u8 devfn)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);
	struct intel_iommu *iommu = info->iommu;
	struct context_entry *context;
	u16 did;

	spin_lock(&iommu->lock);
	context = iommu_context_addr(iommu, bus, devfn, false);
	if (!context) {
		spin_unlock(&iommu->lock);
		return;
	}

	did = context_domain_id(context);
	context_clear_entry(context);
	__iommu_flush_cache(iommu, context, sizeof(*context));
	spin_unlock(&iommu->lock);
	intel_context_flush_present(info, context, did, false);
}

static int pci_pasid_table_teardown(struct pci_dev *pdev, u16 alias, void *data)
{
	struct device *dev = data;

	if (dev == &pdev->dev)
		device_pasid_table_teardown(dev, PCI_BUS_NUM(alias), alias & 0xff);

	return 0;
}

void intel_pasid_teardown_sm_context(struct device *dev)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);

	if (!dev_is_pci(dev)) {
		device_pasid_table_teardown(dev, info->bus, info->devfn);
		return;
	}

	pci_for_each_dma_alias(to_pci_dev(dev), pci_pasid_table_teardown, dev);
}

/*
 * Get the PASID directory size for the scalable mode context entry.
 * A value of X in the PDTS field of a scalable mode context entry
 * indicates a PASID directory with 2^(X + 7) entries.
 */
static unsigned long context_get_sm_pds(struct pasid_table *table)
{
	unsigned long pds, max_pde;

	max_pde = table->max_pasid >> PASID_PDE_SHIFT;
	pds = find_first_bit(&max_pde, MAX_NR_PASID_BITS);
	if (pds < 7)
		return 0;

	return pds - 7;
}

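/*
 * A rough worked example for the helper above (assuming PASID_PDE_SHIFT
 * is 6 and MAX_NR_PASID_BITS is 20): with the full 20-bit PASID space,
 * max_pasid is 1 << 20, so max_pde is 1 << 14, find_first_bit() returns
 * 14 and the helper returns 7, i.e. a PASID directory with
 * 2^(7 + 7) = 2^14 entries, matching max_pde.
 */
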
static int context_entry_set_pasid_table(struct context_entry *context,
					 struct device *dev)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);
	struct pasid_table *table = info->pasid_table;
	struct intel_iommu *iommu = info->iommu;
	unsigned long pds;

	context_clear_entry(context);

	pds = context_get_sm_pds(table);
	context->lo = (u64)virt_to_phys(table->table) | context_pdts(pds);
	context_set_sm_rid2pasid(context, IOMMU_NO_PASID);

	if (info->ats_supported)
		context_set_sm_dte(context);
	if (info->pasid_supported)
		context_set_pasid(context);

	context_set_fault_enable(context);
	context_set_present(context);
	__iommu_flush_cache(iommu, context, sizeof(*context));

	return 0;
}

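/*
 * Install the PASID table into the scalable-mode context entry for
 * (@bus, @devfn), taking care of context entries that were copied from a
 * previous kernel (kdump) and therefore need extra cache invalidation.
 */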
static int device_pasid_table_setup(struct device *dev, u8 bus, u8 devfn)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);
	struct intel_iommu *iommu = info->iommu;
	struct context_entry *context;

	spin_lock(&iommu->lock);
	context = iommu_context_addr(iommu, bus, devfn, true);
	if (!context) {
		spin_unlock(&iommu->lock);
		return -ENOMEM;
	}

	if (context_present(context) && !context_copied(iommu, bus, devfn)) {
		spin_unlock(&iommu->lock);
		return 0;
	}

	if (context_copied(iommu, bus, devfn)) {
		context_clear_entry(context);
		__iommu_flush_cache(iommu, context, sizeof(*context));

		/*
		 * For kdump cases, old valid entries may be cached due to
		 * the in-flight DMA and copied pgtable, but there is no
		 * unmapping behaviour for them, thus we need explicit cache
		 * flushes for all affected domain IDs and PASIDs used in
		 * the copied PASID table. Given that we have no idea about
		 * which domain IDs and PASIDs were used in the copied tables,
		 * upgrade them to global PASID and IOTLB cache invalidation.
		 */
		iommu->flush.flush_context(iommu, 0,
					   PCI_DEVID(bus, devfn),
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
		devtlb_invalidation_with_pasid(iommu, dev, IOMMU_NO_PASID);

		/*
		 * At this point, the device is supposed to have finished
		 * reset at its driver probe stage, so no in-flight DMA
		 * exists, and we don't need to worry about it hereafter.
		 */
		clear_context_copied(iommu, bus, devfn);
	}

	context_entry_set_pasid_table(context, dev);
	spin_unlock(&iommu->lock);

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries, we don't need to flush the caches. If it
	 * does cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   PCI_DEVID(bus, devfn),
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
	}

	return 0;
}

static int pci_pasid_table_setup(struct pci_dev *pdev, u16 alias, void *data)
{
	struct device *dev = data;

	if (dev != &pdev->dev)
		return 0;

	return device_pasid_table_setup(dev, PCI_BUS_NUM(alias), alias & 0xff);
}

/*
 * Set the device's PASID table to its context table entry.
 *
 * The PASID table is set to the context entries of both the device itself
 * and its alias requester ID for DMA.
 */
int intel_pasid_setup_sm_context(struct device *dev)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);

	if (!dev_is_pci(dev))
		return device_pasid_table_setup(dev, info->bus, info->devfn);

	return pci_for_each_dma_alias(to_pci_dev(dev), pci_pasid_table_setup, dev);
}

/*
 * Global Device-TLB invalidation following changes in a context entry which
 * was present.
 */
static void __context_flush_dev_iotlb(struct device_domain_info *info)
{
	if (!info->ats_enabled)
		return;

	qi_flush_dev_iotlb(info->iommu, PCI_DEVID(info->bus, info->devfn),
			   info->pfsid, info->ats_qdep, 0, MAX_AGAW_PFN_WIDTH);

	/*
	 * There is no guarantee that the device DMA is stopped when it reaches
	 * here. Therefore, always attempt the extra device TLB invalidation
	 * quirk. The impact on performance is acceptable since this is not a
	 * performance-critical path.
	 */
	quirk_extra_dev_tlb_flush(info, 0, MAX_AGAW_PFN_WIDTH, IOMMU_NO_PASID,
				  info->ats_qdep);
}

/*
 * Cache invalidations after a change in a context table entry that was
 * present, according to spec section 6.5.3.3 (Guidance to Software for
 * Invalidations). If the IOMMU is in scalable mode and all PASID table
 * entries of the device were non-present, set @flush_domains to false;
 * otherwise, set it to true.
 */
void intel_context_flush_present(struct device_domain_info *info,
				 struct context_entry *context,
				 u16 did, bool flush_domains)
{
	struct intel_iommu *iommu = info->iommu;
	struct pasid_entry *pte;
	int i;

	/*
	 * Device-selective context-cache invalidation. The Domain-ID field
	 * of the Context-cache Invalidate Descriptor is ignored by hardware
	 * when operating in scalable mode. Therefore the @did value doesn't
	 * matter in scalable mode.
	 */
	iommu->flush.flush_context(iommu, did, PCI_DEVID(info->bus, info->devfn),
				   DMA_CCMD_MASK_NOBIT, DMA_CCMD_DEVICE_INVL);

	/*
	 * For legacy mode:
	 * - Domain-selective IOTLB invalidation
	 * - Global Device-TLB invalidation to all affected functions
	 */
	if (!sm_supported(iommu)) {
		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
		__context_flush_dev_iotlb(info);

		return;
	}

	/*
	 * For scalable mode:
	 * - Domain-selective PASID-cache invalidation to affected domains
	 * - Domain-selective IOTLB invalidation to affected domains
	 * - Global Device-TLB invalidation to affected functions
	 */
	if (flush_domains) {
		/*
		 * If the IOMMU is running in scalable mode and there might
		 * be potential PASID translations, the caller should hold
		 * the lock to ensure that context changes and cache flushes
		 * are atomic.
		 */
		assert_spin_locked(&iommu->lock);
		for (i = 0; i < info->pasid_table->max_pasid; i++) {
			pte = intel_pasid_get_entry(info->dev, i);
			if (!pte || !pasid_pte_is_present(pte))
				continue;

			did = pasid_get_domain_id(pte);
			qi_flush_pasid_cache(iommu, did, QI_PC_ALL_PASIDS, 0);
			iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
		}
	}

	__context_flush_dev_iotlb(info);
}