// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 */

#define pr_fmt(fmt)	"AMD-Vi: " fmt
#define dev_fmt(fmt)	pr_fmt(fmt)

#include <linux/ratelimit.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/pci-ats.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/string_choices.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-direct.h>
#include <linux/idr.h>
#include <linux/iommu-helper.h>
#include <linux/delay.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/irqchip/irq-msi-lib.h>
#include <linux/msi.h>
#include <linux/irqdomain.h>
#include <linux/percpu.h>
#include <linux/cc_platform.h>
#include <asm/irq_remapping.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
#include <asm/hw_irq.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/dma.h>
#include <uapi/linux/iommufd.h>
#include <linux/generic_pt/iommu.h>

#include "amd_iommu.h"
#include "../irq_remapping.h"
#include "../iommu-pages.h"

#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))

/* Reserved IOVA ranges */
#define MSI_RANGE_START		(0xfee00000)
#define MSI_RANGE_END		(0xfeefffff)
#define HT_RANGE_START		(0xfd00000000ULL)
#define HT_RANGE_END		(0xffffffffffULL)

LIST_HEAD(ioapic_map);
LIST_HEAD(hpet_map);
LIST_HEAD(acpihid_map);

const struct iommu_ops amd_iommu_ops;

int amd_iommu_max_glx_val = -1;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
DEFINE_IDA(pdom_ids);

static int amd_iommu_attach_device(struct iommu_domain *dom, struct device *dev,
				   struct iommu_domain *old);

static void set_dte_entry(struct amd_iommu *iommu,
			  struct iommu_dev_data *dev_data,
			  phys_addr_t top_paddr, unsigned int top_level);

static void amd_iommu_change_top(struct pt_iommu *iommu_table,
				 phys_addr_t top_paddr, unsigned int top_level);

static void iommu_flush_dte_sync(struct amd_iommu *iommu, u16 devid);

static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid);
static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain);
static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
					bool enable);

/****************************************************************************
 *
 * Helper functions
 *
 ****************************************************************************/

static __always_inline void amd_iommu_atomic128_set(__int128 *ptr, __int128 val)
{
	/*
	 * Note:
	 * We use arch_cmpxchg128_local() because:
	 * - We need the cmpxchg16b instruction mainly for the 128-bit store
	 *   to the DTE (the compare-exchange itself is not needed since this
	 *   function is already protected by a spin_lock for this DTE).
	 * - Neither the LOCK_PREFIX nor a retry loop is needed because of
	 *   the spin_lock.
	 */
	arch_cmpxchg128_local(ptr, *ptr, val);
}
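
/*
 * Note: a Device Table Entry (DTE) is 256 bits wide: four u64 words
 * (data[0..3]), also addressable as two 128-bit halves (data128[0..1]).
 * The lower half carries the DMA-translation controls; the upper half
 * carries, among other things, the interrupt-remapping controls that the
 * helpers below must preserve.
 */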
static void write_dte_upper128(struct dev_table_entry *ptr, struct dev_table_entry *new)
{
	struct dev_table_entry old;

	old.data128[1] = ptr->data128[1];
	/*
	 * Preserve DTE_DATA2_INTR_MASK. This needs to be done here since
	 * it must happen inside spin_lock(&dev_data->dte_lock) context.
	 */
	new->data[2] &= ~DTE_DATA2_INTR_MASK;
	new->data[2] |= old.data[2] & DTE_DATA2_INTR_MASK;

	amd_iommu_atomic128_set(&ptr->data128[1], new->data128[1]);
}

static void write_dte_lower128(struct dev_table_entry *ptr, struct dev_table_entry *new)
{
	amd_iommu_atomic128_set(&ptr->data128[0], new->data128[0]);
}

/*
 * Note:
 * The IOMMU reads the entire Device Table entry in a single 256-bit
 * transaction, but the driver programs the DTE using two 128-bit cmpxchg
 * operations. So the driver needs to ensure the following:
 * - The DTE[V|GV] bit is written last when setting.
 * - The DTE[V|GV] bit is written first when clearing.
 *
 * This function is used only by code that updates the DMA-translation part
 * of the DTE, so only control bits related to DMA are considered when
 * updating the entry.
 */
static void update_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data,
			  struct dev_table_entry *new)
{
	unsigned long flags;
	struct dev_table_entry *dev_table = get_dev_table(iommu);
	struct dev_table_entry *ptr = &dev_table[dev_data->devid];

	spin_lock_irqsave(&dev_data->dte_lock, flags);

	if (!(ptr->data[0] & DTE_FLAG_V)) {
		/* Existing DTE is not valid. */
		write_dte_upper128(ptr, new);
		write_dte_lower128(ptr, new);
		iommu_flush_dte_sync(iommu, dev_data->devid);
	} else if (!(new->data[0] & DTE_FLAG_V)) {
		/* Existing DTE is valid. New DTE is not valid. */
		write_dte_lower128(ptr, new);
		write_dte_upper128(ptr, new);
		iommu_flush_dte_sync(iommu, dev_data->devid);
	} else if (!FIELD_GET(DTE_FLAG_GV, ptr->data[0])) {
		/*
		 * Both DTEs are valid.
		 * Existing DTE has no guest page table.
		 */
		write_dte_upper128(ptr, new);
		write_dte_lower128(ptr, new);
		iommu_flush_dte_sync(iommu, dev_data->devid);
	} else if (!FIELD_GET(DTE_FLAG_GV, new->data[0])) {
		/*
		 * Both DTEs are valid.
		 * Existing DTE has a guest page table,
		 * new DTE has no guest page table.
		 */
		write_dte_lower128(ptr, new);
		write_dte_upper128(ptr, new);
		iommu_flush_dte_sync(iommu, dev_data->devid);
	} else if (FIELD_GET(DTE_GPT_LEVEL_MASK, ptr->data[2]) !=
		   FIELD_GET(DTE_GPT_LEVEL_MASK, new->data[2])) {
		/*
		 * Both DTEs are valid and have a guest page table, but with a
		 * different number of levels. So, we need to update both the
		 * upper and lower 128-bit values, which requires disabling
		 * and flushing.
		 */
		struct dev_table_entry clear = {};

		/* First disable DTE */
		write_dte_lower128(ptr, &clear);
		iommu_flush_dte_sync(iommu, dev_data->devid);

		/* Then update DTE */
		write_dte_upper128(ptr, new);
		write_dte_lower128(ptr, new);
		iommu_flush_dte_sync(iommu, dev_data->devid);
	} else {
		/*
		 * Both DTEs are valid and have a guest page table with the
		 * same number of levels. We only need to update the lower
		 * 128 bits, so there is no need to disable the DTE.
		 */
		write_dte_lower128(ptr, new);
	}

	spin_unlock_irqrestore(&dev_data->dte_lock, flags);
}

static void get_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data,
		       struct dev_table_entry *dte)
{
	unsigned long flags;
	struct dev_table_entry *ptr;
	struct dev_table_entry *dev_table = get_dev_table(iommu);

	ptr = &dev_table[dev_data->devid];

	spin_lock_irqsave(&dev_data->dte_lock, flags);
	dte->data128[0] = ptr->data128[0];
	dte->data128[1] = ptr->data128[1];
	spin_unlock_irqrestore(&dev_data->dte_lock, flags);
}

static inline bool pdom_is_v2_pgtbl_mode(struct protection_domain *pdom)
{
	return (pdom && (pdom->pd_mode == PD_MODE_V2));
}

static inline bool pdom_is_in_pt_mode(struct protection_domain *pdom)
{
	return (pdom->domain.type == IOMMU_DOMAIN_IDENTITY);
}

/*
 * We cannot support PASID with an existing v1 page table in the same domain,
 * since it would have to be nested. However, an existing domain with a v2
 * page table or in passthrough mode can be used for PASID.
 */
static inline bool pdom_is_sva_capable(struct protection_domain *pdom)
{
	return pdom_is_v2_pgtbl_mode(pdom) || pdom_is_in_pt_mode(pdom);
}

static inline int get_acpihid_device_id(struct device *dev,
					struct acpihid_map_entry **entry)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);
	struct acpihid_map_entry *p, *p1 = NULL;
	int hid_count = 0;
	bool fw_bug;

	if (!adev)
		return -ENODEV;

	list_for_each_entry(p, &acpihid_map, list) {
		if (acpi_dev_hid_uid_match(adev, p->hid,
					   p->uid[0] ? p->uid : NULL)) {
			p1 = p;
			fw_bug = false;
			hid_count = 1;
			break;
		}

		/*
		 * Count HID matches without a UID; raise FW_BUG but allow
		 * exactly one such match.
		 */
		if (acpi_dev_hid_match(adev, p->hid)) {
			p1 = p;
			hid_count++;
			fw_bug = true;
		}
	}

	if (!p1)
		return -EINVAL;
	if (fw_bug)
		dev_err_once(dev, FW_BUG "No ACPI device matched UID, but %d device%s matched HID.\n",
			     hid_count, str_plural(hid_count));
	if (hid_count > 1)
		return -EINVAL;
	if (entry)
		*entry = p1;

	return p1->devid;
}

static inline int get_device_sbdf_id(struct device *dev)
{
	int sbdf;

	if (dev_is_pci(dev))
		sbdf = get_pci_sbdf_id(to_pci_dev(dev));
	else
		sbdf = get_acpihid_device_id(dev, NULL);

	return sbdf;
}

struct dev_table_entry *get_dev_table(struct amd_iommu *iommu)
{
	struct dev_table_entry *dev_table;
	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;

	BUG_ON(pci_seg == NULL);
	dev_table = pci_seg->dev_table;
	BUG_ON(dev_table == NULL);

	return dev_table;
}

static inline u16 get_device_segment(struct device *dev)
{
	u16 seg;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		seg = pci_domain_nr(pdev->bus);
	} else {
		u32 devid = get_acpihid_device_id(dev, NULL);

		seg = PCI_SBDF_TO_SEGID(devid);
	}

	return seg;
}

/* Writes the specific IOMMU for a device into the PCI segment rlookup table */
void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid)
{
	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;

	pci_seg->rlookup_table[devid] = iommu;
}

static struct amd_iommu *__rlookup_amd_iommu(u16 seg, u16 devid)
{
	struct amd_iommu_pci_seg *pci_seg;

	for_each_pci_segment(pci_seg) {
		if (pci_seg->id == seg)
			return pci_seg->rlookup_table[devid];
	}
	return NULL;
}

static struct amd_iommu *rlookup_amd_iommu(struct device *dev)
{
	u16 seg = get_device_segment(dev);
	int devid = get_device_sbdf_id(dev);

	if (devid < 0)
		return NULL;
	return __rlookup_amd_iommu(seg, PCI_SBDF_TO_DEVID(devid));
}

static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_dev_data *dev_data;
	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;

	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return NULL;

	mutex_init(&dev_data->mutex);
	spin_lock_init(&dev_data->dte_lock);
	dev_data->devid = devid;
	ratelimit_default_init(&dev_data->rs);

	llist_add(&dev_data->dev_data_list, &pci_seg->dev_data_list);
	return dev_data;
}

struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_dev_data *dev_data;
	struct llist_node *node;
	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;

	if (llist_empty(&pci_seg->dev_data_list))
		return NULL;

	node = pci_seg->dev_data_list.first;
	llist_for_each_entry(dev_data, node, dev_data_list) {
		if (dev_data->devid == devid)
			return dev_data;
	}

	return NULL;
}

static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
{
	struct dev_table_entry new;
	struct amd_iommu *iommu;
	struct iommu_dev_data *dev_data, *alias_data;
	u16 devid = pci_dev_id(pdev);
	int ret = 0;

	if (devid == alias)
		return 0;

	iommu = rlookup_amd_iommu(&pdev->dev);
	if (!iommu)
		return 0;

	/* Copy the data from pdev */
	dev_data = dev_iommu_priv_get(&pdev->dev);
	if (!dev_data) {
		pr_err("%s : Failed to get dev_data for 0x%x\n", __func__, devid);
		ret = -EINVAL;
		goto out;
	}
	get_dte256(iommu, dev_data, &new);

	/* Setup alias */
	alias_data = find_dev_data(iommu, alias);
	if (!alias_data) {
		pr_err("%s : Failed to get alias dev_data for 0x%x\n", __func__, alias);
		ret = -EINVAL;
		goto out;
	}
	update_dte256(iommu, alias_data, &new);

	amd_iommu_set_rlookup_table(iommu, alias);
out:
	return ret;
}

static void clone_aliases(struct amd_iommu *iommu, struct device *dev)
{
	struct pci_dev *pdev;

	if (!dev_is_pci(dev))
		return;
	pdev = to_pci_dev(dev);

	/*
	 * The IVRS alias stored in the alias table may not be
	 * part of the PCI DMA aliases if its bus differs
	 * from the original device.
	 */
	clone_alias(pdev, iommu->pci_seg->alias_table[pci_dev_id(pdev)], NULL);

	pci_for_each_dma_alias(pdev, clone_alias, NULL);
}

static void setup_aliases(struct amd_iommu *iommu, struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
	u16 ivrs_alias;

	/* For ACPI HID devices, there are no aliases */
	if (!dev_is_pci(dev))
		return;

	/*
	 * Add the IVRS alias to the pci aliases if it is on the same
	 * bus. The IVRS table may know about a quirk that we don't.
	 */
	ivrs_alias = pci_seg->alias_table[pci_dev_id(pdev)];
	if (ivrs_alias != pci_dev_id(pdev) &&
	    PCI_BUS_NUM(ivrs_alias) == pdev->bus->number)
		pci_add_dma_alias(pdev, ivrs_alias & 0xff, 1);

	clone_aliases(iommu, dev);
}

static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_dev_data *dev_data;

	dev_data = search_dev_data(iommu, devid);

	if (dev_data == NULL) {
		dev_data = alloc_dev_data(iommu, devid);
		if (!dev_data)
			return NULL;

		if (translation_pre_enabled(iommu))
			dev_data->defer_attach = true;
	}

	return dev_data;
}

/*
 * Find or create an IOMMU group for an acpihid device.
 */
static struct iommu_group *acpihid_device_group(struct device *dev)
{
	struct acpihid_map_entry *p, *entry = NULL;
	int devid;

	devid = get_acpihid_device_id(dev, &entry);
	if (devid < 0)
		return ERR_PTR(devid);

	list_for_each_entry(p, &acpihid_map, list) {
		if ((devid == p->devid) && p->group)
			entry->group = p->group;
	}

	if (!entry->group)
		entry->group = generic_device_group(dev);
	else
		iommu_group_ref_get(entry->group);

	return entry->group;
}

static inline bool pdev_pasid_supported(struct iommu_dev_data *dev_data)
{
	return (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP);
}

static u32 pdev_get_caps(struct pci_dev *pdev)
{
	int features;
	u32 flags = 0;

	if (pci_ats_supported(pdev))
		flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;

	if (pci_pri_supported(pdev))
		flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;

	features = pci_pasid_features(pdev);
	if (features >= 0) {
		flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;

		if (features & PCI_PASID_CAP_EXEC)
			flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;

		if (features & PCI_PASID_CAP_PRIV)
			flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
	}

	return flags;
}

static inline int pdev_enable_cap_ats(struct pci_dev *pdev)
{
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
	int ret = -EINVAL;

	if (dev_data->ats_enabled)
		return 0;

	if (amd_iommu_iotlb_sup &&
	    (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP)) {
		ret = pci_enable_ats(pdev, PAGE_SHIFT);
		if (!ret) {
			dev_data->ats_enabled = 1;
			dev_data->ats_qdep    = pci_ats_queue_depth(pdev);
		}
	}

	return ret;
}

static inline void pdev_disable_cap_ats(struct pci_dev *pdev)
{
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);

	if (dev_data->ats_enabled) {
		pci_disable_ats(pdev);
		dev_data->ats_enabled = 0;
	}
}

static inline int pdev_enable_cap_pri(struct pci_dev *pdev)
{
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
	int ret = -EINVAL;

	if (dev_data->pri_enabled)
		return 0;

	if (!dev_data->ats_enabled)
		return 0;

	if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) {
		/*
		 * First reset the PRI state of the device.
		 * FIXME: Hardcode number of outstanding requests for now
		 */
		if (!pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32)) {
			dev_data->pri_enabled = 1;
			dev_data->pri_tlp     = pci_prg_resp_pasid_required(pdev);

			ret = 0;
		}
	}

	return ret;
}

static inline void pdev_disable_cap_pri(struct pci_dev *pdev)
{
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);

	if (dev_data->pri_enabled) {
		pci_disable_pri(pdev);
		dev_data->pri_enabled = 0;
	}
}

static inline int pdev_enable_cap_pasid(struct pci_dev *pdev)
{
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
	int ret = -EINVAL;

	if (dev_data->pasid_enabled)
		return 0;

	if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) {
		/* Only allow access to user-accessible pages */
		ret = pci_enable_pasid(pdev, 0);
		if (!ret)
			dev_data->pasid_enabled = 1;
	}

	return ret;
}

static inline void pdev_disable_cap_pasid(struct pci_dev *pdev)
{
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);

	if (dev_data->pasid_enabled) {
		pci_disable_pasid(pdev);
		dev_data->pasid_enabled = 0;
	}
}
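
/*
 * The enable order below matters: PASID first, then ATS, then PRI, since
 * pdev_enable_cap_pri() above only enables PRI when ATS is already enabled
 * on the device.
 */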
static void pdev_enable_caps(struct pci_dev *pdev)
{
	pdev_enable_cap_pasid(pdev);
	pdev_enable_cap_ats(pdev);
	pdev_enable_cap_pri(pdev);
}

static void pdev_disable_caps(struct pci_dev *pdev)
{
	pdev_disable_cap_ats(pdev);
	pdev_disable_cap_pasid(pdev);
	pdev_disable_cap_pri(pdev);
}

/*
 * This function checks if the driver got a valid device from the caller to
 * avoid dereferencing invalid pointers.
 */
static bool check_device(struct device *dev)
{
	struct amd_iommu_pci_seg *pci_seg;
	struct amd_iommu *iommu;
	int devid, sbdf;

	if (!dev)
		return false;

	sbdf = get_device_sbdf_id(dev);
	if (sbdf < 0)
		return false;
	devid = PCI_SBDF_TO_DEVID(sbdf);

	iommu = rlookup_amd_iommu(dev);
	if (!iommu)
		return false;

	/* Out of our scope? */
	pci_seg = iommu->pci_seg;
	if (devid > pci_seg->last_bdf)
		return false;

	return true;
}

static int iommu_init_device(struct amd_iommu *iommu, struct device *dev)
{
	struct iommu_dev_data *dev_data;
	int devid, sbdf;

	if (dev_iommu_priv_get(dev))
		return 0;

	sbdf = get_device_sbdf_id(dev);
	if (sbdf < 0)
		return sbdf;

	devid = PCI_SBDF_TO_DEVID(sbdf);
	dev_data = find_dev_data(iommu, devid);
	if (!dev_data)
		return -ENOMEM;

	dev_data->dev = dev;

	/*
	 * dev_iommu_priv_set() needs to be called before setup_aliases().
	 * Otherwise, the subsequent call to dev_iommu_priv_get() will fail.
	 */
	dev_iommu_priv_set(dev, dev_data);
	setup_aliases(iommu, dev);

	/*
	 * By default we use passthrough mode for IOMMUv2-capable devices.
	 * But if amd_iommu=force_isolation is set (e.g. to debug DMA to
	 * invalid addresses), we ignore the capability for the device so
	 * it'll be forced to go into translation mode.
	 */
	if ((iommu_default_passthrough() || !amd_iommu_force_isolation) &&
	    dev_is_pci(dev) && amd_iommu_gt_ppr_supported()) {
		dev_data->flags = pdev_get_caps(to_pci_dev(dev));
	}

	return 0;
}

static void iommu_ignore_device(struct amd_iommu *iommu, struct device *dev)
{
	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
	struct dev_table_entry *dev_table = get_dev_table(iommu);
	int devid, sbdf;

	sbdf = get_device_sbdf_id(dev);
	if (sbdf < 0)
		return;

	devid = PCI_SBDF_TO_DEVID(sbdf);
	pci_seg->rlookup_table[devid] = NULL;
	memset(&dev_table[devid], 0, sizeof(struct dev_table_entry));

	setup_aliases(iommu, dev);
}

/****************************************************************************
 *
 * Interrupt handling functions
 *
 ****************************************************************************/

static void dump_dte_entry(struct amd_iommu *iommu, u16 devid)
{
	int i;
	struct dev_table_entry dte;
	struct iommu_dev_data *dev_data = find_dev_data(iommu, devid);

	get_dte256(iommu, dev_data, &dte);

	for (i = 0; i < 4; ++i)
		pr_err("DTE[%d]: %016llx\n", i, dte.data[i]);
}

static void dump_command(unsigned long phys_addr)
{
	struct iommu_cmd *cmd = iommu_phys_to_virt(phys_addr);
	int i;

	for (i = 0; i < 4; ++i)
		pr_err("CMD[%d]: %08x\n", i, cmd->data[i]);
}

static void amd_iommu_report_rmp_hw_error(struct amd_iommu *iommu, volatile u32 *event)
{
	struct iommu_dev_data *dev_data = NULL;
	int devid, vmg_tag, flags;
	struct pci_dev *pdev;
	u64 spa;

	devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
	vmg_tag = (event[1]) & 0xFFFF;
	flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
	spa     = ((u64)event[3] << 32) | (event[2] & 0xFFFFFFF8);

	pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
					   devid & 0xff);
	if (pdev)
		dev_data = dev_iommu_priv_get(&pdev->dev);

	if (dev_data) {
		if (__ratelimit(&dev_data->rs)) {
			pci_err(pdev, "Event logged [RMP_HW_ERROR vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
				vmg_tag, spa, flags);
		}
	} else {
		pr_err_ratelimited("Event logged [RMP_HW_ERROR device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
				   iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
				   vmg_tag, spa, flags);
	}

	if (pdev)
		pci_dev_put(pdev);
}

static void amd_iommu_report_rmp_fault(struct amd_iommu *iommu, volatile u32 *event)
{
	struct iommu_dev_data *dev_data = NULL;
	int devid, flags_rmp, vmg_tag, flags;
	struct pci_dev *pdev;
	u64 gpa;

	devid     = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
	flags_rmp = (event[0] >> EVENT_FLAGS_SHIFT) & 0xFF;
	vmg_tag   = (event[1]) & 0xFFFF;
	flags     = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
	gpa       = ((u64)event[3] << 32) | event[2];

	pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
					   devid & 0xff);
	if (pdev)
		dev_data = dev_iommu_priv_get(&pdev->dev);

	if (dev_data) {
		if (__ratelimit(&dev_data->rs)) {
			pci_err(pdev, "Event logged [RMP_PAGE_FAULT vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
				vmg_tag, gpa, flags_rmp, flags);
		}
	} else {
		pr_err_ratelimited("Event logged [RMP_PAGE_FAULT device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
				   iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
				   vmg_tag, gpa, flags_rmp, flags);
	}

	if (pdev)
		pci_dev_put(pdev);
}

#define IS_IOMMU_MEM_TRANSACTION(flags)		\
	(((flags) & EVENT_FLAG_I) == 0)

#define IS_WRITE_REQUEST(flags)			\
	((flags) & EVENT_FLAG_RW)

static void amd_iommu_report_page_fault(struct amd_iommu *iommu,
					u16 devid, u16 domain_id,
					u64 address, int flags)
{
	struct iommu_dev_data *dev_data = NULL;
	struct pci_dev *pdev;

	pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
					   devid & 0xff);
	if (pdev)
		dev_data = dev_iommu_priv_get(&pdev->dev);

	if (dev_data) {
		/*
		 * If this is a DMA fault (for which the I(nterrupt)
		 * bit will be unset), allow report_iommu_fault() to
		 * prevent logging it.
		 */
		if (IS_IOMMU_MEM_TRANSACTION(flags)) {
			/* Device not attached to domain properly */
			if (dev_data->domain == NULL) {
				pr_err_ratelimited("Event logged [Device not attached to domain properly]\n");
				pr_err_ratelimited(" device=%04x:%02x:%02x.%x domain=0x%04x\n",
						   iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid),
						   PCI_FUNC(devid), domain_id);
				goto out;
			}

			if (!report_iommu_fault(&dev_data->domain->domain,
						&pdev->dev, address,
						IS_WRITE_REQUEST(flags) ?
							IOMMU_FAULT_WRITE :
							IOMMU_FAULT_READ))
				goto out;
		}

		if (__ratelimit(&dev_data->rs)) {
			pci_err(pdev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%llx flags=0x%04x]\n",
				domain_id, address, flags);
		}
	} else {
		pr_err_ratelimited("Event logged [IO_PAGE_FAULT device=%04x:%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
				   iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
				   domain_id, address, flags);
	}

out:
	if (pdev)
		pci_dev_put(pdev);
}

static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
{
	struct device *dev = iommu->iommu.dev;
	int type, devid, flags, tag;
	volatile u32 *event = __evt;
	int count = 0;
	u64 address, ctrl;
	u32 pasid;

retry:
	type    = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
	devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
	pasid   = (event[0] & EVENT_DOMID_MASK_HI) |
		  (event[1] & EVENT_DOMID_MASK_LO);
	flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
	address = (u64)(((u64)event[3]) << 32) | event[2];
	ctrl    = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);

	if (type == 0) {
		/* Did we hit the erratum?
 */
		if (++count == LOOP_TIMEOUT) {
			pr_err("No event written to event log\n");
			return;
		}
		udelay(1);
		goto retry;
	}

	if (type == EVENT_TYPE_IO_FAULT) {
		amd_iommu_report_page_fault(iommu, devid, pasid, address, flags);
		return;
	}

	switch (type) {
	case EVENT_TYPE_ILL_DEV:
		dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
			pasid, address, flags);
		dev_err(dev, "Control Reg : 0x%llx\n", ctrl);
		dump_dte_entry(iommu, devid);
		break;
	case EVENT_TYPE_DEV_TAB_ERR:
		dev_err(dev, "Event logged [DEV_TAB_HARDWARE_ERROR device=%04x:%02x:%02x.%x "
			"address=0x%llx flags=0x%04x]\n",
			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
			address, flags);
		break;
	case EVENT_TYPE_PAGE_TAB_ERR:
		dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%04x:%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n",
			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
			pasid, address, flags);
		break;
	case EVENT_TYPE_ILL_CMD:
		dev_err(dev, "Event logged [ILLEGAL_COMMAND_ERROR address=0x%llx]\n", address);
		dump_command(address);
		break;
	case EVENT_TYPE_CMD_HARD_ERR:
		dev_err(dev, "Event logged [COMMAND_HARDWARE_ERROR address=0x%llx flags=0x%04x]\n",
			address, flags);
		break;
	case EVENT_TYPE_IOTLB_INV_TO:
		dev_err(dev, "Event logged [IOTLB_INV_TIMEOUT device=%04x:%02x:%02x.%x address=0x%llx]\n",
			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
			address);
		break;
	case EVENT_TYPE_INV_DEV_REQ:
		dev_err(dev, "Event logged [INVALID_DEVICE_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
			pasid, address, flags);
		break;
	case EVENT_TYPE_RMP_FAULT:
		amd_iommu_report_rmp_fault(iommu, event);
		break;
	case EVENT_TYPE_RMP_HW_ERR:
		amd_iommu_report_rmp_hw_error(iommu, event);
		break;
	case EVENT_TYPE_INV_PPR_REQ:
		pasid = PPR_PASID(*((u64 *)__evt));
		tag = event[1] & 0x03FF;
		dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
			pasid, address, flags, tag);
		break;
	default:
		dev_err(dev, "Event logged [UNKNOWN event[0]=0x%08x event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n",
			event[0], event[1], event[2], event[3]);
	}

	/*
	 * To detect hardware erratum 732 we need to clear the entry back to
	 * zero. This erratum does not exist on SNP-enabled systems, and the
	 * event buffer is not writable there anyway.
	 */
	if (!amd_iommu_snp_en)
		memset(__evt, 0, 4 * sizeof(u32));
}

static void iommu_poll_events(struct amd_iommu *iommu)
{
	u32 head, tail;

	head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	while (head != tail) {
		iommu_print_event(iommu, iommu->evt_buf + head);

		/* Update head pointer of hardware ring-buffer */
		head = (head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE;
		writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	}
}

#ifdef CONFIG_IRQ_REMAP
static int (*iommu_ga_log_notifier)(u32);

int amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
{
	iommu_ga_log_notifier = notifier;

	/*
	 * Ensure all in-flight IRQ handlers run to completion before returning
	 * to the caller, e.g. to ensure module code isn't unloaded while it's
	 * being executed in the IRQ handler.
	 */
	if (!notifier)
		synchronize_rcu();

	return 0;
}
EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);

static void iommu_poll_ga_log(struct amd_iommu *iommu)
{
	u32 head, tail;

	if (iommu->ga_log == NULL)
		return;

	head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET);

	while (head != tail) {
		volatile u64 *raw;
		u64 log_entry;

		raw = (u64 *)(iommu->ga_log + head);

		/* Avoid memcpy function-call overhead */
		log_entry = *raw;

		/* Update head pointer of hardware ring-buffer */
		head = (head + GA_ENTRY_SIZE) % GA_LOG_SIZE;
		writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);

		/* Handle GA entry */
		switch (GA_REQ_TYPE(log_entry)) {
		case GA_GUEST_NR:
			if (!iommu_ga_log_notifier)
				break;

			pr_debug("%s: devid=%#x, ga_tag=%#x\n",
				 __func__, GA_DEVID(log_entry),
				 GA_TAG(log_entry));

			if (iommu_ga_log_notifier(GA_TAG(log_entry)) != 0)
				pr_err("GA log notifier failed.\n");
			break;
		default:
			break;
		}
	}
}

static void
amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu)
{
	if (!irq_remapping_enabled || !dev_is_pci(dev) ||
	    !pci_dev_has_default_msi_parent_domain(to_pci_dev(dev)))
		return;

	dev_set_msi_domain(dev, iommu->ir_domain);
}

#else /* CONFIG_IRQ_REMAP */
static inline void
amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
#endif /* !CONFIG_IRQ_REMAP */
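
/*
 * Common handler for the event/PPR/GA log interrupts. @int_mask selects the
 * "log not empty" status bit for the given log and @overflow_mask its
 * overflow bit. The status register is write-1-to-clear, hence the writel()
 * of the mask before each pass; see the ERBT1312 comment below for why the
 * status must be re-read afterwards.
 */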
static void amd_iommu_handle_irq(void *data, const char *evt_type,
				 u32 int_mask, u32 overflow_mask,
				 void (*int_handler)(struct amd_iommu *),
				 void (*overflow_handler)(struct amd_iommu *))
{
	struct amd_iommu *iommu = (struct amd_iommu *) data;
	u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
	u32 mask = int_mask | overflow_mask;

	while (status & mask) {
		/* Enable interrupt sources again */
		writel(mask, iommu->mmio_base + MMIO_STATUS_OFFSET);

		if (int_handler) {
			pr_devel("Processing IOMMU (ivhd%d) %s Log\n",
				 iommu->index, evt_type);
			int_handler(iommu);
		}

		if ((status & overflow_mask) && overflow_handler)
			overflow_handler(iommu);

		/*
		 * Hardware bug: ERBT1312
		 * When re-enabling an interrupt (by writing 1 to clear the
		 * bit), the hardware might also try to set the interrupt bit
		 * in the event status register. In this scenario, the bit
		 * will stay set and disable subsequent interrupts.
		 *
		 * Workaround: The IOMMU driver should read back the status
		 * register and check if the interrupt bits are cleared. If
		 * not, the driver needs to go through the interrupt handler
		 * again and re-clear the bits.
		 */
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
	}
}

irqreturn_t amd_iommu_int_thread_evtlog(int irq, void *data)
{
	amd_iommu_handle_irq(data, "Evt", MMIO_STATUS_EVT_INT_MASK,
			     MMIO_STATUS_EVT_OVERFLOW_MASK,
			     iommu_poll_events, amd_iommu_restart_event_logging);

	return IRQ_HANDLED;
}

irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data)
{
	amd_iommu_handle_irq(data, "PPR", MMIO_STATUS_PPR_INT_MASK,
			     MMIO_STATUS_PPR_OVERFLOW_MASK,
			     amd_iommu_poll_ppr_log, amd_iommu_restart_ppr_log);

	return IRQ_HANDLED;
}

irqreturn_t amd_iommu_int_thread_galog(int irq, void *data)
{
#ifdef CONFIG_IRQ_REMAP
	amd_iommu_handle_irq(data, "GA", MMIO_STATUS_GALOG_INT_MASK,
			     MMIO_STATUS_GALOG_OVERFLOW_MASK,
			     iommu_poll_ga_log, amd_iommu_restart_ga_log);
#endif

	return IRQ_HANDLED;
}

irqreturn_t amd_iommu_int_thread(int irq, void *data)
{
	amd_iommu_int_thread_evtlog(irq, data);
	amd_iommu_int_thread_pprlog(irq, data);
	amd_iommu_int_thread_galog(irq, data);

	return IRQ_HANDLED;
}

irqreturn_t amd_iommu_int_handler(int irq, void *data)
{
	return IRQ_WAKE_THREAD;
}

/****************************************************************************
 *
 * IOMMU command queuing functions
 *
 ****************************************************************************/

static void dump_command_buffer(struct amd_iommu *iommu)
{
	struct iommu_cmd *cmd;
	u32 head, tail;
	int i;

	head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

	pr_err("CMD Buffer head=%llu tail=%llu\n", MMIO_CMD_BUFFER_HEAD(head),
	       MMIO_CMD_BUFFER_TAIL(tail));

	for (i = 0; i < CMD_BUFFER_ENTRIES; i++) {
		cmd = (struct iommu_cmd *)(iommu->cmd_buf + i * sizeof(*cmd));
		pr_err("%3d: %08x %08x %08x %08x\n", i, cmd->data[0], cmd->data[1], cmd->data[2],
		       cmd->data[3]);
	}
}
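
/*
 * Poll for completion of a COMPLETION_WAIT command: the command (built in
 * build_completion_wait() below) instructs the IOMMU to store the 64-bit
 * @data value to the semaphore memory at iommu->cmd_sem_paddr once all
 * prior commands have finished, so the driver simply spins until that
 * value shows up or the loop times out.
 */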
static int wait_on_sem(struct amd_iommu *iommu, u64 data)
{
	int i = 0;

	while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) {
		udelay(1);
		i += 1;
	}

	if (i == LOOP_TIMEOUT) {
		pr_alert("IOMMU %04x:%02x:%02x.%01x: Completion-Wait loop timed out\n",
			 iommu->pci_seg->id, PCI_BUS_NUM(iommu->devid),
			 PCI_SLOT(iommu->devid), PCI_FUNC(iommu->devid));

		if (amd_iommu_dump)
			DO_ONCE_LITE(dump_command_buffer, iommu);

		return -EIO;
	}

	return 0;
}
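
/*
 * The command buffer is a ring of 16-byte commands: the driver advances the
 * tail as it queues commands, the hardware advances the head as it consumes
 * them. __iommu_queue_command_sync() below refuses to let the free space
 * drop to 0x20 bytes or less, which keeps the ring from filling completely
 * (a completely full ring would be indistinguishable from an empty one,
 * since both have head == tail).
 */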
static void copy_cmd_to_buffer(struct amd_iommu *iommu,
			       struct iommu_cmd *cmd)
{
	u8 *target;
	u32 tail;

	/* Copy command to buffer */
	tail = iommu->cmd_buf_tail;
	target = iommu->cmd_buf + tail;
	memcpy(target, cmd, sizeof(*cmd));

	tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
	iommu->cmd_buf_tail = tail;

	/* Tell the IOMMU about it */
	writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
}

static void build_completion_wait(struct iommu_cmd *cmd,
				  struct amd_iommu *iommu,
				  u64 data)
{
	u64 paddr = iommu->cmd_sem_paddr;

	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
	cmd->data[1] = upper_32_bits(paddr);
	cmd->data[2] = lower_32_bits(data);
	cmd->data[3] = upper_32_bits(data);
	CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
}

static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0] = devid;
	CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
}

/*
 * Builds an invalidation address which is suitable for one page or multiple
 * pages. Sets the size bit (S) if more than one page is flushed.
 */
static inline u64 build_inv_address(u64 address, size_t size)
{
	u64 pages, end, msb_diff;

	pages = iommu_num_pages(address, size, PAGE_SIZE);

	if (pages == 1)
		return address & PAGE_MASK;

	end = address + size - 1;

	/*
	 * msb_diff would hold the index of the most significant bit that
	 * flipped between the start and end.
	 */
	msb_diff = fls64(end ^ address) - 1;

	/*
	 * Bits 63:52 are sign extended. If for some reason bit 51 is different
	 * between the start and the end, invalidate everything.
	 */
	if (unlikely(msb_diff > 51)) {
		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
	} else {
		/*
		 * The msb-bit must be clear on the address. Just set all the
		 * lower bits.
		 */
		address |= (1ull << msb_diff) - 1;
	}

	/* Clear bits 11:0 */
	address &= PAGE_MASK;

	/* Set the size bit - we flush more than one 4kb page */
	return address | CMD_INV_IOMMU_PAGES_SIZE_MASK;
}
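
/*
 * Worked example for build_inv_address() (illustrative): address = 0x1000,
 * size = 0x3000. The range ends at 0x3fff, so the highest differing bit
 * between 0x1000 and 0x3fff is bit 13 (msb_diff = 13). Setting all lower
 * bits and then clearing bits 11:0 yields 0x1000, i.e. bit 12 set and bit
 * 13 clear, which together with the S bit encodes the naturally aligned
 * 16 KiB region 0x0000-0x3fff that covers the requested range.
 */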
static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
				  size_t size, u16 domid,
				  ioasid_t pasid, bool gn)
{
	u64 inv_address = build_inv_address(address, size);

	memset(cmd, 0, sizeof(*cmd));

	cmd->data[1] |= domid;
	cmd->data[2]  = lower_32_bits(inv_address);
	cmd->data[3]  = upper_32_bits(inv_address);
	/* PDE bit - we want to flush everything, not only the PTEs */
	cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
	if (gn) {
		cmd->data[0] |= pasid;
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
	}
	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
}

static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
				  u64 address, size_t size,
				  ioasid_t pasid, bool gn)
{
	u64 inv_address = build_inv_address(address, size);

	memset(cmd, 0, sizeof(*cmd));

	cmd->data[0]  = devid;
	cmd->data[0] |= (qdep & 0xff) << 24;
	cmd->data[1]  = devid;
	cmd->data[2]  = lower_32_bits(inv_address);
	cmd->data[3]  = upper_32_bits(inv_address);
	if (gn) {
		cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
		cmd->data[1] |= (pasid & 0xff) << 16;
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
	}

	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
}

static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, u32 pasid,
			       int status, int tag, u8 gn)
{
	memset(cmd, 0, sizeof(*cmd));

	cmd->data[0] = devid;
	if (gn) {
		cmd->data[1] = pasid;
		cmd->data[2] = CMD_INV_IOMMU_PAGES_GN_MASK;
	}
	cmd->data[3] = tag & 0x1ff;
	cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;

	CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
}

static void build_inv_all(struct iommu_cmd *cmd)
{
	memset(cmd, 0, sizeof(*cmd));
	CMD_SET_TYPE(cmd, CMD_INV_ALL);
}

static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0] = devid;
	CMD_SET_TYPE(cmd, CMD_INV_IRT);
}

/*
 * Writes the command to the IOMMU's command buffer and informs the
 * hardware about the new command.
 */
static int __iommu_queue_command_sync(struct amd_iommu *iommu,
				      struct iommu_cmd *cmd,
				      bool sync)
{
	unsigned int count = 0;
	u32 left, next_tail;

	next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
again:
	left      = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE;

	if (left <= 0x20) {
		/* Skip udelay() the first time around */
		if (count++) {
			if (count == LOOP_TIMEOUT) {
				pr_err("Command buffer timeout\n");
				return -EIO;
			}

			udelay(1);
		}

		/* Update head and recheck remaining space */
		iommu->cmd_buf_head = readl(iommu->mmio_base +
					    MMIO_CMD_HEAD_OFFSET);

		goto again;
	}

	copy_cmd_to_buffer(iommu, cmd);

	/* Do we need to make sure all commands are processed? */
	iommu->need_sync = sync;

	return 0;
}

static int iommu_queue_command_sync(struct amd_iommu *iommu,
				    struct iommu_cmd *cmd,
				    bool sync)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&iommu->lock, flags);
	ret = __iommu_queue_command_sync(iommu, cmd, sync);
	raw_spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}

static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
	return iommu_queue_command_sync(iommu, cmd, true);
}

/*
 * This function queues a completion wait command into the command
 * buffer of an IOMMU
 */
static int iommu_completion_wait(struct amd_iommu *iommu)
{
	struct iommu_cmd cmd;
	unsigned long flags;
	int ret;
	u64 data;

	if (!iommu->need_sync)
		return 0;

	data = atomic64_inc_return(&iommu->cmd_sem_val);
	build_completion_wait(&cmd, iommu, data);

	raw_spin_lock_irqsave(&iommu->lock, flags);

	ret = __iommu_queue_command_sync(iommu, &cmd, false);
	if (ret)
		goto out_unlock;

	ret = wait_on_sem(iommu, data);

out_unlock:
	raw_spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}

static void domain_flush_complete(struct protection_domain *domain)
{
	struct pdom_iommu_info *pdom_iommu_info;
	unsigned long i;

	lockdep_assert_held(&domain->lock);

	/*
	 * Devices of this domain are behind this IOMMU;
	 * we need to wait for completion of all commands.
	 */
	xa_for_each(&domain->iommu_array, i, pdom_iommu_info)
		iommu_completion_wait(pdom_iommu_info->iommu);
}

static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_cmd cmd;

	build_inv_dte(&cmd, devid);

	return iommu_queue_command(iommu, &cmd);
}

static void iommu_flush_dte_sync(struct amd_iommu *iommu, u16 devid)
{
	int ret;

	ret = iommu_flush_dte(iommu, devid);
	if (!ret)
		iommu_completion_wait(iommu);
}

static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
{
	u32 devid;
	u16 last_bdf = iommu->pci_seg->last_bdf;

	for (devid = 0; devid <= last_bdf; ++devid)
		iommu_flush_dte(iommu, devid);

	iommu_completion_wait(iommu);
}

/*
 * This function uses heavy locking and may disable irqs for some time. But
 * this is no issue because it is only called during resume.
 */
static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
{
	u32 dom_id;
	u16 last_bdf = iommu->pci_seg->last_bdf;

	for (dom_id = 0; dom_id <= last_bdf; ++dom_id) {
		struct iommu_cmd cmd;

		build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
				      dom_id, IOMMU_NO_PASID, false);
		iommu_queue_command(iommu, &cmd);
	}

	iommu_completion_wait(iommu);
}

static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
{
	struct iommu_cmd cmd;

	build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
			      dom_id, IOMMU_NO_PASID, false);
	iommu_queue_command(iommu, &cmd);

	iommu_completion_wait(iommu);
}

static void amd_iommu_flush_all(struct amd_iommu *iommu)
{
	struct iommu_cmd cmd;

	build_inv_all(&cmd);

	iommu_queue_command(iommu, &cmd);
	iommu_completion_wait(iommu);
}

static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_cmd cmd;

	build_inv_irt(&cmd, devid);

	iommu_queue_command(iommu, &cmd);
}

static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
{
	u32 devid;
	u16 last_bdf = iommu->pci_seg->last_bdf;

	if (iommu->irtcachedis_enabled)
		return;

	for (devid = 0; devid <= last_bdf; devid++)
		iommu_flush_irt(iommu, devid);

	iommu_completion_wait(iommu);
}

void amd_iommu_flush_all_caches(struct amd_iommu *iommu)
{
	if (check_feature(FEATURE_IA)) {
		amd_iommu_flush_all(iommu);
	} else {
		amd_iommu_flush_dte_all(iommu);
		amd_iommu_flush_irt_all(iommu);
		amd_iommu_flush_tlb_all(iommu);
	}
}

/*
 * Command send function for flushing on-device TLB
 */
static int device_flush_iotlb(struct iommu_dev_data *dev_data, u64 address,
			      size_t size, ioasid_t pasid, bool gn)
{
	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
	struct iommu_cmd cmd;
	int qdep = dev_data->ats_qdep;

	build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address,
			      size, pasid, gn);

	return iommu_queue_command(iommu, &cmd);
}

static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data)
{
	struct amd_iommu *iommu = data;

	return iommu_flush_dte(iommu, alias);
}

/*
 * Command send function for invalidating a device table entry
 */
static int device_flush_dte(struct iommu_dev_data *dev_data)
{
	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
	struct pci_dev *pdev = NULL;
	struct amd_iommu_pci_seg *pci_seg;
	u16 alias;
	int ret;

	if (dev_is_pci(dev_data->dev))
		pdev = to_pci_dev(dev_data->dev);

	if (pdev)
		ret = pci_for_each_dma_alias(pdev,
					     device_flush_dte_alias, iommu);
	else
		ret = iommu_flush_dte(iommu, dev_data->devid);
	if (ret)
		return ret;

	pci_seg = iommu->pci_seg;
	alias = pci_seg->alias_table[dev_data->devid];
	if (alias != dev_data->devid) {
		ret = iommu_flush_dte(iommu, alias);
		if (ret)
			return ret;
	}

	if (dev_data->ats_enabled) {
		/* Invalidate the entire contents of an IOTLB */
		ret = device_flush_iotlb(dev_data, 0, ~0UL,
					 IOMMU_NO_PASID, false);
	}

	return ret;
}
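
/*
 * V2 page tables are tracked with a per-device domain ID
 * (gcr3_info.domid), so the flush below is issued once per device in the
 * domain. V1 page tables share the per-domain ID, so
 * domain_flush_pages_v1() instead issues a single command per IOMMU that
 * has devices in the domain.
 */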
static int domain_flush_pages_v2(struct protection_domain *pdom,
				 u64 address, size_t size)
{
	struct iommu_dev_data *dev_data;
	struct iommu_cmd cmd;
	int ret = 0;

	lockdep_assert_held(&pdom->lock);
	list_for_each_entry(dev_data, &pdom->dev_list, list) {
		struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
		u16 domid = dev_data->gcr3_info.domid;

		build_inv_iommu_pages(&cmd, address, size,
				      domid, IOMMU_NO_PASID, true);

		ret |= iommu_queue_command(iommu, &cmd);
	}

	return ret;
}

static int domain_flush_pages_v1(struct protection_domain *pdom,
				 u64 address, size_t size)
{
	struct pdom_iommu_info *pdom_iommu_info;
	struct iommu_cmd cmd;
	int ret = 0;
	unsigned long i;

	lockdep_assert_held(&pdom->lock);

	build_inv_iommu_pages(&cmd, address, size,
			      pdom->id, IOMMU_NO_PASID, false);

	xa_for_each(&pdom->iommu_array, i, pdom_iommu_info) {
		/*
		 * Devices of this domain are behind this IOMMU;
		 * we need a TLB flush.
		 */
		ret |= iommu_queue_command(pdom_iommu_info->iommu, &cmd);
	}

	return ret;
}

/*
 * TLB invalidation function which is called from the mapping functions.
 * It flushes a range of PTEs of the domain.
 */
static void __domain_flush_pages(struct protection_domain *domain,
				 u64 address, size_t size)
{
	struct iommu_dev_data *dev_data;
	int ret = 0;
	ioasid_t pasid = IOMMU_NO_PASID;
	bool gn = false;

	lockdep_assert_held(&domain->lock);

	if (pdom_is_v2_pgtbl_mode(domain)) {
		gn = true;
		ret = domain_flush_pages_v2(domain, address, size);
	} else {
		ret = domain_flush_pages_v1(domain, address, size);
	}

	list_for_each_entry(dev_data, &domain->dev_list, list) {

		if (!dev_data->ats_enabled)
			continue;

		ret |= device_flush_iotlb(dev_data, address, size, pasid, gn);
	}

	WARN_ON(ret);
}

void amd_iommu_domain_flush_pages(struct protection_domain *domain,
				  u64 address, size_t size)
{
	lockdep_assert_held(&domain->lock);

	if (likely(!amd_iommu_np_cache)) {
		__domain_flush_pages(domain, address, size);

		/* Wait until IOMMU TLB and all device IOTLB flushes are complete */
		domain_flush_complete(domain);

		return;
	}

	/*
	 * When NpCache is on, we infer that we run in a VM and use a vIOMMU.
	 * In such setups it is best to avoid flushes of ranges which are not
	 * naturally aligned, since it would lead to flushes of unmodified
	 * PTEs. Such flushes would require the hypervisor to do more work than
	 * necessary. Therefore, perform repeated flushes of aligned ranges
	 * until you cover the range. Each iteration flushes the smaller
	 * between the natural alignment of the address that we flush and the
	 * greatest naturally aligned region that fits in the range.
	 */
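	/*
	 * Worked example (illustrative): address = 0x1000, size = 0x3000.
	 * Pass 1: __ffs(0x1000) = 12, __fls(0x3000) = 13, so 4 KiB are
	 * flushed at 0x1000. Pass 2: address = 0x2000, size = 0x2000, both
	 * alignments are 13, so 8 KiB are flushed at 0x2000. The whole range
	 * is covered by two naturally aligned flushes.
	 */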
	while (size != 0) {
		int addr_alignment = __ffs(address);
		int size_alignment = __fls(size);
		int min_alignment;
		size_t flush_size;

		/*
		 * size is always non-zero, but address might be zero, causing
		 * addr_alignment to be negative. As the casting of the
		 * argument in __ffs(address) to long might trim the high bits
		 * of the address on x86-32, cast to long when doing the check.
		 */
		if (likely((unsigned long)address != 0))
			min_alignment = min(addr_alignment, size_alignment);
		else
			min_alignment = size_alignment;

		flush_size = 1ul << min_alignment;

		__domain_flush_pages(domain, address, flush_size);
		address += flush_size;
		size -= flush_size;
	}

	/* Wait until IOMMU TLB and all device IOTLB flushes are complete */
	domain_flush_complete(domain);
}

/* Flush the whole IO/TLB for a given protection domain - including PDE */
static void amd_iommu_domain_flush_all(struct protection_domain *domain)
{
	amd_iommu_domain_flush_pages(domain, 0,
				     CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
}

void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data,
				     ioasid_t pasid, u64 address, size_t size)
{
	struct iommu_cmd cmd;
	struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);

	build_inv_iommu_pages(&cmd, address, size,
			      dev_data->gcr3_info.domid, pasid, true);
	iommu_queue_command(iommu, &cmd);

	if (dev_data->ats_enabled)
		device_flush_iotlb(dev_data, address, size, pasid, true);

	iommu_completion_wait(iommu);
}

static void dev_flush_pasid_all(struct iommu_dev_data *dev_data,
				ioasid_t pasid)
{
	amd_iommu_dev_flush_pasid_pages(dev_data, pasid, 0,
					CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
}

int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag)
{
	struct iommu_dev_data *dev_data;
	struct amd_iommu *iommu;
	struct iommu_cmd cmd;

	dev_data = dev_iommu_priv_get(dev);
	iommu    = get_amd_iommu_from_dev(dev);

	build_complete_ppr(&cmd, dev_data->devid, pasid, status,
			   tag, dev_data->pri_tlp);

	return iommu_queue_command(iommu, &cmd);
}

/****************************************************************************
 *
 * The next functions belong to domain allocation. A domain is allocated
 * for every IOMMU as the default domain. If device isolation is enabled,
 * every device gets its own domain. The most important thing about domains
 * is the page table mapping the DMA address space they contain.
 *
 ****************************************************************************/

int amd_iommu_pdom_id_alloc(void)
{
	return ida_alloc_range(&pdom_ids, 1, MAX_DOMAIN_ID - 1, GFP_ATOMIC);
}

int amd_iommu_pdom_id_reserve(u16 id, gfp_t gfp)
{
	return ida_alloc_range(&pdom_ids, id, id, gfp);
}

void amd_iommu_pdom_id_free(int id)
{
	ida_free(&pdom_ids, id);
}

void amd_iommu_pdom_id_destroy(void)
{
	ida_destroy(&pdom_ids);
}

static void free_gcr3_tbl_level1(u64 *tbl)
{
	u64 *ptr;
	int i;

	for (i = 0; i < 512; ++i) {
		if (!(tbl[i] & GCR3_VALID))
			continue;

		ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);

		iommu_free_pages(ptr);
	}
}

static void free_gcr3_tbl_level2(u64 *tbl)
{
	u64 *ptr;
	int i;

	for (i = 0; i < 512; ++i) {
		if (!(tbl[i] & GCR3_VALID))
			continue;

		ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);

		free_gcr3_tbl_level1(ptr);
	}
}

static void free_gcr3_table(struct gcr3_tbl_info *gcr3_info)
{
	if (gcr3_info->glx == 2)
		free_gcr3_tbl_level2(gcr3_info->gcr3_tbl);
	else if (gcr3_info->glx == 1)
		free_gcr3_tbl_level1(gcr3_info->gcr3_tbl);
	else
		WARN_ON_ONCE(gcr3_info->glx != 0);

	gcr3_info->glx = 0;

	/* Free per device domain ID */
	amd_iommu_pdom_id_free(gcr3_info->domid);

	iommu_free_pages(gcr3_info->gcr3_tbl);
	gcr3_info->gcr3_tbl = NULL;
}

/*
 * Number of GCR3 table levels required. Each level is a 4-KiB page and can
 * contain up to 512 entries.
 */
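/*
 * Example (illustrative): a device exposing 2^16 PASIDs needs
 * get_count_order(65536) = 16 index bits; with 9 bits resolved per level
 * this gives DIV_ROUND_UP(16, 9) - 1 = 1, i.e. a two-level table.
 */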
static int get_gcr3_levels(int pasids)
{
	int levels;

	if (pasids == -1)
		return amd_iommu_max_glx_val;

	levels = get_count_order(pasids);

	return levels ? (DIV_ROUND_UP(levels, 9) - 1) : levels;
}

static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info,
			    struct amd_iommu *iommu, int pasids)
{
	int levels = get_gcr3_levels(pasids);
	int nid = iommu ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE;
	int domid;

	if (levels > amd_iommu_max_glx_val)
		return -EINVAL;

	if (gcr3_info->gcr3_tbl)
		return -EBUSY;

	/* Allocate per device domain ID */
	domid = amd_iommu_pdom_id_alloc();
	if (domid <= 0)
		return -ENOSPC;
	gcr3_info->domid = domid;

	gcr3_info->gcr3_tbl = iommu_alloc_pages_node_sz(nid, GFP_ATOMIC, SZ_4K);
	if (gcr3_info->gcr3_tbl == NULL) {
		amd_iommu_pdom_id_free(domid);
		return -ENOMEM;
	}

	gcr3_info->glx = levels;

	return 0;
}
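
/*
 * Walk (and optionally allocate) the GCR3 table down to the leaf entry for
 * @pasid. Like a page-table walk, each level resolves 9 bits of the PASID,
 * starting from the top level recorded in gcr3_info->glx.
 */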
static u64 *__get_gcr3_pte(struct gcr3_tbl_info *gcr3_info,
			   ioasid_t pasid, bool alloc)
{
	int index;
	u64 *pte;
	u64 *root = gcr3_info->gcr3_tbl;
	int level = gcr3_info->glx;

	while (true) {

		index = (pasid >> (9 * level)) & 0x1ff;
		pte = &root[index];

		if (level == 0)
			break;

		if (!(*pte & GCR3_VALID)) {
			if (!alloc)
				return NULL;

			root = (void *)get_zeroed_page(GFP_ATOMIC);
			if (root == NULL)
				return NULL;

			*pte = iommu_virt_to_phys(root) | GCR3_VALID;
		}

		root = iommu_phys_to_virt(*pte & PAGE_MASK);

		level -= 1;
	}

	return pte;
}

static int update_gcr3(struct iommu_dev_data *dev_data,
		       ioasid_t pasid, unsigned long gcr3, bool set)
{
	struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
	u64 *pte;

	pte = __get_gcr3_pte(gcr3_info, pasid, true);
	if (pte == NULL)
		return -ENOMEM;

	if (set)
		*pte = (gcr3 & PAGE_MASK) | GCR3_VALID;
	else
		*pte = 0;

	dev_flush_pasid_all(dev_data, pasid);
	return 0;
}

int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid,
		       unsigned long gcr3)
{
	struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
	int ret;

	iommu_group_mutex_assert(dev_data->dev);

	ret = update_gcr3(dev_data, pasid, gcr3, true);
	if (ret)
		return ret;

	gcr3_info->pasid_cnt++;
	return ret;
}

int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid)
{
	struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
	int ret;

	iommu_group_mutex_assert(dev_data->dev);

	ret = update_gcr3(dev_data, pasid, 0, false);
	if (ret)
		return ret;

	gcr3_info->pasid_cnt--;
	return ret;
}
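
/*
 * Note: even a "clear" DTE keeps the V bit set; together with zeroed
 * translation controls this blocks DMA from the device rather than letting
 * it bypass translation (clear_dte_entry() below relies on this for the
 * blocked domain).
 */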
2024 */ 2025 static void set_dte_gcr3_table(struct amd_iommu *iommu, 2026 struct iommu_dev_data *dev_data, 2027 struct dev_table_entry *target) 2028 { 2029 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; 2030 u64 gcr3; 2031 2032 if (!gcr3_info->gcr3_tbl) 2033 return; 2034 2035 pr_debug("%s: devid=%#x, glx=%#x, gcr3_tbl=%#llx\n", 2036 __func__, dev_data->devid, gcr3_info->glx, 2037 (unsigned long long)gcr3_info->gcr3_tbl); 2038 2039 gcr3 = iommu_virt_to_phys(gcr3_info->gcr3_tbl); 2040 2041 target->data[0] |= DTE_FLAG_GV | 2042 FIELD_PREP(DTE_GLX, gcr3_info->glx) | 2043 FIELD_PREP(DTE_GCR3_14_12, gcr3 >> 12); 2044 if (pdom_is_v2_pgtbl_mode(dev_data->domain)) 2045 target->data[0] |= DTE_FLAG_GIOV; 2046 2047 target->data[1] |= FIELD_PREP(DTE_GCR3_30_15, gcr3 >> 15) | 2048 FIELD_PREP(DTE_GCR3_51_31, gcr3 >> 31); 2049 2050 /* Guest page table can only support 4 and 5 levels */ 2051 if (amd_iommu_gpt_level == PAGE_MODE_5_LEVEL) 2052 target->data[2] |= FIELD_PREP(DTE_GPT_LEVEL_MASK, GUEST_PGTABLE_5_LEVEL); 2053 else 2054 target->data[2] |= FIELD_PREP(DTE_GPT_LEVEL_MASK, GUEST_PGTABLE_4_LEVEL); 2055 } 2056 2057 static void set_dte_entry(struct amd_iommu *iommu, 2058 struct iommu_dev_data *dev_data, 2059 phys_addr_t top_paddr, unsigned int top_level) 2060 { 2061 u16 domid; 2062 u32 old_domid; 2063 struct dev_table_entry *initial_dte; 2064 struct dev_table_entry new = {}; 2065 struct protection_domain *domain = dev_data->domain; 2066 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; 2067 struct dev_table_entry *dte = &get_dev_table(iommu)[dev_data->devid]; 2068 struct pt_iommu_amdv1_hw_info pt_info; 2069 2070 make_clear_dte(dev_data, dte, &new); 2071 2072 if (gcr3_info && gcr3_info->gcr3_tbl) 2073 domid = dev_data->gcr3_info.domid; 2074 else { 2075 domid = domain->id; 2076 2077 if (domain->domain.type & __IOMMU_DOMAIN_PAGING) { 2078 /* 2079 * When updating the IO pagetable, the new top and level 2080 * are provided as parameters. For other operations i.e. 2081 * device attach, retrieve the current pagetable info 2082 * via the IOMMU PT API. 2083 */ 2084 if (top_paddr) { 2085 pt_info.host_pt_root = top_paddr; 2086 pt_info.mode = top_level + 1; 2087 } else { 2088 WARN_ON(top_paddr || top_level); 2089 pt_iommu_amdv1_hw_info(&domain->amdv1, 2090 &pt_info); 2091 } 2092 2093 new.data[0] |= __sme_set(pt_info.host_pt_root) | 2094 (pt_info.mode & DEV_ENTRY_MODE_MASK) 2095 << DEV_ENTRY_MODE_SHIFT; 2096 } 2097 } 2098 2099 new.data[0] |= DTE_FLAG_IR | DTE_FLAG_IW; 2100 2101 /* 2102 * When SNP is enabled, we can only support TV=1 with non-zero domain ID. 2103 * This is prevented by the SNP-enable and IOMMU_DOMAIN_IDENTITY check in 2104 * do_iommu_domain_alloc(). 2105 */ 2106 WARN_ON(amd_iommu_snp_en && (domid == 0)); 2107 new.data[0] |= DTE_FLAG_TV; 2108 2109 if (dev_data->ppr) 2110 new.data[0] |= 1ULL << DEV_ENTRY_PPR; 2111 2112 if (domain->dirty_tracking) 2113 new.data[0] |= DTE_FLAG_HAD; 2114 2115 if (dev_data->ats_enabled) 2116 new.data[1] |= DTE_FLAG_IOTLB; 2117 2118 old_domid = READ_ONCE(dte->data[1]) & DEV_DOMID_MASK; 2119 new.data[1] |= domid; 2120 2121 /* 2122 * Restore cached persistent DTE bits, which can be set by information 2123 * in IVRS table. See set_dev_entry_from_acpi(). 
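 *
 * Illustrative example (assumed, e.g. interrupt pass-through settings
 * from the IVRS): whatever bits were cached at boot are OR-ed into
 * both 128-bit halves so a full DTE rewrite does not drop them:
 *
 *	new.data128[0] |= initial_dte->data128[0];
 *	new.data128[1] |= initial_dte->data128[1];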
2124 */
2125 initial_dte = amd_iommu_get_ivhd_dte_flags(iommu->pci_seg->id, dev_data->devid);
2126 if (initial_dte) {
2127 new.data128[0] |= initial_dte->data128[0];
2128 new.data128[1] |= initial_dte->data128[1];
2129 }
2130
2131 set_dte_gcr3_table(iommu, dev_data, &new);
2132
2133 update_dte256(iommu, dev_data, &new);
2134
2135 /*
2136 * A kdump kernel might be replacing a domain ID that was copied from
2137 * the previous kernel--if so, it needs to flush the translation cache
2138 * entries for the old domain ID that is being overwritten.
2139 */
2140 if (old_domid) {
2141 amd_iommu_flush_tlb_domid(iommu, old_domid);
2142 }
2143 }
2144
2145 /*
2146 * Clear DMA-remap related flags to block all DMA (blocked domain)
2147 */
2148 static void clear_dte_entry(struct amd_iommu *iommu, struct iommu_dev_data *dev_data)
2149 {
2150 struct dev_table_entry new = {};
2151 struct dev_table_entry *dte = &get_dev_table(iommu)[dev_data->devid];
2152
2153 make_clear_dte(dev_data, dte, &new);
2154 update_dte256(iommu, dev_data, &new);
2155 }
2156
2157 /* Update and flush DTE for the given device */
2158 static void dev_update_dte(struct iommu_dev_data *dev_data, bool set)
2159 {
2160 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
2161
2162 if (set)
2163 set_dte_entry(iommu, dev_data, 0, 0);
2164 else
2165 clear_dte_entry(iommu, dev_data);
2166
2167 clone_aliases(iommu, dev_data->dev);
2168 device_flush_dte(dev_data);
2169 iommu_completion_wait(iommu);
2170 }
2171
2172 /*
2173 * If domain is SVA capable then initialize GCR3 table. Also if domain is
2174 * in v2 page table mode then update GCR3[0].
2175 */
2176 static int init_gcr3_table(struct iommu_dev_data *dev_data,
2177 struct protection_domain *pdom)
2178 {
2179 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
2180 int max_pasids = dev_data->max_pasids;
2181 struct pt_iommu_x86_64_hw_info pt_info;
2182 int ret = 0;
2183
2184 /*
2185 * If domain is in pt mode then setup GCR3 table only if device
2186 * is PASID capable
2187 */
2188 if (pdom_is_in_pt_mode(pdom) && !pdev_pasid_supported(dev_data))
2189 return ret;
2190
2191 /*
2192 * By default, setup GCR3 table to support MAX PASIDs
2193 * supported by the device/IOMMU.
2194 */
2195 ret = setup_gcr3_table(&dev_data->gcr3_info, iommu,
2196 max_pasids > 0 ?
max_pasids : 1); 2197 if (ret) 2198 return ret; 2199 2200 /* Setup GCR3[0] only if domain is setup with v2 page table mode */ 2201 if (!pdom_is_v2_pgtbl_mode(pdom)) 2202 return ret; 2203 2204 pt_iommu_x86_64_hw_info(&pdom->amdv2, &pt_info); 2205 ret = update_gcr3(dev_data, 0, __sme_set(pt_info.gcr3_pt), true); 2206 if (ret) 2207 free_gcr3_table(&dev_data->gcr3_info); 2208 2209 return ret; 2210 } 2211 2212 static void destroy_gcr3_table(struct iommu_dev_data *dev_data, 2213 struct protection_domain *pdom) 2214 { 2215 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; 2216 2217 if (pdom_is_v2_pgtbl_mode(pdom)) 2218 update_gcr3(dev_data, 0, 0, false); 2219 2220 if (gcr3_info->gcr3_tbl == NULL) 2221 return; 2222 2223 free_gcr3_table(gcr3_info); 2224 } 2225 2226 static int pdom_attach_iommu(struct amd_iommu *iommu, 2227 struct protection_domain *pdom) 2228 { 2229 struct pdom_iommu_info *pdom_iommu_info, *curr; 2230 unsigned long flags; 2231 int ret = 0; 2232 2233 spin_lock_irqsave(&pdom->lock, flags); 2234 2235 pdom_iommu_info = xa_load(&pdom->iommu_array, iommu->index); 2236 if (pdom_iommu_info) { 2237 pdom_iommu_info->refcnt++; 2238 goto out_unlock; 2239 } 2240 2241 pdom_iommu_info = kzalloc(sizeof(*pdom_iommu_info), GFP_ATOMIC); 2242 if (!pdom_iommu_info) { 2243 ret = -ENOMEM; 2244 goto out_unlock; 2245 } 2246 2247 pdom_iommu_info->iommu = iommu; 2248 pdom_iommu_info->refcnt = 1; 2249 2250 curr = xa_cmpxchg(&pdom->iommu_array, iommu->index, 2251 NULL, pdom_iommu_info, GFP_ATOMIC); 2252 if (curr) { 2253 kfree(pdom_iommu_info); 2254 ret = -ENOSPC; 2255 goto out_unlock; 2256 } 2257 2258 out_unlock: 2259 spin_unlock_irqrestore(&pdom->lock, flags); 2260 return ret; 2261 } 2262 2263 static void pdom_detach_iommu(struct amd_iommu *iommu, 2264 struct protection_domain *pdom) 2265 { 2266 struct pdom_iommu_info *pdom_iommu_info; 2267 unsigned long flags; 2268 2269 spin_lock_irqsave(&pdom->lock, flags); 2270 2271 pdom_iommu_info = xa_load(&pdom->iommu_array, iommu->index); 2272 if (!pdom_iommu_info) { 2273 spin_unlock_irqrestore(&pdom->lock, flags); 2274 return; 2275 } 2276 2277 pdom_iommu_info->refcnt--; 2278 if (pdom_iommu_info->refcnt == 0) { 2279 xa_erase(&pdom->iommu_array, iommu->index); 2280 kfree(pdom_iommu_info); 2281 } 2282 2283 spin_unlock_irqrestore(&pdom->lock, flags); 2284 } 2285 2286 /* 2287 * If a device is not yet associated with a domain, this function makes the 2288 * device visible in the domain 2289 */ 2290 static int attach_device(struct device *dev, 2291 struct protection_domain *domain) 2292 { 2293 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); 2294 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); 2295 struct pci_dev *pdev; 2296 unsigned long flags; 2297 int ret = 0; 2298 2299 mutex_lock(&dev_data->mutex); 2300 2301 if (dev_data->domain != NULL) { 2302 ret = -EBUSY; 2303 goto out; 2304 } 2305 2306 /* Do reference counting */ 2307 ret = pdom_attach_iommu(iommu, domain); 2308 if (ret) 2309 goto out; 2310 2311 /* Setup GCR3 table */ 2312 if (pdom_is_sva_capable(domain)) { 2313 ret = init_gcr3_table(dev_data, domain); 2314 if (ret) { 2315 pdom_detach_iommu(iommu, domain); 2316 goto out; 2317 } 2318 } 2319 2320 pdev = dev_is_pci(dev_data->dev) ? to_pci_dev(dev_data->dev) : NULL; 2321 if (pdev && pdom_is_sva_capable(domain)) { 2322 pdev_enable_caps(pdev); 2323 2324 /* 2325 * Device can continue to function even if IOPF 2326 * enablement failed. Hence in error path just 2327 * disable device PRI support. 
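 *
 * A sketch of the resulting capability handling on this path (it
 * mirrors the code below; shown only to make the fallback explicit):
 *
 *	pdev_enable_caps(pdev);			// ATS, PRI, PASID
 *	if (amd_iommu_iopf_add_device(iommu, dev_data))
 *		pdev_disable_cap_pri(pdev);	// drop PRI, keep the rest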
2328 */ 2329 if (amd_iommu_iopf_add_device(iommu, dev_data)) 2330 pdev_disable_cap_pri(pdev); 2331 } else if (pdev) { 2332 pdev_enable_cap_ats(pdev); 2333 } 2334 2335 /* Update data structures */ 2336 dev_data->domain = domain; 2337 spin_lock_irqsave(&domain->lock, flags); 2338 list_add(&dev_data->list, &domain->dev_list); 2339 spin_unlock_irqrestore(&domain->lock, flags); 2340 2341 /* Update device table */ 2342 dev_update_dte(dev_data, true); 2343 2344 out: 2345 mutex_unlock(&dev_data->mutex); 2346 2347 return ret; 2348 } 2349 2350 /* 2351 * Removes a device from a protection domain (with devtable_lock held) 2352 */ 2353 static void detach_device(struct device *dev) 2354 { 2355 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); 2356 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); 2357 struct protection_domain *domain = dev_data->domain; 2358 unsigned long flags; 2359 2360 mutex_lock(&dev_data->mutex); 2361 2362 /* 2363 * First check if the device is still attached. It might already 2364 * be detached from its domain because the generic 2365 * iommu_detach_group code detached it and we try again here in 2366 * our alias handling. 2367 */ 2368 if (WARN_ON(!dev_data->domain)) 2369 goto out; 2370 2371 /* Remove IOPF handler */ 2372 if (dev_data->ppr) { 2373 iopf_queue_flush_dev(dev); 2374 amd_iommu_iopf_remove_device(iommu, dev_data); 2375 } 2376 2377 if (dev_is_pci(dev)) 2378 pdev_disable_caps(to_pci_dev(dev)); 2379 2380 /* Clear DTE and flush the entry */ 2381 dev_update_dte(dev_data, false); 2382 2383 /* Flush IOTLB and wait for the flushes to finish */ 2384 spin_lock_irqsave(&domain->lock, flags); 2385 amd_iommu_domain_flush_all(domain); 2386 list_del(&dev_data->list); 2387 spin_unlock_irqrestore(&domain->lock, flags); 2388 2389 /* Clear GCR3 table */ 2390 if (pdom_is_sva_capable(domain)) 2391 destroy_gcr3_table(dev_data, domain); 2392 2393 /* Update data structures */ 2394 dev_data->domain = NULL; 2395 2396 /* decrease reference counters - needs to happen after the flushes */ 2397 pdom_detach_iommu(iommu, domain); 2398 2399 out: 2400 mutex_unlock(&dev_data->mutex); 2401 } 2402 2403 static struct iommu_device *amd_iommu_probe_device(struct device *dev) 2404 { 2405 struct iommu_device *iommu_dev; 2406 struct amd_iommu *iommu; 2407 struct iommu_dev_data *dev_data; 2408 int ret; 2409 2410 if (!check_device(dev)) 2411 return ERR_PTR(-ENODEV); 2412 2413 iommu = rlookup_amd_iommu(dev); 2414 if (!iommu) 2415 return ERR_PTR(-ENODEV); 2416 2417 /* Not registered yet? */ 2418 if (!iommu->iommu.ops) 2419 return ERR_PTR(-ENODEV); 2420 2421 if (dev_iommu_priv_get(dev)) 2422 return &iommu->iommu; 2423 2424 ret = iommu_init_device(iommu, dev); 2425 if (ret) { 2426 dev_err(dev, "Failed to initialize - trying to proceed anyway\n"); 2427 iommu_dev = ERR_PTR(ret); 2428 iommu_ignore_device(iommu, dev); 2429 goto out_err; 2430 } 2431 2432 amd_iommu_set_pci_msi_domain(dev, iommu); 2433 iommu_dev = &iommu->iommu; 2434 2435 /* 2436 * If IOMMU and device supports PASID then it will contain max 2437 * supported PASIDs, else it will be zero. 
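 *
 * Worked example with hypothetical numbers: an IOMMU advertising 2^16
 * PASIDs paired with a device whose PASID capability allows 2^8 gives:
 *
 *	min_t(u32, iommu->iommu.max_pasids,
 *	      pci_max_pasids(to_pci_dev(dev)));	// min(65536, 256) == 256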
2438 */
2439 dev_data = dev_iommu_priv_get(dev);
2440 if (amd_iommu_pasid_supported() && dev_is_pci(dev) &&
2441 pdev_pasid_supported(dev_data)) {
2442 dev_data->max_pasids = min_t(u32, iommu->iommu.max_pasids,
2443 pci_max_pasids(to_pci_dev(dev)));
2444 }
2445
2446 if (amd_iommu_pgtable == PD_MODE_NONE) {
2447 pr_warn_once("%s: DMA translation not supported by iommu.\n",
2448 __func__);
2449 iommu_dev = ERR_PTR(-ENODEV);
2450 goto out_err;
2451 }
2452
2453 out_err:
2454
2455 iommu_completion_wait(iommu);
2456
2457 if (FEATURE_NUM_INT_REMAP_SUP_2K(amd_iommu_efr2))
2458 dev_data->max_irqs = MAX_IRQS_PER_TABLE_2K;
2459 else
2460 dev_data->max_irqs = MAX_IRQS_PER_TABLE_512;
2461
2462 if (dev_is_pci(dev))
2463 pci_prepare_ats(to_pci_dev(dev), PAGE_SHIFT);
2464
2465 return iommu_dev;
2466 }
2467
2468 static void amd_iommu_release_device(struct device *dev)
2469 {
2470 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
2471
2472 WARN_ON(dev_data->domain);
2473
2474 /*
2475 * We keep dev_data around for unplugged devices and reuse it when the
2476 * device is re-plugged - not doing so would introduce a ton of races.
2477 */
2478 }
2479
2480 static struct iommu_group *amd_iommu_device_group(struct device *dev)
2481 {
2482 if (dev_is_pci(dev))
2483 return pci_device_group(dev);
2484
2485 return acpihid_device_group(dev);
2486 }
2487
2488 /*****************************************************************************
2489 *
2490 * The following functions belong to the exported interface of AMD IOMMU
2491 *
2492 * This interface allows access to lower level functions of the IOMMU
2493 * like protection domain handling and assignment of devices to domains,
2494 * which is not possible with the dma_ops interface.
2495 *
2496 *****************************************************************************/
2497
2498 static void protection_domain_init(struct protection_domain *domain)
2499 {
2500 spin_lock_init(&domain->lock);
2501 INIT_LIST_HEAD(&domain->dev_list);
2502 INIT_LIST_HEAD(&domain->dev_data_list);
2503 xa_init(&domain->iommu_array);
2504 }
2505
2506 struct protection_domain *protection_domain_alloc(void)
2507 {
2508 struct protection_domain *domain;
2509 int domid;
2510
2511 domain = kzalloc(sizeof(*domain), GFP_KERNEL);
2512 if (!domain)
2513 return NULL;
2514
2515 domid = amd_iommu_pdom_id_alloc();
2516 if (domid <= 0) {
2517 kfree(domain);
2518 return NULL;
2519 }
2520 domain->id = domid;
2521
2522 protection_domain_init(domain);
2523
2524 return domain;
2525 }
2526
2527 static bool amd_iommu_hd_support(struct amd_iommu *iommu)
2528 {
2529 if (amd_iommu_hatdis)
2530 return false;
2531
2532 return iommu && (iommu->features & FEATURE_HDSUP);
2533 }
2534
2535 static spinlock_t *amd_iommu_get_top_lock(struct pt_iommu *iommupt)
2536 {
2537 struct protection_domain *pdom =
2538 container_of(iommupt, struct protection_domain, iommu);
2539
2540 return &pdom->lock;
2541 }
2542
2543 /*
2544 * Update all HW references to the domain with a new pgtable configuration.
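 *
 * In outline (a summary of the function body below, not extra steps):
 *
 *	list_for_each_entry(dev_data, &pdom->dev_list, list) {
 *		set_dte_entry(iommu, dev_data, top_paddr, top_level);
 *		clone_aliases(iommu, dev_data->dev);
 *	}
 *	list_for_each_entry(dev_data, &pdom->dev_list, list)
 *		device_flush_dte(dev_data);
 *	domain_flush_complete(pdom);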
2545 */ 2546 static void amd_iommu_change_top(struct pt_iommu *iommu_table, 2547 phys_addr_t top_paddr, unsigned int top_level) 2548 { 2549 struct protection_domain *pdom = 2550 container_of(iommu_table, struct protection_domain, iommu); 2551 struct iommu_dev_data *dev_data; 2552 2553 lockdep_assert_held(&pdom->lock); 2554 2555 /* Update the DTE for all devices attached to this domain */ 2556 list_for_each_entry(dev_data, &pdom->dev_list, list) { 2557 struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev); 2558 2559 /* Update the HW references with the new level and top ptr */ 2560 set_dte_entry(iommu, dev_data, top_paddr, top_level); 2561 clone_aliases(iommu, dev_data->dev); 2562 } 2563 2564 list_for_each_entry(dev_data, &pdom->dev_list, list) 2565 device_flush_dte(dev_data); 2566 2567 domain_flush_complete(pdom); 2568 } 2569 2570 /* 2571 * amd_iommu_iotlb_sync_map() is used to generate flushes for non-present to 2572 * present (ie mapping) operations. It is a NOP if the IOMMU doesn't have non 2573 * present caching (like hypervisor shadowing). 2574 */ 2575 static int amd_iommu_iotlb_sync_map(struct iommu_domain *dom, 2576 unsigned long iova, size_t size) 2577 { 2578 struct protection_domain *domain = to_pdomain(dom); 2579 unsigned long flags; 2580 2581 if (likely(!amd_iommu_np_cache)) 2582 return 0; 2583 2584 spin_lock_irqsave(&domain->lock, flags); 2585 amd_iommu_domain_flush_pages(domain, iova, size); 2586 spin_unlock_irqrestore(&domain->lock, flags); 2587 return 0; 2588 } 2589 2590 static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain) 2591 { 2592 struct protection_domain *dom = to_pdomain(domain); 2593 unsigned long flags; 2594 2595 spin_lock_irqsave(&dom->lock, flags); 2596 amd_iommu_domain_flush_all(dom); 2597 spin_unlock_irqrestore(&dom->lock, flags); 2598 } 2599 2600 static void amd_iommu_iotlb_sync(struct iommu_domain *domain, 2601 struct iommu_iotlb_gather *gather) 2602 { 2603 struct protection_domain *dom = to_pdomain(domain); 2604 unsigned long flags; 2605 2606 spin_lock_irqsave(&dom->lock, flags); 2607 amd_iommu_domain_flush_pages(dom, gather->start, 2608 gather->end - gather->start + 1); 2609 spin_unlock_irqrestore(&dom->lock, flags); 2610 iommu_put_pages_list(&gather->freelist); 2611 } 2612 2613 static const struct pt_iommu_driver_ops amd_hw_driver_ops_v1 = { 2614 .get_top_lock = amd_iommu_get_top_lock, 2615 .change_top = amd_iommu_change_top, 2616 }; 2617 2618 static const struct iommu_domain_ops amdv1_ops = { 2619 IOMMU_PT_DOMAIN_OPS(amdv1), 2620 .iotlb_sync_map = amd_iommu_iotlb_sync_map, 2621 .flush_iotlb_all = amd_iommu_flush_iotlb_all, 2622 .iotlb_sync = amd_iommu_iotlb_sync, 2623 .attach_dev = amd_iommu_attach_device, 2624 .free = amd_iommu_domain_free, 2625 .enforce_cache_coherency = amd_iommu_enforce_cache_coherency, 2626 }; 2627 2628 static const struct iommu_dirty_ops amdv1_dirty_ops = { 2629 IOMMU_PT_DIRTY_OPS(amdv1), 2630 .set_dirty_tracking = amd_iommu_set_dirty_tracking, 2631 }; 2632 2633 static struct iommu_domain *amd_iommu_domain_alloc_paging_v1(struct device *dev, 2634 u32 flags) 2635 { 2636 struct pt_iommu_amdv1_cfg cfg = {}; 2637 struct protection_domain *domain; 2638 int ret; 2639 2640 if (amd_iommu_hatdis) 2641 return ERR_PTR(-EOPNOTSUPP); 2642 2643 domain = protection_domain_alloc(); 2644 if (!domain) 2645 return ERR_PTR(-ENOMEM); 2646 2647 domain->pd_mode = PD_MODE_V1; 2648 domain->iommu.driver_ops = &amd_hw_driver_ops_v1; 2649 domain->iommu.nid = dev_to_node(dev); 2650 if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) 2651 
domain->domain.dirty_ops = &amdv1_dirty_ops;
2652
2653 /*
2654 * Someday FORCE_COHERENCE should be set by
2655 * amd_iommu_enforce_cache_coherency() like VT-d does.
2656 */
2657 cfg.common.features = BIT(PT_FEAT_DYNAMIC_TOP) |
2658 BIT(PT_FEAT_AMDV1_ENCRYPT_TABLES) |
2659 BIT(PT_FEAT_AMDV1_FORCE_COHERENCE);
2660
2661 /*
2662 * AMD's IOMMU can flush as many pages as necessary in a single flush.
2663 * Unless we run in a virtual machine, which can be inferred according
2664 * to whether "non-present cache" is on, it is probably best to prefer
2665 * (potentially) too extensive TLB flushing (i.e., more misses) over
2666 * multiple TLB flushes (i.e., more flushes). For virtual machines the
2667 * hypervisor needs to synchronize the host IOMMU PTEs with those of
2668 * the guest, and the trade-off is different: unnecessary TLB flushes
2669 * should be avoided.
2670 */
2671 if (amd_iommu_np_cache)
2672 cfg.common.features |= BIT(PT_FEAT_FLUSH_RANGE_NO_GAPS);
2673 else
2674 cfg.common.features |= BIT(PT_FEAT_FLUSH_RANGE);
2675
2676 cfg.common.hw_max_vasz_lg2 =
2677 min(64, (amd_iommu_hpt_level - 1) * 9 + 21);
2678 cfg.common.hw_max_oasz_lg2 = 52;
2679 cfg.starting_level = 2;
2680 domain->domain.ops = &amdv1_ops;
2681
2682 ret = pt_iommu_amdv1_init(&domain->amdv1, &cfg, GFP_KERNEL);
2683 if (ret) {
2684 amd_iommu_domain_free(&domain->domain);
2685 return ERR_PTR(ret);
2686 }
2687
2688 /*
2689 * Narrow the supported page sizes to those selected by the kernel
2690 * command line.
2691 */
2692 domain->domain.pgsize_bitmap &= amd_iommu_pgsize_bitmap;
2693 return &domain->domain;
2694 }
2695
2696 static const struct iommu_domain_ops amdv2_ops = {
2697 IOMMU_PT_DOMAIN_OPS(x86_64),
2698 .iotlb_sync_map = amd_iommu_iotlb_sync_map,
2699 .flush_iotlb_all = amd_iommu_flush_iotlb_all,
2700 .iotlb_sync = amd_iommu_iotlb_sync,
2701 .attach_dev = amd_iommu_attach_device,
2702 .free = amd_iommu_domain_free,
2703 /*
2704 * Note the AMDv2 page table format does not support a Force Coherency
2705 * bit, so enforce_cache_coherency should not be set. However VFIO is
2706 * not prepared to handle a case where some domains will support
2707 * enforcement and others do not. VFIO and iommufd will have to be fixed
2708 * before it can fully use the V2 page table. See the comment in
2709 * iommufd_hwpt_paging_alloc(). For now leave things as they have
2710 * historically been and lie about enforce_cache_coherency.
2711 */
2712 .enforce_cache_coherency = amd_iommu_enforce_cache_coherency,
2713 };
2714
2715 static struct iommu_domain *amd_iommu_domain_alloc_paging_v2(struct device *dev,
2716 u32 flags)
2717 {
2718 struct pt_iommu_x86_64_cfg cfg = {};
2719 struct protection_domain *domain;
2720 int ret;
2721
2722 if (!amd_iommu_v2_pgtbl_supported())
2723 return ERR_PTR(-EOPNOTSUPP);
2724
2725 domain = protection_domain_alloc();
2726 if (!domain)
2727 return ERR_PTR(-ENOMEM);
2728
2729 domain->pd_mode = PD_MODE_V2;
2730 domain->iommu.nid = dev_to_node(dev);
2731
2732 cfg.common.features = BIT(PT_FEAT_X86_64_AMD_ENCRYPT_TABLES);
2733 if (amd_iommu_np_cache)
2734 cfg.common.features |= BIT(PT_FEAT_FLUSH_RANGE_NO_GAPS);
2735 else
2736 cfg.common.features |= BIT(PT_FEAT_FLUSH_RANGE);
2737
2738 /*
2739 * The v2 table behaves differently if it is attached to PASID 0 vs a
2740 * non-zero PASID. On PASID 0 it has no sign extension and the full
2741 * 57/48 bits decode the lower addresses. Otherwise it behaves like a
2742 * normal sign extended x86 page table.
Since we want the domain to work 2743 * in both modes the top bit is removed and PT_FEAT_SIGN_EXTEND is not 2744 * set which creates a table that is compatible in both modes. 2745 */ 2746 if (amd_iommu_gpt_level == PAGE_MODE_5_LEVEL) { 2747 cfg.common.hw_max_vasz_lg2 = 56; 2748 cfg.top_level = 4; 2749 } else { 2750 cfg.common.hw_max_vasz_lg2 = 47; 2751 cfg.top_level = 3; 2752 } 2753 cfg.common.hw_max_oasz_lg2 = 52; 2754 domain->domain.ops = &amdv2_ops; 2755 2756 ret = pt_iommu_x86_64_init(&domain->amdv2, &cfg, GFP_KERNEL); 2757 if (ret) { 2758 amd_iommu_domain_free(&domain->domain); 2759 return ERR_PTR(ret); 2760 } 2761 return &domain->domain; 2762 } 2763 2764 static struct iommu_domain * 2765 amd_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags, 2766 const struct iommu_user_data *user_data) 2767 2768 { 2769 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev); 2770 const u32 supported_flags = IOMMU_HWPT_ALLOC_DIRTY_TRACKING | 2771 IOMMU_HWPT_ALLOC_PASID; 2772 2773 if ((flags & ~supported_flags) || user_data) 2774 return ERR_PTR(-EOPNOTSUPP); 2775 2776 switch (flags & supported_flags) { 2777 case IOMMU_HWPT_ALLOC_DIRTY_TRACKING: 2778 /* Allocate domain with v1 page table for dirty tracking */ 2779 if (!amd_iommu_hd_support(iommu)) 2780 break; 2781 return amd_iommu_domain_alloc_paging_v1(dev, flags); 2782 case IOMMU_HWPT_ALLOC_PASID: 2783 /* Allocate domain with v2 page table if IOMMU supports PASID. */ 2784 if (!amd_iommu_pasid_supported()) 2785 break; 2786 return amd_iommu_domain_alloc_paging_v2(dev, flags); 2787 case 0: { 2788 struct iommu_domain *ret; 2789 2790 /* If nothing specific is required use the kernel commandline default */ 2791 if (amd_iommu_pgtable == PD_MODE_V1) { 2792 ret = amd_iommu_domain_alloc_paging_v1(dev, flags); 2793 if (ret != ERR_PTR(-EOPNOTSUPP)) 2794 return ret; 2795 return amd_iommu_domain_alloc_paging_v2(dev, flags); 2796 } 2797 ret = amd_iommu_domain_alloc_paging_v2(dev, flags); 2798 if (ret != ERR_PTR(-EOPNOTSUPP)) 2799 return ret; 2800 return amd_iommu_domain_alloc_paging_v1(dev, flags); 2801 } 2802 default: 2803 break; 2804 } 2805 return ERR_PTR(-EOPNOTSUPP); 2806 } 2807 2808 void amd_iommu_domain_free(struct iommu_domain *dom) 2809 { 2810 struct protection_domain *domain = to_pdomain(dom); 2811 2812 WARN_ON(!list_empty(&domain->dev_list)); 2813 pt_iommu_deinit(&domain->iommu); 2814 amd_iommu_pdom_id_free(domain->id); 2815 kfree(domain); 2816 } 2817 2818 static int blocked_domain_attach_device(struct iommu_domain *domain, 2819 struct device *dev, 2820 struct iommu_domain *old) 2821 { 2822 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); 2823 2824 if (dev_data->domain) 2825 detach_device(dev); 2826 2827 /* Clear DTE and flush the entry */ 2828 mutex_lock(&dev_data->mutex); 2829 dev_update_dte(dev_data, false); 2830 mutex_unlock(&dev_data->mutex); 2831 2832 return 0; 2833 } 2834 2835 static int blocked_domain_set_dev_pasid(struct iommu_domain *domain, 2836 struct device *dev, ioasid_t pasid, 2837 struct iommu_domain *old) 2838 { 2839 amd_iommu_remove_dev_pasid(dev, pasid, old); 2840 return 0; 2841 } 2842 2843 static struct iommu_domain blocked_domain = { 2844 .type = IOMMU_DOMAIN_BLOCKED, 2845 .ops = &(const struct iommu_domain_ops) { 2846 .attach_dev = blocked_domain_attach_device, 2847 .set_dev_pasid = blocked_domain_set_dev_pasid, 2848 } 2849 }; 2850 2851 static struct protection_domain identity_domain; 2852 2853 static const struct iommu_domain_ops identity_domain_ops = { 2854 .attach_dev = amd_iommu_attach_device, 2855 }; 2856 
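/*
 * The identity domain is a singleton attached through the regular
 * amd_iommu_attach_device() path. IOMMU_DOMAIN_IDENTITY is not a
 * __IOMMU_DOMAIN_PAGING type, so set_dte_entry() programs no page table
 * root and DTE[Mode] stays 0, which the IOMMU treats as untranslated
 * (pass-through) DMA. A sketch of the resulting DTE, ignoring cached
 * IVRS bits (illustrative only, not additional code):
 *
 *	data[0] = DTE_FLAG_V | DTE_FLAG_TV | DTE_FLAG_IR | DTE_FLAG_IW;
 *	data[1] = identity_domain.id;
 */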
2857 void amd_iommu_init_identity_domain(void) 2858 { 2859 struct iommu_domain *domain = &identity_domain.domain; 2860 2861 domain->type = IOMMU_DOMAIN_IDENTITY; 2862 domain->ops = &identity_domain_ops; 2863 domain->owner = &amd_iommu_ops; 2864 2865 identity_domain.id = amd_iommu_pdom_id_alloc(); 2866 2867 protection_domain_init(&identity_domain); 2868 } 2869 2870 static int amd_iommu_attach_device(struct iommu_domain *dom, struct device *dev, 2871 struct iommu_domain *old) 2872 { 2873 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); 2874 struct protection_domain *domain = to_pdomain(dom); 2875 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev); 2876 int ret; 2877 2878 /* 2879 * Skip attach device to domain if new domain is same as 2880 * devices current domain 2881 */ 2882 if (dev_data->domain == domain) 2883 return 0; 2884 2885 dev_data->defer_attach = false; 2886 2887 /* 2888 * Restrict to devices with compatible IOMMU hardware support 2889 * when enforcement of dirty tracking is enabled. 2890 */ 2891 if (dom->dirty_ops && !amd_iommu_hd_support(iommu)) 2892 return -EINVAL; 2893 2894 if (dev_data->domain) 2895 detach_device(dev); 2896 2897 ret = attach_device(dev, domain); 2898 2899 #ifdef CONFIG_IRQ_REMAP 2900 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) { 2901 if (dom->type == IOMMU_DOMAIN_UNMANAGED) 2902 dev_data->use_vapic = 1; 2903 else 2904 dev_data->use_vapic = 0; 2905 } 2906 #endif 2907 2908 return ret; 2909 } 2910 2911 static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap) 2912 { 2913 switch (cap) { 2914 case IOMMU_CAP_CACHE_COHERENCY: 2915 return true; 2916 case IOMMU_CAP_NOEXEC: 2917 return false; 2918 case IOMMU_CAP_PRE_BOOT_PROTECTION: 2919 return amdr_ivrs_remap_support; 2920 case IOMMU_CAP_ENFORCE_CACHE_COHERENCY: 2921 return true; 2922 case IOMMU_CAP_DEFERRED_FLUSH: 2923 return true; 2924 case IOMMU_CAP_DIRTY_TRACKING: { 2925 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev); 2926 2927 return amd_iommu_hd_support(iommu); 2928 } 2929 default: 2930 break; 2931 } 2932 2933 return false; 2934 } 2935 2936 static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain, 2937 bool enable) 2938 { 2939 struct protection_domain *pdomain = to_pdomain(domain); 2940 struct dev_table_entry *dte; 2941 struct iommu_dev_data *dev_data; 2942 bool domain_flush = false; 2943 struct amd_iommu *iommu; 2944 unsigned long flags; 2945 u64 new; 2946 2947 spin_lock_irqsave(&pdomain->lock, flags); 2948 if (!(pdomain->dirty_tracking ^ enable)) { 2949 spin_unlock_irqrestore(&pdomain->lock, flags); 2950 return 0; 2951 } 2952 2953 list_for_each_entry(dev_data, &pdomain->dev_list, list) { 2954 spin_lock(&dev_data->dte_lock); 2955 iommu = get_amd_iommu_from_dev_data(dev_data); 2956 dte = &get_dev_table(iommu)[dev_data->devid]; 2957 new = dte->data[0]; 2958 new = (enable ? 
new | DTE_FLAG_HAD : new & ~DTE_FLAG_HAD); 2959 dte->data[0] = new; 2960 spin_unlock(&dev_data->dte_lock); 2961 2962 /* Flush device DTE */ 2963 device_flush_dte(dev_data); 2964 domain_flush = true; 2965 } 2966 2967 /* Flush IOTLB to mark IOPTE dirty on the next translation(s) */ 2968 if (domain_flush) 2969 amd_iommu_domain_flush_all(pdomain); 2970 2971 pdomain->dirty_tracking = enable; 2972 spin_unlock_irqrestore(&pdomain->lock, flags); 2973 2974 return 0; 2975 } 2976 2977 static void amd_iommu_get_resv_regions(struct device *dev, 2978 struct list_head *head) 2979 { 2980 struct iommu_resv_region *region; 2981 struct unity_map_entry *entry; 2982 struct amd_iommu *iommu; 2983 struct amd_iommu_pci_seg *pci_seg; 2984 int devid, sbdf; 2985 2986 sbdf = get_device_sbdf_id(dev); 2987 if (sbdf < 0) 2988 return; 2989 2990 devid = PCI_SBDF_TO_DEVID(sbdf); 2991 iommu = get_amd_iommu_from_dev(dev); 2992 pci_seg = iommu->pci_seg; 2993 2994 list_for_each_entry(entry, &pci_seg->unity_map, list) { 2995 int type, prot = 0; 2996 size_t length; 2997 2998 if (devid < entry->devid_start || devid > entry->devid_end) 2999 continue; 3000 3001 type = IOMMU_RESV_DIRECT; 3002 length = entry->address_end - entry->address_start; 3003 if (entry->prot & IOMMU_PROT_IR) 3004 prot |= IOMMU_READ; 3005 if (entry->prot & IOMMU_PROT_IW) 3006 prot |= IOMMU_WRITE; 3007 if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE) 3008 /* Exclusion range */ 3009 type = IOMMU_RESV_RESERVED; 3010 3011 region = iommu_alloc_resv_region(entry->address_start, 3012 length, prot, type, 3013 GFP_KERNEL); 3014 if (!region) { 3015 dev_err(dev, "Out of memory allocating dm-regions\n"); 3016 return; 3017 } 3018 list_add_tail(®ion->list, head); 3019 } 3020 3021 region = iommu_alloc_resv_region(MSI_RANGE_START, 3022 MSI_RANGE_END - MSI_RANGE_START + 1, 3023 0, IOMMU_RESV_MSI, GFP_KERNEL); 3024 if (!region) 3025 return; 3026 list_add_tail(®ion->list, head); 3027 3028 if (amd_iommu_ht_range_ignore()) 3029 return; 3030 3031 region = iommu_alloc_resv_region(HT_RANGE_START, 3032 HT_RANGE_END - HT_RANGE_START + 1, 3033 0, IOMMU_RESV_RESERVED, GFP_KERNEL); 3034 if (!region) 3035 return; 3036 list_add_tail(®ion->list, head); 3037 } 3038 3039 static bool amd_iommu_is_attach_deferred(struct device *dev) 3040 { 3041 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); 3042 3043 return dev_data->defer_attach; 3044 } 3045 3046 static int amd_iommu_def_domain_type(struct device *dev) 3047 { 3048 struct iommu_dev_data *dev_data; 3049 3050 dev_data = dev_iommu_priv_get(dev); 3051 if (!dev_data) 3052 return 0; 3053 3054 /* Always use DMA domain for untrusted device */ 3055 if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted) 3056 return IOMMU_DOMAIN_DMA; 3057 3058 /* 3059 * Do not identity map IOMMUv2 capable devices when: 3060 * - memory encryption is active, because some of those devices 3061 * (AMD GPUs) don't have the encryption bit in their DMA-mask 3062 * and require remapping. 3063 * - SNP is enabled, because it prohibits DTE[Mode]=0. 
3064 */ 3065 if (pdev_pasid_supported(dev_data) && 3066 !cc_platform_has(CC_ATTR_MEM_ENCRYPT) && 3067 !amd_iommu_snp_en) { 3068 return IOMMU_DOMAIN_IDENTITY; 3069 } 3070 3071 return 0; 3072 } 3073 3074 static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain) 3075 { 3076 /* IOMMU_PTE_FC is always set */ 3077 return true; 3078 } 3079 3080 const struct iommu_ops amd_iommu_ops = { 3081 .capable = amd_iommu_capable, 3082 .blocked_domain = &blocked_domain, 3083 .release_domain = &blocked_domain, 3084 .identity_domain = &identity_domain.domain, 3085 .domain_alloc_paging_flags = amd_iommu_domain_alloc_paging_flags, 3086 .domain_alloc_sva = amd_iommu_domain_alloc_sva, 3087 .probe_device = amd_iommu_probe_device, 3088 .release_device = amd_iommu_release_device, 3089 .device_group = amd_iommu_device_group, 3090 .get_resv_regions = amd_iommu_get_resv_regions, 3091 .is_attach_deferred = amd_iommu_is_attach_deferred, 3092 .def_domain_type = amd_iommu_def_domain_type, 3093 .page_response = amd_iommu_page_response, 3094 }; 3095 3096 #ifdef CONFIG_IRQ_REMAP 3097 3098 /***************************************************************************** 3099 * 3100 * Interrupt Remapping Implementation 3101 * 3102 *****************************************************************************/ 3103 3104 static struct irq_chip amd_ir_chip; 3105 static DEFINE_SPINLOCK(iommu_table_lock); 3106 3107 static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid) 3108 { 3109 int ret; 3110 u64 data; 3111 unsigned long flags; 3112 struct iommu_cmd cmd, cmd2; 3113 3114 if (iommu->irtcachedis_enabled) 3115 return; 3116 3117 build_inv_irt(&cmd, devid); 3118 data = atomic64_inc_return(&iommu->cmd_sem_val); 3119 build_completion_wait(&cmd2, iommu, data); 3120 3121 raw_spin_lock_irqsave(&iommu->lock, flags); 3122 ret = __iommu_queue_command_sync(iommu, &cmd, true); 3123 if (ret) 3124 goto out; 3125 ret = __iommu_queue_command_sync(iommu, &cmd2, false); 3126 if (ret) 3127 goto out; 3128 wait_on_sem(iommu, data); 3129 out: 3130 raw_spin_unlock_irqrestore(&iommu->lock, flags); 3131 } 3132 3133 static inline u8 iommu_get_int_tablen(struct iommu_dev_data *dev_data) 3134 { 3135 if (dev_data && dev_data->max_irqs == MAX_IRQS_PER_TABLE_2K) 3136 return DTE_INTTABLEN_2K; 3137 return DTE_INTTABLEN_512; 3138 } 3139 3140 static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid, 3141 struct irq_remap_table *table) 3142 { 3143 u64 new; 3144 struct dev_table_entry *dte = &get_dev_table(iommu)[devid]; 3145 struct iommu_dev_data *dev_data = search_dev_data(iommu, devid); 3146 3147 if (dev_data) 3148 spin_lock(&dev_data->dte_lock); 3149 3150 new = READ_ONCE(dte->data[2]); 3151 new &= ~DTE_IRQ_PHYS_ADDR_MASK; 3152 new |= iommu_virt_to_phys(table->table); 3153 new |= DTE_IRQ_REMAP_INTCTL; 3154 new |= iommu_get_int_tablen(dev_data); 3155 new |= DTE_IRQ_REMAP_ENABLE; 3156 WRITE_ONCE(dte->data[2], new); 3157 3158 if (dev_data) 3159 spin_unlock(&dev_data->dte_lock); 3160 } 3161 3162 static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid) 3163 { 3164 struct irq_remap_table *table; 3165 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; 3166 3167 if (WARN_ONCE(!pci_seg->rlookup_table[devid], 3168 "%s: no iommu for devid %x:%x\n", 3169 __func__, pci_seg->id, devid)) 3170 return NULL; 3171 3172 table = pci_seg->irq_lookup_table[devid]; 3173 if (WARN_ONCE(!table, "%s: no table for devid %x:%x\n", 3174 __func__, pci_seg->id, devid)) 3175 return NULL; 3176 3177 return table; 3178 } 3179 3180 static 
struct irq_remap_table *__alloc_irq_table(int nid, size_t size) 3181 { 3182 struct irq_remap_table *table; 3183 3184 table = kzalloc(sizeof(*table), GFP_KERNEL); 3185 if (!table) 3186 return NULL; 3187 3188 table->table = iommu_alloc_pages_node_sz( 3189 nid, GFP_KERNEL, max(DTE_INTTAB_ALIGNMENT, size)); 3190 if (!table->table) { 3191 kfree(table); 3192 return NULL; 3193 } 3194 raw_spin_lock_init(&table->lock); 3195 3196 return table; 3197 } 3198 3199 static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid, 3200 struct irq_remap_table *table) 3201 { 3202 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; 3203 3204 pci_seg->irq_lookup_table[devid] = table; 3205 set_dte_irq_entry(iommu, devid, table); 3206 iommu_flush_dte(iommu, devid); 3207 } 3208 3209 static int set_remap_table_entry_alias(struct pci_dev *pdev, u16 alias, 3210 void *data) 3211 { 3212 struct irq_remap_table *table = data; 3213 struct amd_iommu_pci_seg *pci_seg; 3214 struct amd_iommu *iommu = rlookup_amd_iommu(&pdev->dev); 3215 3216 if (!iommu) 3217 return -EINVAL; 3218 3219 pci_seg = iommu->pci_seg; 3220 pci_seg->irq_lookup_table[alias] = table; 3221 set_dte_irq_entry(iommu, alias, table); 3222 iommu_flush_dte(pci_seg->rlookup_table[alias], alias); 3223 3224 return 0; 3225 } 3226 3227 static inline size_t get_irq_table_size(unsigned int max_irqs) 3228 { 3229 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir)) 3230 return max_irqs * sizeof(u32); 3231 3232 return max_irqs * (sizeof(u64) * 2); 3233 } 3234 3235 static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu, 3236 u16 devid, struct pci_dev *pdev, 3237 unsigned int max_irqs) 3238 { 3239 struct irq_remap_table *table = NULL; 3240 struct irq_remap_table *new_table = NULL; 3241 struct amd_iommu_pci_seg *pci_seg; 3242 unsigned long flags; 3243 int nid = iommu && iommu->dev ? 
dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE; 3244 u16 alias; 3245 3246 spin_lock_irqsave(&iommu_table_lock, flags); 3247 3248 pci_seg = iommu->pci_seg; 3249 table = pci_seg->irq_lookup_table[devid]; 3250 if (table) 3251 goto out_unlock; 3252 3253 alias = pci_seg->alias_table[devid]; 3254 table = pci_seg->irq_lookup_table[alias]; 3255 if (table) { 3256 set_remap_table_entry(iommu, devid, table); 3257 goto out_wait; 3258 } 3259 spin_unlock_irqrestore(&iommu_table_lock, flags); 3260 3261 /* Nothing there yet, allocate new irq remapping table */ 3262 new_table = __alloc_irq_table(nid, get_irq_table_size(max_irqs)); 3263 if (!new_table) 3264 return NULL; 3265 3266 spin_lock_irqsave(&iommu_table_lock, flags); 3267 3268 table = pci_seg->irq_lookup_table[devid]; 3269 if (table) 3270 goto out_unlock; 3271 3272 table = pci_seg->irq_lookup_table[alias]; 3273 if (table) { 3274 set_remap_table_entry(iommu, devid, table); 3275 goto out_wait; 3276 } 3277 3278 table = new_table; 3279 new_table = NULL; 3280 3281 if (pdev) 3282 pci_for_each_dma_alias(pdev, set_remap_table_entry_alias, 3283 table); 3284 else 3285 set_remap_table_entry(iommu, devid, table); 3286 3287 if (devid != alias) 3288 set_remap_table_entry(iommu, alias, table); 3289 3290 out_wait: 3291 iommu_completion_wait(iommu); 3292 3293 out_unlock: 3294 spin_unlock_irqrestore(&iommu_table_lock, flags); 3295 3296 if (new_table) { 3297 iommu_free_pages(new_table->table); 3298 kfree(new_table); 3299 } 3300 return table; 3301 } 3302 3303 static int alloc_irq_index(struct amd_iommu *iommu, u16 devid, int count, 3304 bool align, struct pci_dev *pdev, 3305 unsigned long max_irqs) 3306 { 3307 struct irq_remap_table *table; 3308 int index, c, alignment = 1; 3309 unsigned long flags; 3310 3311 table = alloc_irq_table(iommu, devid, pdev, max_irqs); 3312 if (!table) 3313 return -ENODEV; 3314 3315 if (align) 3316 alignment = roundup_pow_of_two(count); 3317 3318 raw_spin_lock_irqsave(&table->lock, flags); 3319 3320 /* Scan table for free entries */ 3321 for (index = ALIGN(table->min_index, alignment), c = 0; 3322 index < max_irqs;) { 3323 if (!iommu->irte_ops->is_allocated(table, index)) { 3324 c += 1; 3325 } else { 3326 c = 0; 3327 index = ALIGN(index + 1, alignment); 3328 continue; 3329 } 3330 3331 if (c == count) { 3332 for (; c != 0; --c) 3333 iommu->irte_ops->set_allocated(table, index - c + 1); 3334 3335 index -= count - 1; 3336 goto out; 3337 } 3338 3339 index++; 3340 } 3341 3342 index = -ENOSPC; 3343 3344 out: 3345 raw_spin_unlock_irqrestore(&table->lock, flags); 3346 3347 return index; 3348 } 3349 3350 static int __modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index, 3351 struct irte_ga *irte) 3352 { 3353 struct irq_remap_table *table; 3354 struct irte_ga *entry; 3355 unsigned long flags; 3356 u128 old; 3357 3358 table = get_irq_table(iommu, devid); 3359 if (!table) 3360 return -ENOMEM; 3361 3362 raw_spin_lock_irqsave(&table->lock, flags); 3363 3364 entry = (struct irte_ga *)table->table; 3365 entry = &entry[index]; 3366 3367 /* 3368 * We use cmpxchg16 to atomically update the 128-bit IRTE, 3369 * and it cannot be updated by the hardware or other processors 3370 * behind us, so the return value of cmpxchg16 should be the 3371 * same as the old value. 
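 *
 * A minimal sketch of that invariant (restating the two lines below):
 *
 *	old = entry->irte;
 *	WARN_ON(!try_cmpxchg128(&entry->irte, &old, irte->irte));
 *
 * Under table->lock nothing else may write this IRTE, so the compare
 * half of the cmpxchg can only fail if that assumption is violated,
 * hence the WARN_ON() instead of a retry loop.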
3372 */ 3373 old = entry->irte; 3374 WARN_ON(!try_cmpxchg128(&entry->irte, &old, irte->irte)); 3375 3376 raw_spin_unlock_irqrestore(&table->lock, flags); 3377 3378 return 0; 3379 } 3380 3381 static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index, 3382 struct irte_ga *irte) 3383 { 3384 int ret; 3385 3386 ret = __modify_irte_ga(iommu, devid, index, irte); 3387 if (ret) 3388 return ret; 3389 3390 iommu_flush_irt_and_complete(iommu, devid); 3391 3392 return 0; 3393 } 3394 3395 static int modify_irte(struct amd_iommu *iommu, 3396 u16 devid, int index, union irte *irte) 3397 { 3398 struct irq_remap_table *table; 3399 unsigned long flags; 3400 3401 table = get_irq_table(iommu, devid); 3402 if (!table) 3403 return -ENOMEM; 3404 3405 raw_spin_lock_irqsave(&table->lock, flags); 3406 table->table[index] = irte->val; 3407 raw_spin_unlock_irqrestore(&table->lock, flags); 3408 3409 iommu_flush_irt_and_complete(iommu, devid); 3410 3411 return 0; 3412 } 3413 3414 static void free_irte(struct amd_iommu *iommu, u16 devid, int index) 3415 { 3416 struct irq_remap_table *table; 3417 unsigned long flags; 3418 3419 table = get_irq_table(iommu, devid); 3420 if (!table) 3421 return; 3422 3423 raw_spin_lock_irqsave(&table->lock, flags); 3424 iommu->irte_ops->clear_allocated(table, index); 3425 raw_spin_unlock_irqrestore(&table->lock, flags); 3426 3427 iommu_flush_irt_and_complete(iommu, devid); 3428 } 3429 3430 static void irte_prepare(void *entry, 3431 u32 delivery_mode, bool dest_mode, 3432 u8 vector, u32 dest_apicid, int devid) 3433 { 3434 union irte *irte = (union irte *) entry; 3435 3436 irte->val = 0; 3437 irte->fields.vector = vector; 3438 irte->fields.int_type = delivery_mode; 3439 irte->fields.destination = dest_apicid; 3440 irte->fields.dm = dest_mode; 3441 irte->fields.valid = 1; 3442 } 3443 3444 static void irte_ga_prepare(void *entry, 3445 u32 delivery_mode, bool dest_mode, 3446 u8 vector, u32 dest_apicid, int devid) 3447 { 3448 struct irte_ga *irte = (struct irte_ga *) entry; 3449 3450 irte->lo.val = 0; 3451 irte->hi.val = 0; 3452 irte->lo.fields_remap.int_type = delivery_mode; 3453 irte->lo.fields_remap.dm = dest_mode; 3454 irte->hi.fields.vector = vector; 3455 irte->lo.fields_remap.destination = APICID_TO_IRTE_DEST_LO(dest_apicid); 3456 irte->hi.fields.destination = APICID_TO_IRTE_DEST_HI(dest_apicid); 3457 irte->lo.fields_remap.valid = 1; 3458 } 3459 3460 static void irte_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) 3461 { 3462 union irte *irte = (union irte *) entry; 3463 3464 irte->fields.valid = 1; 3465 modify_irte(iommu, devid, index, irte); 3466 } 3467 3468 static void irte_ga_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) 3469 { 3470 struct irte_ga *irte = (struct irte_ga *) entry; 3471 3472 irte->lo.fields_remap.valid = 1; 3473 modify_irte_ga(iommu, devid, index, irte); 3474 } 3475 3476 static void irte_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) 3477 { 3478 union irte *irte = (union irte *) entry; 3479 3480 irte->fields.valid = 0; 3481 modify_irte(iommu, devid, index, irte); 3482 } 3483 3484 static void irte_ga_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) 3485 { 3486 struct irte_ga *irte = (struct irte_ga *) entry; 3487 3488 irte->lo.fields_remap.valid = 0; 3489 modify_irte_ga(iommu, devid, index, irte); 3490 } 3491 3492 static void irte_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index, 3493 u8 vector, u32 dest_apicid) 3494 { 3495 union irte *irte = 
(union irte *) entry;
3496
3497 irte->fields.vector = vector;
3498 irte->fields.destination = dest_apicid;
3499 modify_irte(iommu, devid, index, irte);
3500 }
3501
3502 static void irte_ga_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
3503 u8 vector, u32 dest_apicid)
3504 {
3505 struct irte_ga *irte = (struct irte_ga *) entry;
3506
3507 if (!irte->lo.fields_remap.guest_mode) {
3508 irte->hi.fields.vector = vector;
3509 irte->lo.fields_remap.destination =
3510 APICID_TO_IRTE_DEST_LO(dest_apicid);
3511 irte->hi.fields.destination =
3512 APICID_TO_IRTE_DEST_HI(dest_apicid);
3513 modify_irte_ga(iommu, devid, index, irte);
3514 }
3515 }
3516
3517 #define IRTE_ALLOCATED (~1U)
3518 static void irte_set_allocated(struct irq_remap_table *table, int index)
3519 {
3520 table->table[index] = IRTE_ALLOCATED;
3521 }
3522
3523 static void irte_ga_set_allocated(struct irq_remap_table *table, int index)
3524 {
3525 struct irte_ga *ptr = (struct irte_ga *)table->table;
3526 struct irte_ga *irte = &ptr[index];
3527
3528 memset(&irte->lo.val, 0, sizeof(u64));
3529 memset(&irte->hi.val, 0, sizeof(u64));
3530 irte->hi.fields.vector = 0xff;
3531 }
3532
3533 static bool irte_is_allocated(struct irq_remap_table *table, int index)
3534 {
3535 union irte *ptr = (union irte *)table->table;
3536 union irte *irte = &ptr[index];
3537
3538 return irte->val != 0;
3539 }
3540
3541 static bool irte_ga_is_allocated(struct irq_remap_table *table, int index)
3542 {
3543 struct irte_ga *ptr = (struct irte_ga *)table->table;
3544 struct irte_ga *irte = &ptr[index];
3545
3546 return irte->hi.fields.vector != 0;
3547 }
3548
3549 static void irte_clear_allocated(struct irq_remap_table *table, int index)
3550 {
3551 table->table[index] = 0;
3552 }
3553
3554 static void irte_ga_clear_allocated(struct irq_remap_table *table, int index)
3555 {
3556 struct irte_ga *ptr = (struct irte_ga *)table->table;
3557 struct irte_ga *irte = &ptr[index];
3558
3559 memset(&irte->lo.val, 0, sizeof(u64));
3560 memset(&irte->hi.val, 0, sizeof(u64));
3561 }
3562
3563 static int get_devid(struct irq_alloc_info *info)
3564 {
3565 switch (info->type) {
3566 case X86_IRQ_ALLOC_TYPE_IOAPIC:
3567 return get_ioapic_devid(info->devid);
3568 case X86_IRQ_ALLOC_TYPE_HPET:
3569 return get_hpet_devid(info->devid);
3570 case X86_IRQ_ALLOC_TYPE_PCI_MSI:
3571 case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
3572 return get_device_sbdf_id(msi_desc_to_dev(info->desc));
3573 default:
3574 WARN_ON_ONCE(1);
3575 return -1;
3576 }
3577 }
3578
3579 struct irq_remap_ops amd_iommu_irq_ops = {
3580 .prepare = amd_iommu_prepare,
3581 .enable = amd_iommu_enable,
3582 .disable = amd_iommu_disable,
3583 .reenable = amd_iommu_reenable,
3584 .enable_faulting = amd_iommu_enable_faulting,
3585 };
3586
3587 static void fill_msi_msg(struct msi_msg *msg, u32 index)
3588 {
3589 msg->data = index;
3590 msg->address_lo = 0;
3591 msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW;
3592 /*
3593 * The struct msi_msg.dest_mode_logical is used to set the DM bit
3594 * in the MSI Message Address Register. For devices w/ 2K int-remap
3595 * support, this bit must be set to 1 regardless of the actual
3596 * destination mode, which is signified by the IRTE[DM].
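 *
 * Resulting message for a 2K-capable IOMMU (a sketch of what this
 * function programs, shown for illustration):
 *
 *	msg->data = index;			// IRTE index, not a vector
 *	msg->arch_addr_lo.dest_mode_logical = true;	// DM forced to 1
 *	msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;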
3597 */ 3598 if (FEATURE_NUM_INT_REMAP_SUP_2K(amd_iommu_efr2)) 3599 msg->arch_addr_lo.dest_mode_logical = true; 3600 msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH; 3601 } 3602 3603 static void irq_remapping_prepare_irte(struct amd_ir_data *data, 3604 struct irq_cfg *irq_cfg, 3605 struct irq_alloc_info *info, 3606 int devid, int index, int sub_handle) 3607 { 3608 struct irq_2_irte *irte_info = &data->irq_2_irte; 3609 struct amd_iommu *iommu = data->iommu; 3610 3611 if (!iommu) 3612 return; 3613 3614 data->irq_2_irte.devid = devid; 3615 data->irq_2_irte.index = index + sub_handle; 3616 iommu->irte_ops->prepare(data->entry, APIC_DELIVERY_MODE_FIXED, 3617 apic->dest_mode_logical, irq_cfg->vector, 3618 irq_cfg->dest_apicid, devid); 3619 3620 switch (info->type) { 3621 case X86_IRQ_ALLOC_TYPE_IOAPIC: 3622 case X86_IRQ_ALLOC_TYPE_HPET: 3623 case X86_IRQ_ALLOC_TYPE_PCI_MSI: 3624 case X86_IRQ_ALLOC_TYPE_PCI_MSIX: 3625 fill_msi_msg(&data->msi_entry, irte_info->index); 3626 break; 3627 3628 default: 3629 BUG_ON(1); 3630 break; 3631 } 3632 } 3633 3634 struct amd_irte_ops irte_32_ops = { 3635 .prepare = irte_prepare, 3636 .activate = irte_activate, 3637 .deactivate = irte_deactivate, 3638 .set_affinity = irte_set_affinity, 3639 .set_allocated = irte_set_allocated, 3640 .is_allocated = irte_is_allocated, 3641 .clear_allocated = irte_clear_allocated, 3642 }; 3643 3644 struct amd_irte_ops irte_128_ops = { 3645 .prepare = irte_ga_prepare, 3646 .activate = irte_ga_activate, 3647 .deactivate = irte_ga_deactivate, 3648 .set_affinity = irte_ga_set_affinity, 3649 .set_allocated = irte_ga_set_allocated, 3650 .is_allocated = irte_ga_is_allocated, 3651 .clear_allocated = irte_ga_clear_allocated, 3652 }; 3653 3654 static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq, 3655 unsigned int nr_irqs, void *arg) 3656 { 3657 struct irq_alloc_info *info = arg; 3658 struct irq_data *irq_data; 3659 struct amd_ir_data *data = NULL; 3660 struct amd_iommu *iommu; 3661 struct irq_cfg *cfg; 3662 struct iommu_dev_data *dev_data; 3663 unsigned long max_irqs; 3664 int i, ret, devid, seg, sbdf; 3665 int index; 3666 3667 if (!info) 3668 return -EINVAL; 3669 if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI) 3670 return -EINVAL; 3671 3672 sbdf = get_devid(info); 3673 if (sbdf < 0) 3674 return -EINVAL; 3675 3676 seg = PCI_SBDF_TO_SEGID(sbdf); 3677 devid = PCI_SBDF_TO_DEVID(sbdf); 3678 iommu = __rlookup_amd_iommu(seg, devid); 3679 if (!iommu) 3680 return -EINVAL; 3681 3682 dev_data = search_dev_data(iommu, devid); 3683 max_irqs = dev_data ? dev_data->max_irqs : MAX_IRQS_PER_TABLE_512; 3684 3685 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); 3686 if (ret < 0) 3687 return ret; 3688 3689 if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) { 3690 struct irq_remap_table *table; 3691 3692 table = alloc_irq_table(iommu, devid, NULL, max_irqs); 3693 if (table) { 3694 if (!table->min_index) { 3695 /* 3696 * Keep the first 32 indexes free for IOAPIC 3697 * interrupts. 
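 *
 * The resulting table layout is (sketch):
 *
 *	index  0..31	reserved, one slot per IOAPIC pin
 *	index 32..	general allocations via alloc_irq_index()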
3698 */ 3699 table->min_index = 32; 3700 for (i = 0; i < 32; ++i) 3701 iommu->irte_ops->set_allocated(table, i); 3702 } 3703 WARN_ON(table->min_index != 32); 3704 index = info->ioapic.pin; 3705 } else { 3706 index = -ENOMEM; 3707 } 3708 } else if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI || 3709 info->type == X86_IRQ_ALLOC_TYPE_PCI_MSIX) { 3710 bool align = (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI); 3711 3712 index = alloc_irq_index(iommu, devid, nr_irqs, align, 3713 msi_desc_to_pci_dev(info->desc), 3714 max_irqs); 3715 } else { 3716 index = alloc_irq_index(iommu, devid, nr_irqs, false, NULL, 3717 max_irqs); 3718 } 3719 3720 if (index < 0) { 3721 pr_warn("Failed to allocate IRTE\n"); 3722 ret = index; 3723 goto out_free_parent; 3724 } 3725 3726 for (i = 0; i < nr_irqs; i++) { 3727 irq_data = irq_domain_get_irq_data(domain, virq + i); 3728 cfg = irq_data ? irqd_cfg(irq_data) : NULL; 3729 if (!cfg) { 3730 ret = -EINVAL; 3731 goto out_free_data; 3732 } 3733 3734 ret = -ENOMEM; 3735 data = kzalloc(sizeof(*data), GFP_KERNEL); 3736 if (!data) 3737 goto out_free_data; 3738 3739 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir)) 3740 data->entry = kzalloc(sizeof(union irte), GFP_KERNEL); 3741 else 3742 data->entry = kzalloc(sizeof(struct irte_ga), 3743 GFP_KERNEL); 3744 if (!data->entry) { 3745 kfree(data); 3746 goto out_free_data; 3747 } 3748 3749 data->iommu = iommu; 3750 irq_data->hwirq = (devid << 16) + i; 3751 irq_data->chip_data = data; 3752 irq_data->chip = &amd_ir_chip; 3753 irq_remapping_prepare_irte(data, cfg, info, devid, index, i); 3754 } 3755 3756 return 0; 3757 3758 out_free_data: 3759 for (i--; i >= 0; i--) { 3760 irq_data = irq_domain_get_irq_data(domain, virq + i); 3761 if (irq_data) 3762 kfree(irq_data->chip_data); 3763 } 3764 for (i = 0; i < nr_irqs; i++) 3765 free_irte(iommu, devid, index + i); 3766 out_free_parent: 3767 irq_domain_free_irqs_common(domain, virq, nr_irqs); 3768 return ret; 3769 } 3770 3771 static void irq_remapping_free(struct irq_domain *domain, unsigned int virq, 3772 unsigned int nr_irqs) 3773 { 3774 struct irq_2_irte *irte_info; 3775 struct irq_data *irq_data; 3776 struct amd_ir_data *data; 3777 int i; 3778 3779 for (i = 0; i < nr_irqs; i++) { 3780 irq_data = irq_domain_get_irq_data(domain, virq + i); 3781 if (irq_data && irq_data->chip_data) { 3782 data = irq_data->chip_data; 3783 irte_info = &data->irq_2_irte; 3784 free_irte(data->iommu, irte_info->devid, irte_info->index); 3785 kfree(data->entry); 3786 kfree(data); 3787 } 3788 } 3789 irq_domain_free_irqs_common(domain, virq, nr_irqs); 3790 } 3791 3792 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu, 3793 struct amd_ir_data *ir_data, 3794 struct irq_2_irte *irte_info, 3795 struct irq_cfg *cfg); 3796 3797 static int irq_remapping_activate(struct irq_domain *domain, 3798 struct irq_data *irq_data, bool reserve) 3799 { 3800 struct amd_ir_data *data = irq_data->chip_data; 3801 struct irq_2_irte *irte_info = &data->irq_2_irte; 3802 struct amd_iommu *iommu = data->iommu; 3803 struct irq_cfg *cfg = irqd_cfg(irq_data); 3804 3805 if (!iommu) 3806 return 0; 3807 3808 iommu->irte_ops->activate(iommu, data->entry, irte_info->devid, 3809 irte_info->index); 3810 amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg); 3811 return 0; 3812 } 3813 3814 static void irq_remapping_deactivate(struct irq_domain *domain, 3815 struct irq_data *irq_data) 3816 { 3817 struct amd_ir_data *data = irq_data->chip_data; 3818 struct irq_2_irte *irte_info = &data->irq_2_irte; 3819 struct amd_iommu *iommu = 
data->iommu; 3820 3821 if (iommu) 3822 iommu->irte_ops->deactivate(iommu, data->entry, irte_info->devid, 3823 irte_info->index); 3824 } 3825 3826 static int irq_remapping_select(struct irq_domain *d, struct irq_fwspec *fwspec, 3827 enum irq_domain_bus_token bus_token) 3828 { 3829 struct amd_iommu *iommu; 3830 int devid = -1; 3831 3832 if (!amd_iommu_irq_remap) 3833 return 0; 3834 3835 if (x86_fwspec_is_ioapic(fwspec)) 3836 devid = get_ioapic_devid(fwspec->param[0]); 3837 else if (x86_fwspec_is_hpet(fwspec)) 3838 devid = get_hpet_devid(fwspec->param[0]); 3839 3840 if (devid < 0) 3841 return 0; 3842 iommu = __rlookup_amd_iommu((devid >> 16), (devid & 0xffff)); 3843 3844 return iommu && iommu->ir_domain == d; 3845 } 3846 3847 static const struct irq_domain_ops amd_ir_domain_ops = { 3848 .select = irq_remapping_select, 3849 .alloc = irq_remapping_alloc, 3850 .free = irq_remapping_free, 3851 .activate = irq_remapping_activate, 3852 .deactivate = irq_remapping_deactivate, 3853 }; 3854 3855 static void __amd_iommu_update_ga(struct irte_ga *entry, int cpu, 3856 bool ga_log_intr) 3857 { 3858 if (cpu >= 0) { 3859 entry->lo.fields_vapic.destination = 3860 APICID_TO_IRTE_DEST_LO(cpu); 3861 entry->hi.fields.destination = 3862 APICID_TO_IRTE_DEST_HI(cpu); 3863 entry->lo.fields_vapic.is_run = true; 3864 entry->lo.fields_vapic.ga_log_intr = false; 3865 } else { 3866 entry->lo.fields_vapic.is_run = false; 3867 entry->lo.fields_vapic.ga_log_intr = ga_log_intr; 3868 } 3869 } 3870 3871 /* 3872 * Update the pCPU information for an IRTE that is configured to post IRQs to 3873 * a vCPU, without issuing an IOMMU invalidation for the IRTE. 3874 * 3875 * If the vCPU is associated with a pCPU (@cpu >= 0), configure the Destination 3876 * with the pCPU's APIC ID, set IsRun, and clear GALogIntr. If the vCPU isn't 3877 * associated with a pCPU (@cpu < 0), clear IsRun and set/clear GALogIntr based 3878 * on input from the caller (e.g. KVM only requests GALogIntr when the vCPU is 3879 * blocking and requires a notification wake event). I.e. treat vCPUs that are 3880 * associated with a pCPU as running. This API is intended to be used when a 3881 * vCPU is scheduled in/out (or stops running for any reason), to do a fast 3882 * update of IsRun, GALogIntr, and (conditionally) Destination. 3883 * 3884 * Per the IOMMU spec, the Destination, IsRun, and GATag fields are not cached 3885 * and thus don't require an invalidation to ensure the IOMMU consumes fresh 3886 * information. 
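 *
 * Hypothetical caller-side sketch (KVM-style usage, illustrative only):
 *
 *	amd_iommu_update_ga(ir_data, cpu, false);	// vCPU now runs on 'cpu'
 *	amd_iommu_update_ga(ir_data, -1, true);		// vCPU blocked, wants a
 *							// GA log wake event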
3887 */ 3888 int amd_iommu_update_ga(void *data, int cpu, bool ga_log_intr) 3889 { 3890 struct amd_ir_data *ir_data = (struct amd_ir_data *)data; 3891 struct irte_ga *entry = (struct irte_ga *) ir_data->entry; 3892 3893 if (WARN_ON_ONCE(!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))) 3894 return -EINVAL; 3895 3896 if (!entry || !entry->lo.fields_vapic.guest_mode) 3897 return 0; 3898 3899 if (!ir_data->iommu) 3900 return -ENODEV; 3901 3902 __amd_iommu_update_ga(entry, cpu, ga_log_intr); 3903 3904 return __modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, 3905 ir_data->irq_2_irte.index, entry); 3906 } 3907 EXPORT_SYMBOL(amd_iommu_update_ga); 3908 3909 int amd_iommu_activate_guest_mode(void *data, int cpu, bool ga_log_intr) 3910 { 3911 struct amd_ir_data *ir_data = (struct amd_ir_data *)data; 3912 struct irte_ga *entry = (struct irte_ga *) ir_data->entry; 3913 u64 valid; 3914 3915 if (WARN_ON_ONCE(!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))) 3916 return -EINVAL; 3917 3918 if (!entry) 3919 return 0; 3920 3921 valid = entry->lo.fields_vapic.valid; 3922 3923 entry->lo.val = 0; 3924 entry->hi.val = 0; 3925 3926 entry->lo.fields_vapic.valid = valid; 3927 entry->lo.fields_vapic.guest_mode = 1; 3928 entry->hi.fields.ga_root_ptr = ir_data->ga_root_ptr; 3929 entry->hi.fields.vector = ir_data->ga_vector; 3930 entry->lo.fields_vapic.ga_tag = ir_data->ga_tag; 3931 3932 __amd_iommu_update_ga(entry, cpu, ga_log_intr); 3933 3934 return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, 3935 ir_data->irq_2_irte.index, entry); 3936 } 3937 EXPORT_SYMBOL(amd_iommu_activate_guest_mode); 3938 3939 int amd_iommu_deactivate_guest_mode(void *data) 3940 { 3941 struct amd_ir_data *ir_data = (struct amd_ir_data *)data; 3942 struct irte_ga *entry = (struct irte_ga *) ir_data->entry; 3943 struct irq_cfg *cfg = ir_data->cfg; 3944 u64 valid; 3945 3946 if (WARN_ON_ONCE(!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))) 3947 return -EINVAL; 3948 3949 if (!entry || !entry->lo.fields_vapic.guest_mode) 3950 return 0; 3951 3952 valid = entry->lo.fields_remap.valid; 3953 3954 entry->lo.val = 0; 3955 entry->hi.val = 0; 3956 3957 entry->lo.fields_remap.valid = valid; 3958 entry->lo.fields_remap.dm = apic->dest_mode_logical; 3959 entry->lo.fields_remap.int_type = APIC_DELIVERY_MODE_FIXED; 3960 entry->hi.fields.vector = cfg->vector; 3961 entry->lo.fields_remap.destination = 3962 APICID_TO_IRTE_DEST_LO(cfg->dest_apicid); 3963 entry->hi.fields.destination = 3964 APICID_TO_IRTE_DEST_HI(cfg->dest_apicid); 3965 3966 return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, 3967 ir_data->irq_2_irte.index, entry); 3968 } 3969 EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode); 3970 3971 static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *info) 3972 { 3973 int ret; 3974 struct amd_iommu_pi_data *pi_data = info; 3975 struct amd_ir_data *ir_data = data->chip_data; 3976 struct irq_2_irte *irte_info = &ir_data->irq_2_irte; 3977 struct iommu_dev_data *dev_data; 3978 3979 if (WARN_ON_ONCE(!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))) 3980 return -EINVAL; 3981 3982 if (ir_data->iommu == NULL) 3983 return -EINVAL; 3984 3985 dev_data = search_dev_data(ir_data->iommu, irte_info->devid); 3986 3987 /* Note: 3988 * This device has never been set up for guest mode. 
3989 * we should not modify the IRTE 3990 */ 3991 if (!dev_data || !dev_data->use_vapic) 3992 return -EINVAL; 3993 3994 ir_data->cfg = irqd_cfg(data); 3995 3996 if (pi_data) { 3997 pi_data->ir_data = ir_data; 3998 3999 ir_data->ga_root_ptr = (pi_data->vapic_addr >> 12); 4000 ir_data->ga_vector = pi_data->vector; 4001 ir_data->ga_tag = pi_data->ga_tag; 4002 if (pi_data->is_guest_mode) 4003 ret = amd_iommu_activate_guest_mode(ir_data, pi_data->cpu, 4004 pi_data->ga_log_intr); 4005 else 4006 ret = amd_iommu_deactivate_guest_mode(ir_data); 4007 } else { 4008 ret = amd_iommu_deactivate_guest_mode(ir_data); 4009 } 4010 4011 return ret; 4012 } 4013 4014 4015 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu, 4016 struct amd_ir_data *ir_data, 4017 struct irq_2_irte *irte_info, 4018 struct irq_cfg *cfg) 4019 { 4020 4021 /* 4022 * Atomically updates the IRTE with the new destination, vector 4023 * and flushes the interrupt entry cache. 4024 */ 4025 iommu->irte_ops->set_affinity(iommu, ir_data->entry, irte_info->devid, 4026 irte_info->index, cfg->vector, 4027 cfg->dest_apicid); 4028 } 4029 4030 static int amd_ir_set_affinity(struct irq_data *data, 4031 const struct cpumask *mask, bool force) 4032 { 4033 struct amd_ir_data *ir_data = data->chip_data; 4034 struct irq_2_irte *irte_info = &ir_data->irq_2_irte; 4035 struct irq_cfg *cfg = irqd_cfg(data); 4036 struct irq_data *parent = data->parent_data; 4037 struct amd_iommu *iommu = ir_data->iommu; 4038 int ret; 4039 4040 if (!iommu) 4041 return -ENODEV; 4042 4043 ret = parent->chip->irq_set_affinity(parent, mask, force); 4044 if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE) 4045 return ret; 4046 4047 amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg); 4048 /* 4049 * After this point, all the interrupts will start arriving 4050 * at the new destination. So, time to cleanup the previous 4051 * vector allocation. 4052 */ 4053 vector_schedule_cleanup(cfg); 4054 4055 return IRQ_SET_MASK_OK_DONE; 4056 } 4057 4058 static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg) 4059 { 4060 struct amd_ir_data *ir_data = irq_data->chip_data; 4061 4062 *msg = ir_data->msi_entry; 4063 } 4064 4065 static struct irq_chip amd_ir_chip = { 4066 .name = "AMD-IR", 4067 .irq_ack = apic_ack_irq, 4068 .irq_set_affinity = amd_ir_set_affinity, 4069 .irq_set_vcpu_affinity = amd_ir_set_vcpu_affinity, 4070 .irq_compose_msi_msg = ir_compose_msi_msg, 4071 }; 4072 4073 static const struct msi_parent_ops amdvi_msi_parent_ops = { 4074 .supported_flags = X86_VECTOR_MSI_FLAGS_SUPPORTED | MSI_FLAG_MULTI_PCI_MSI, 4075 .bus_select_token = DOMAIN_BUS_AMDVI, 4076 .bus_select_mask = MATCH_PCI_MSI, 4077 .prefix = "IR-", 4078 .init_dev_msi_info = msi_parent_init_dev_msi_info, 4079 }; 4080 4081 int amd_iommu_create_irq_domain(struct amd_iommu *iommu) 4082 { 4083 struct irq_domain_info info = { 4084 .fwnode = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index), 4085 .ops = &amd_ir_domain_ops, 4086 .domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI, 4087 .host_data = iommu, 4088 .parent = arch_get_ir_parent_domain(), 4089 }; 4090 4091 if (!info.fwnode) 4092 return -ENOMEM; 4093 4094 iommu->ir_domain = msi_create_parent_irq_domain(&info, &amdvi_msi_parent_ops); 4095 if (!iommu->ir_domain) { 4096 irq_domain_free_fwnode(info.fwnode); 4097 return -ENOMEM; 4098 } 4099 return 0; 4100 } 4101 #endif 4102 4103 MODULE_IMPORT_NS("GENERIC_PT_IOMMU"); 4104