// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 */

#define pr_fmt(fmt)	"AMD-Vi: " fmt
#define dev_fmt(fmt)	pr_fmt(fmt)

#include <linux/ratelimit.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/pci-ats.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-direct.h>
#include <linux/idr.h>
#include <linux/iommu-helper.h>
#include <linux/delay.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/irqdomain.h>
#include <linux/percpu.h>
#include <linux/io-pgtable.h>
#include <linux/cc_platform.h>
#include <asm/irq_remapping.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
#include <asm/hw_irq.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/dma.h>
#include <uapi/linux/iommufd.h>

#include "amd_iommu.h"
#include "../dma-iommu.h"
#include "../irq_remapping.h"
#include "../iommu-pages.h"

#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))

/* Reserved IOVA ranges */
#define MSI_RANGE_START		(0xfee00000)
#define MSI_RANGE_END		(0xfeefffff)
#define HT_RANGE_START		(0xfd00000000ULL)
#define HT_RANGE_END		(0xffffffffffULL)

LIST_HEAD(ioapic_map);
LIST_HEAD(hpet_map);
LIST_HEAD(acpihid_map);

const struct iommu_ops amd_iommu_ops;
static const struct iommu_dirty_ops amd_dirty_ops;

int amd_iommu_max_glx_val = -1;

/*
 * general struct to manage commands sent to an IOMMU
 */
struct iommu_cmd {
	u32 data[4];
};

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This IDA
 * keeps track of which domain IDs are already in use.
 */
DEFINE_IDA(pdom_ids);

static int amd_iommu_attach_device(struct iommu_domain *dom,
				   struct device *dev);

static void set_dte_entry(struct amd_iommu *iommu,
			  struct iommu_dev_data *dev_data);

static void iommu_flush_dte_sync(struct amd_iommu *iommu, u16 devid);

static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid);

/****************************************************************************
 *
 * Helper functions
 *
 ****************************************************************************/

static __always_inline void amd_iommu_atomic128_set(__int128 *ptr, __int128 val)
{
	/*
	 * Note:
	 * We use arch_cmpxchg128_local() because:
	 * - Need cmpxchg16b instruction mainly for 128-bit store to DTE
	 *   (not necessary for cmpxchg since this function is already
	 *   protected by a spin_lock for this DTE).
	 * - Neither need LOCK_PREFIX nor try loop because of the spin_lock.
	 */
	arch_cmpxchg128_local(ptr, *ptr, val);
}

static void write_dte_upper128(struct dev_table_entry *ptr, struct dev_table_entry *new)
{
	struct dev_table_entry old;

	old.data128[1] = ptr->data128[1];
	/*
	 * Preserve DTE_DATA2_INTR_MASK. This needs to be
	 * done here since it must happen inside the
	 * spin_lock(&dev_data->dte_lock) context.
	 */
	new->data[2] &= ~DTE_DATA2_INTR_MASK;
	new->data[2] |= old.data[2] & DTE_DATA2_INTR_MASK;

	amd_iommu_atomic128_set(&ptr->data128[1], new->data128[1]);
}

static void write_dte_lower128(struct dev_table_entry *ptr, struct dev_table_entry *new)
{
	amd_iommu_atomic128_set(&ptr->data128[0], new->data128[0]);
}

/*
 * Note:
 * IOMMU reads the entire Device Table entry in a single 256-bit transaction
 * but the driver is programming DTE using 2 128-bit cmpxchg. So, the driver
 * needs to ensure the following:
 * - DTE[V|GV] bit is being written last when setting.
 * - DTE[V|GV] bit is being written first when clearing.
 *
 * This function is used only by code which updates the DMA translation part
 * of the DTE. So, only consider control bits related to DMA when updating
 * the entry.
 */
static void update_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data,
			  struct dev_table_entry *new)
{
	unsigned long flags;
	struct dev_table_entry *dev_table = get_dev_table(iommu);
	struct dev_table_entry *ptr = &dev_table[dev_data->devid];

	spin_lock_irqsave(&dev_data->dte_lock, flags);

	if (!(ptr->data[0] & DTE_FLAG_V)) {
		/* Existing DTE is not valid. */
		write_dte_upper128(ptr, new);
		write_dte_lower128(ptr, new);
		iommu_flush_dte_sync(iommu, dev_data->devid);
	} else if (!(new->data[0] & DTE_FLAG_V)) {
		/* Existing DTE is valid. New DTE is not valid. */
		write_dte_lower128(ptr, new);
		write_dte_upper128(ptr, new);
		iommu_flush_dte_sync(iommu, dev_data->devid);
	} else if (!FIELD_GET(DTE_FLAG_GV, ptr->data[0])) {
		/*
		 * Both DTEs are valid.
		 * Existing DTE has no guest page table.
		 */
		write_dte_upper128(ptr, new);
		write_dte_lower128(ptr, new);
		iommu_flush_dte_sync(iommu, dev_data->devid);
	} else if (!FIELD_GET(DTE_FLAG_GV, new->data[0])) {
		/*
		 * Both DTEs are valid.
		 * Existing DTE has guest page table,
		 * new DTE has no guest page table.
		 */
		write_dte_lower128(ptr, new);
		write_dte_upper128(ptr, new);
		iommu_flush_dte_sync(iommu, dev_data->devid);
	} else if (FIELD_GET(DTE_GPT_LEVEL_MASK, ptr->data[2]) !=
		   FIELD_GET(DTE_GPT_LEVEL_MASK, new->data[2])) {
		/*
		 * Both DTEs are valid and have guest page table,
		 * but have different number of levels. So, we need
		 * to update both the upper and lower 128-bit values,
		 * which requires disabling and flushing.
		 */
		struct dev_table_entry clear = {};

		/* First disable DTE */
		write_dte_lower128(ptr, &clear);
		iommu_flush_dte_sync(iommu, dev_data->devid);

		/* Then update DTE */
		write_dte_upper128(ptr, new);
		write_dte_lower128(ptr, new);
		iommu_flush_dte_sync(iommu, dev_data->devid);
	} else {
		/*
		 * Both DTEs are valid and have guest page table,
		 * and the same number of levels. We just need to
		 * update the lower 128 bits, so there is no need to
		 * disable the DTE.
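		 *
		 * (Illustration of the ordering rules above: when a valid
		 * DTE gains a guest page table, the upper half is written
		 * before the lower half so GV is only ever observed
		 * together with its GCR3 fields; when the guest page
		 * table goes away, the lower half is written first for
		 * the same reason.)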
		 */
		write_dte_lower128(ptr, new);
	}

	spin_unlock_irqrestore(&dev_data->dte_lock, flags);
}

static void get_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data,
		       struct dev_table_entry *dte)
{
	unsigned long flags;
	struct dev_table_entry *ptr;
	struct dev_table_entry *dev_table = get_dev_table(iommu);

	ptr = &dev_table[dev_data->devid];

	spin_lock_irqsave(&dev_data->dte_lock, flags);
	dte->data128[0] = ptr->data128[0];
	dte->data128[1] = ptr->data128[1];
	spin_unlock_irqrestore(&dev_data->dte_lock, flags);
}

static inline bool pdom_is_v2_pgtbl_mode(struct protection_domain *pdom)
{
	return (pdom && (pdom->pd_mode == PD_MODE_V2));
}

static inline bool pdom_is_in_pt_mode(struct protection_domain *pdom)
{
	return (pdom->domain.type == IOMMU_DOMAIN_IDENTITY);
}

/*
 * We cannot support PASID w/ existing v1 page table in the same domain
 * since it will be nested. However, existing domain w/ v2 page table
 * or passthrough mode can be used for PASID.
 */
static inline bool pdom_is_sva_capable(struct protection_domain *pdom)
{
	return pdom_is_v2_pgtbl_mode(pdom) || pdom_is_in_pt_mode(pdom);
}

static inline int get_acpihid_device_id(struct device *dev,
					struct acpihid_map_entry **entry)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);
	struct acpihid_map_entry *p;

	if (!adev)
		return -ENODEV;

	list_for_each_entry(p, &acpihid_map, list) {
		if (acpi_dev_hid_uid_match(adev, p->hid,
					   p->uid[0] ? p->uid : NULL)) {
			if (entry)
				*entry = p;
			return p->devid;
		}
	}
	return -EINVAL;
}

static inline int get_device_sbdf_id(struct device *dev)
{
	int sbdf;

	if (dev_is_pci(dev))
		sbdf = get_pci_sbdf_id(to_pci_dev(dev));
	else
		sbdf = get_acpihid_device_id(dev, NULL);

	return sbdf;
}

struct dev_table_entry *get_dev_table(struct amd_iommu *iommu)
{
	struct dev_table_entry *dev_table;
	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;

	BUG_ON(pci_seg == NULL);
	dev_table = pci_seg->dev_table;
	BUG_ON(dev_table == NULL);

	return dev_table;
}

static inline u16 get_device_segment(struct device *dev)
{
	u16 seg;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		seg = pci_domain_nr(pdev->bus);
	} else {
		u32 devid = get_acpihid_device_id(dev, NULL);

		seg = PCI_SBDF_TO_SEGID(devid);
	}

	return seg;
}

/* Writes the specific IOMMU for a device into the PCI segment rlookup table */
void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid)
{
	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;

	pci_seg->rlookup_table[devid] = iommu;
}

static struct amd_iommu *__rlookup_amd_iommu(u16 seg, u16 devid)
{
	struct amd_iommu_pci_seg *pci_seg;

	for_each_pci_segment(pci_seg) {
		if (pci_seg->id == seg)
			return pci_seg->rlookup_table[devid];
	}
	return NULL;
}

static struct amd_iommu *rlookup_amd_iommu(struct device *dev)
{
	u16 seg = get_device_segment(dev);
	int devid = get_device_sbdf_id(dev);

	if (devid < 0)
		return NULL;
	return __rlookup_amd_iommu(seg, PCI_SBDF_TO_DEVID(devid));
}

static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_dev_data *dev_data;
	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;

	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return NULL;

	mutex_init(&dev_data->mutex);
	spin_lock_init(&dev_data->dte_lock);
	dev_data->devid = devid;
	ratelimit_default_init(&dev_data->rs);

	llist_add(&dev_data->dev_data_list, &pci_seg->dev_data_list);
	return dev_data;
}

struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_dev_data *dev_data;
	struct llist_node *node;
	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;

	if (llist_empty(&pci_seg->dev_data_list))
		return NULL;

	node = pci_seg->dev_data_list.first;
	llist_for_each_entry(dev_data, node, dev_data_list) {
		if (dev_data->devid == devid)
			return dev_data;
	}

	return NULL;
}

static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
{
	struct dev_table_entry new;
	struct amd_iommu *iommu;
	struct iommu_dev_data *dev_data, *alias_data;
	u16 devid = pci_dev_id(pdev);
	int ret = 0;

	if (devid == alias)
		return 0;

	iommu = rlookup_amd_iommu(&pdev->dev);
	if (!iommu)
		return 0;

	/* Copy the data from pdev */
	dev_data = dev_iommu_priv_get(&pdev->dev);
	if (!dev_data) {
		pr_err("%s : Failed to get dev_data for 0x%x\n", __func__, devid);
		ret = -EINVAL;
		goto out;
	}
	get_dte256(iommu, dev_data, &new);

	/* Setup alias */
	alias_data = find_dev_data(iommu, alias);
	if (!alias_data) {
		pr_err("%s : Failed to get alias dev_data for 0x%x\n", __func__, alias);
		ret = -EINVAL;
		goto out;
	}
	update_dte256(iommu, alias_data, &new);

	amd_iommu_set_rlookup_table(iommu, alias);
out:
	return ret;
}

static void clone_aliases(struct amd_iommu *iommu, struct device *dev)
{
	struct pci_dev *pdev;

	if (!dev_is_pci(dev))
		return;
	pdev = to_pci_dev(dev);

	/*
	 * The IVRS alias stored in the alias table may not be
	 * part of the PCI DMA aliases if its bus differs
	 * from the original device.
	 */
	clone_alias(pdev, iommu->pci_seg->alias_table[pci_dev_id(pdev)], NULL);

	pci_for_each_dma_alias(pdev, clone_alias, NULL);
}

static void setup_aliases(struct amd_iommu *iommu, struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
	u16 ivrs_alias;

	/* For ACPI HID devices, there are no aliases */
	if (!dev_is_pci(dev))
		return;

	/*
	 * Add the IVRS alias to the pci aliases if it is on the same
	 * bus. The IVRS table may know about a quirk that we don't.
	 */
	ivrs_alias = pci_seg->alias_table[pci_dev_id(pdev)];
	if (ivrs_alias != pci_dev_id(pdev) &&
	    PCI_BUS_NUM(ivrs_alias) == pdev->bus->number)
		pci_add_dma_alias(pdev, ivrs_alias & 0xff, 1);

	clone_aliases(iommu, dev);
}

static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_dev_data *dev_data;

	dev_data = search_dev_data(iommu, devid);

	if (dev_data == NULL) {
		dev_data = alloc_dev_data(iommu, devid);
		if (!dev_data)
			return NULL;

		if (translation_pre_enabled(iommu))
			dev_data->defer_attach = true;
	}

	return dev_data;
}

/*
 * Find or create an IOMMU group for an acpihid device.
 */
static struct iommu_group *acpihid_device_group(struct device *dev)
{
	struct acpihid_map_entry *p, *entry = NULL;
	int devid;

	devid = get_acpihid_device_id(dev, &entry);
	if (devid < 0)
		return ERR_PTR(devid);

	list_for_each_entry(p, &acpihid_map, list) {
		if ((devid == p->devid) && p->group)
			entry->group = p->group;
	}

	if (!entry->group)
		entry->group = generic_device_group(dev);
	else
		iommu_group_ref_get(entry->group);

	return entry->group;
}

static inline bool pdev_pasid_supported(struct iommu_dev_data *dev_data)
{
	return (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP);
}

static u32 pdev_get_caps(struct pci_dev *pdev)
{
	int features;
	u32 flags = 0;

	if (pci_ats_supported(pdev))
		flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;

	if (pci_pri_supported(pdev))
		flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;

	features = pci_pasid_features(pdev);
	if (features >= 0) {
		flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;

		if (features & PCI_PASID_CAP_EXEC)
			flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;

		if (features & PCI_PASID_CAP_PRIV)
			flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
	}

	return flags;
}

static inline int pdev_enable_cap_ats(struct pci_dev *pdev)
{
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
	int ret = -EINVAL;

	if (dev_data->ats_enabled)
		return 0;

	if (amd_iommu_iotlb_sup &&
	    (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP)) {
		ret = pci_enable_ats(pdev, PAGE_SHIFT);
		if (!ret) {
			dev_data->ats_enabled = 1;
			dev_data->ats_qdep = pci_ats_queue_depth(pdev);
		}
	}

	return ret;
}

static inline void pdev_disable_cap_ats(struct pci_dev *pdev)
{
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);

	if (dev_data->ats_enabled) {
		pci_disable_ats(pdev);
		dev_data->ats_enabled = 0;
	}
}

static inline int pdev_enable_cap_pri(struct pci_dev *pdev)
{
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
	int ret = -EINVAL;

	if (dev_data->pri_enabled)
		return 0;

	if (!dev_data->ats_enabled)
		return 0;

	if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) {
		/*
		 * First reset the PRI state of the device.
		 * FIXME: Hardcode number of outstanding requests for now
		 */
		if (!pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32)) {
			dev_data->pri_enabled = 1;
			dev_data->pri_tlp = pci_prg_resp_pasid_required(pdev);

			ret = 0;
		}
	}

	return ret;
}

static inline void pdev_disable_cap_pri(struct pci_dev *pdev)
{
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);

	if (dev_data->pri_enabled) {
		pci_disable_pri(pdev);
		dev_data->pri_enabled = 0;
	}
}

static inline int pdev_enable_cap_pasid(struct pci_dev *pdev)
{
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
	int ret = -EINVAL;

	if (dev_data->pasid_enabled)
		return 0;

	if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) {
		/* Only allow access to user-accessible pages */
		ret = pci_enable_pasid(pdev, 0);
		if (!ret)
			dev_data->pasid_enabled = 1;
	}

	return ret;
}

static inline void pdev_disable_cap_pasid(struct pci_dev *pdev)
{
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);

	if (dev_data->pasid_enabled) {
		pci_disable_pasid(pdev);
		dev_data->pasid_enabled = 0;
	}
}

static void pdev_enable_caps(struct pci_dev *pdev)
{
	pdev_enable_cap_ats(pdev);
	pdev_enable_cap_pasid(pdev);
	pdev_enable_cap_pri(pdev);
}

static void pdev_disable_caps(struct pci_dev *pdev)
{
	pdev_disable_cap_ats(pdev);
	pdev_disable_cap_pasid(pdev);
	pdev_disable_cap_pri(pdev);
}

/*
 * This function checks if the driver got a valid device from the caller to
 * avoid dereferencing invalid pointers.
 */
static bool check_device(struct device *dev)
{
	struct amd_iommu_pci_seg *pci_seg;
	struct amd_iommu *iommu;
	int devid, sbdf;

	if (!dev)
		return false;

	sbdf = get_device_sbdf_id(dev);
	if (sbdf < 0)
		return false;
	devid = PCI_SBDF_TO_DEVID(sbdf);

	iommu = rlookup_amd_iommu(dev);
	if (!iommu)
		return false;

	/* Out of our scope? */
	pci_seg = iommu->pci_seg;
	if (devid > pci_seg->last_bdf)
		return false;

	return true;
}

static int iommu_init_device(struct amd_iommu *iommu, struct device *dev)
{
	struct iommu_dev_data *dev_data;
	int devid, sbdf;

	if (dev_iommu_priv_get(dev))
		return 0;

	sbdf = get_device_sbdf_id(dev);
	if (sbdf < 0)
		return sbdf;

	devid = PCI_SBDF_TO_DEVID(sbdf);
	dev_data = find_dev_data(iommu, devid);
	if (!dev_data)
		return -ENOMEM;

	dev_data->dev = dev;

	/*
	 * The dev_iommu_priv_set() needs to be called before setup_aliases.
	 * Otherwise, subsequent call to dev_iommu_priv_get() will fail.
	 */
	dev_iommu_priv_set(dev, dev_data);
	setup_aliases(iommu, dev);

	/*
	 * By default we use passthrough mode for IOMMUv2 capable devices.
	 * But if amd_iommu=force_isolation is set (e.g. to debug DMA to
	 * invalid address), we ignore the capability for the device so
	 * it'll be forced to go into translation mode.
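	 *
	 * (For example, booting with "amd_iommu=force_isolation" on the
	 * kernel command line skips the capability probing below, so
	 * dev_data->flags stays zero and the device is handled like a
	 * device without IOMMUv2 capabilities.)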
	 */
	if ((iommu_default_passthrough() || !amd_iommu_force_isolation) &&
	    dev_is_pci(dev) && amd_iommu_gt_ppr_supported()) {
		dev_data->flags = pdev_get_caps(to_pci_dev(dev));
	}

	return 0;
}

static void iommu_ignore_device(struct amd_iommu *iommu, struct device *dev)
{
	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
	struct dev_table_entry *dev_table = get_dev_table(iommu);
	int devid, sbdf;

	sbdf = get_device_sbdf_id(dev);
	if (sbdf < 0)
		return;

	devid = PCI_SBDF_TO_DEVID(sbdf);
	pci_seg->rlookup_table[devid] = NULL;
	memset(&dev_table[devid], 0, sizeof(struct dev_table_entry));

	setup_aliases(iommu, dev);
}


/****************************************************************************
 *
 * Interrupt handling functions
 *
 ****************************************************************************/

static void dump_dte_entry(struct amd_iommu *iommu, u16 devid)
{
	int i;
	struct dev_table_entry dte;
	struct iommu_dev_data *dev_data = find_dev_data(iommu, devid);

	get_dte256(iommu, dev_data, &dte);

	for (i = 0; i < 4; ++i)
		pr_err("DTE[%d]: %016llx\n", i, dte.data[i]);
}

static void dump_command(unsigned long phys_addr)
{
	struct iommu_cmd *cmd = iommu_phys_to_virt(phys_addr);
	int i;

	for (i = 0; i < 4; ++i)
		pr_err("CMD[%d]: %08x\n", i, cmd->data[i]);
}

static void amd_iommu_report_rmp_hw_error(struct amd_iommu *iommu, volatile u32 *event)
{
	struct iommu_dev_data *dev_data = NULL;
	int devid, vmg_tag, flags;
	struct pci_dev *pdev;
	u64 spa;

	devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
	vmg_tag = (event[1]) & 0xFFFF;
	flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
	spa     = ((u64)event[3] << 32) | (event[2] & 0xFFFFFFF8);

	pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
					   devid & 0xff);
	if (pdev)
		dev_data = dev_iommu_priv_get(&pdev->dev);

	if (dev_data) {
		if (__ratelimit(&dev_data->rs)) {
			pci_err(pdev, "Event logged [RMP_HW_ERROR vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
				vmg_tag, spa, flags);
		}
	} else {
		pr_err_ratelimited("Event logged [RMP_HW_ERROR device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
				   iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
				   vmg_tag, spa, flags);
	}

	if (pdev)
		pci_dev_put(pdev);
}

static void amd_iommu_report_rmp_fault(struct amd_iommu *iommu, volatile u32 *event)
{
	struct iommu_dev_data *dev_data = NULL;
	int devid, flags_rmp, vmg_tag, flags;
	struct pci_dev *pdev;
	u64 gpa;

	devid     = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
	flags_rmp = (event[0] >> EVENT_FLAGS_SHIFT) & 0xFF;
	vmg_tag   = (event[1]) & 0xFFFF;
	flags     = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
	gpa       = ((u64)event[3] << 32) | event[2];

	pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
					   devid & 0xff);
	if (pdev)
		dev_data = dev_iommu_priv_get(&pdev->dev);

	if (dev_data) {
		if (__ratelimit(&dev_data->rs)) {
			pci_err(pdev, "Event logged [RMP_PAGE_FAULT vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
				vmg_tag, gpa, flags_rmp, flags);
		}
	} else {
		pr_err_ratelimited("Event logged [RMP_PAGE_FAULT device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
				   iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
				   vmg_tag, gpa, flags_rmp, flags);
	}

	if (pdev)
		pci_dev_put(pdev);
}

#define IS_IOMMU_MEM_TRANSACTION(flags)		\
	(((flags) & EVENT_FLAG_I) == 0)

#define IS_WRITE_REQUEST(flags)			\
	((flags) & EVENT_FLAG_RW)

static void amd_iommu_report_page_fault(struct amd_iommu *iommu,
					u16 devid, u16 domain_id,
					u64 address, int flags)
{
	struct iommu_dev_data *dev_data = NULL;
	struct pci_dev *pdev;

	pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
					   devid & 0xff);
	if (pdev)
		dev_data = dev_iommu_priv_get(&pdev->dev);

	if (dev_data) {
		/*
		 * If this is a DMA fault (for which the I(nterrupt)
		 * bit will be unset), allow report_iommu_fault() to
		 * prevent logging it.
		 */
		if (IS_IOMMU_MEM_TRANSACTION(flags)) {
			/* Device not attached to domain properly */
			if (dev_data->domain == NULL) {
				pr_err_ratelimited("Event logged [Device not attached to domain properly]\n");
				pr_err_ratelimited(" device=%04x:%02x:%02x.%x domain=0x%04x\n",
						   iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid),
						   PCI_FUNC(devid), domain_id);
				goto out;
			}

			if (!report_iommu_fault(&dev_data->domain->domain,
						&pdev->dev, address,
						IS_WRITE_REQUEST(flags) ?
							IOMMU_FAULT_WRITE :
							IOMMU_FAULT_READ))
				goto out;
		}

		if (__ratelimit(&dev_data->rs)) {
			pci_err(pdev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%llx flags=0x%04x]\n",
				domain_id, address, flags);
		}
	} else {
		pr_err_ratelimited("Event logged [IO_PAGE_FAULT device=%04x:%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
				   iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
				   domain_id, address, flags);
	}

out:
	if (pdev)
		pci_dev_put(pdev);
}

static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
{
	struct device *dev = iommu->iommu.dev;
	int type, devid, flags, tag;
	volatile u32 *event = __evt;
	int count = 0;
	u64 address, ctrl;
	u32 pasid;

retry:
	type    = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
	devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
	pasid   = (event[0] & EVENT_DOMID_MASK_HI) |
		  (event[1] & EVENT_DOMID_MASK_LO);
	flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
	address = (u64)(((u64)event[3]) << 32) | event[2];
	ctrl    = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);

	if (type == 0) {
		/* Did we hit the erratum? */
		if (++count == LOOP_TIMEOUT) {
			pr_err("No event written to event log\n");
			return;
		}
		udelay(1);
		goto retry;
	}

	if (type == EVENT_TYPE_IO_FAULT) {
		amd_iommu_report_page_fault(iommu, devid, pasid, address, flags);
		return;
	}

	switch (type) {
	case EVENT_TYPE_ILL_DEV:
		dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
			pasid, address, flags);
		dev_err(dev, "Control Reg : 0x%llx\n", ctrl);
		dump_dte_entry(iommu, devid);
		break;
	case EVENT_TYPE_DEV_TAB_ERR:
		dev_err(dev, "Event logged [DEV_TAB_HARDWARE_ERROR device=%04x:%02x:%02x.%x "
			"address=0x%llx flags=0x%04x]\n",
			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
			address, flags);
		break;
	case EVENT_TYPE_PAGE_TAB_ERR:
		dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%04x:%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n",
			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
			pasid, address, flags);
		break;
	case EVENT_TYPE_ILL_CMD:
		dev_err(dev, "Event logged [ILLEGAL_COMMAND_ERROR address=0x%llx]\n", address);
		dump_command(address);
		break;
	case EVENT_TYPE_CMD_HARD_ERR:
		dev_err(dev, "Event logged [COMMAND_HARDWARE_ERROR address=0x%llx flags=0x%04x]\n",
			address, flags);
		break;
	case EVENT_TYPE_IOTLB_INV_TO:
		dev_err(dev, "Event logged [IOTLB_INV_TIMEOUT device=%04x:%02x:%02x.%x address=0x%llx]\n",
			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
			address);
		break;
	case EVENT_TYPE_INV_DEV_REQ:
		dev_err(dev, "Event logged [INVALID_DEVICE_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
			pasid, address, flags);
		break;
	case EVENT_TYPE_RMP_FAULT:
		amd_iommu_report_rmp_fault(iommu, event);
		break;
	case EVENT_TYPE_RMP_HW_ERR:
		amd_iommu_report_rmp_hw_error(iommu, event);
		break;
	case EVENT_TYPE_INV_PPR_REQ:
		pasid = PPR_PASID(*((u64 *)__evt));
		tag = event[1] & 0x03FF;
		dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
			pasid, address, flags, tag);
		break;
	default:
		dev_err(dev, "Event logged [UNKNOWN event[0]=0x%08x event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x]\n",
			event[0], event[1], event[2], event[3]);
	}

	/*
	 * To detect hardware erratum 732 we need to clear the
	 * entry back to zero. This issue does not exist on SNP-enabled
	 * systems. Also, this buffer is not writable on SNP-enabled
	 * systems.
	 */
	if (!amd_iommu_snp_en)
		memset(__evt, 0, 4 * sizeof(u32));
}

static void iommu_poll_events(struct amd_iommu *iommu)
{
	u32 head, tail;

	head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	while (head != tail) {
		iommu_print_event(iommu, iommu->evt_buf + head);

		/* Update head pointer of hardware ring-buffer */
		head = (head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE;
		writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	}

}

#ifdef CONFIG_IRQ_REMAP
static int (*iommu_ga_log_notifier)(u32);

int amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
{
	iommu_ga_log_notifier = notifier;

	return 0;
}
EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);

static void iommu_poll_ga_log(struct amd_iommu *iommu)
{
	u32 head, tail;

	if (iommu->ga_log == NULL)
		return;

	head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET);

	while (head != tail) {
		volatile u64 *raw;
		u64 log_entry;

		raw = (u64 *)(iommu->ga_log + head);

		/* Avoid memcpy function-call overhead */
		log_entry = *raw;

		/* Update head pointer of hardware ring-buffer */
		head = (head + GA_ENTRY_SIZE) % GA_LOG_SIZE;
		writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);

		/* Handle GA entry */
		switch (GA_REQ_TYPE(log_entry)) {
		case GA_GUEST_NR:
			if (!iommu_ga_log_notifier)
				break;

			pr_debug("%s: devid=%#x, ga_tag=%#x\n",
				 __func__, GA_DEVID(log_entry),
				 GA_TAG(log_entry));

			if (iommu_ga_log_notifier(GA_TAG(log_entry)) != 0)
				pr_err("GA log notifier failed.\n");
			break;
		default:
			break;
		}
	}
}

static void
amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu)
{
	if (!irq_remapping_enabled || !dev_is_pci(dev) ||
	    !pci_dev_has_default_msi_parent_domain(to_pci_dev(dev)))
		return;

	dev_set_msi_domain(dev, iommu->ir_domain);
}

#else /* CONFIG_IRQ_REMAP */
static inline void
amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
#endif /* !CONFIG_IRQ_REMAP */

static void amd_iommu_handle_irq(void *data, const char *evt_type,
				 u32 int_mask, u32 overflow_mask,
				 void (*int_handler)(struct amd_iommu *),
				 void (*overflow_handler)(struct amd_iommu *))
{
	struct amd_iommu *iommu = (struct amd_iommu *) data;
	u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
	u32 mask = int_mask | overflow_mask;

	while (status & mask) {
		/* Enable interrupt sources again */
		writel(mask, iommu->mmio_base + MMIO_STATUS_OFFSET);

		if (int_handler) {
			pr_devel("Processing IOMMU (ivhd%d) %s Log\n",
				 iommu->index, evt_type);
			int_handler(iommu);
		}

		if ((status & overflow_mask) && overflow_handler)
			overflow_handler(iommu);

		/*
		 * Hardware bug: ERBT1312
		 * When re-enabling an interrupt (by writing 1
		 * to clear the bit), the hardware might also try to set
		 * the interrupt bit in the event status register.
		 * In this scenario, the bit will be set and will disable
		 * subsequent interrupts.
		 *
		 * Workaround: The IOMMU driver should read back the
		 * status register and check if the interrupt bits are cleared.
		 * If not, the driver will need to go through the interrupt
		 * handler again and re-clear the bits.
		 */
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
	}
}

irqreturn_t amd_iommu_int_thread_evtlog(int irq, void *data)
{
	amd_iommu_handle_irq(data, "Evt", MMIO_STATUS_EVT_INT_MASK,
			     MMIO_STATUS_EVT_OVERFLOW_MASK,
			     iommu_poll_events, amd_iommu_restart_event_logging);

	return IRQ_HANDLED;
}

irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data)
{
	amd_iommu_handle_irq(data, "PPR", MMIO_STATUS_PPR_INT_MASK,
			     MMIO_STATUS_PPR_OVERFLOW_MASK,
			     amd_iommu_poll_ppr_log, amd_iommu_restart_ppr_log);

	return IRQ_HANDLED;
}

irqreturn_t amd_iommu_int_thread_galog(int irq, void *data)
{
#ifdef CONFIG_IRQ_REMAP
	amd_iommu_handle_irq(data, "GA", MMIO_STATUS_GALOG_INT_MASK,
			     MMIO_STATUS_GALOG_OVERFLOW_MASK,
			     iommu_poll_ga_log, amd_iommu_restart_ga_log);
#endif

	return IRQ_HANDLED;
}

irqreturn_t amd_iommu_int_thread(int irq, void *data)
{
	amd_iommu_int_thread_evtlog(irq, data);
	amd_iommu_int_thread_pprlog(irq, data);
	amd_iommu_int_thread_galog(irq, data);

	return IRQ_HANDLED;
}

irqreturn_t amd_iommu_int_handler(int irq, void *data)
{
	return IRQ_WAKE_THREAD;
}

/****************************************************************************
 *
 * IOMMU command queuing functions
 *
 ****************************************************************************/

static int wait_on_sem(struct amd_iommu *iommu, u64 data)
{
	int i = 0;

	while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) {
		udelay(1);
		i += 1;
	}

	if (i == LOOP_TIMEOUT) {
		pr_alert("Completion-Wait loop timed out\n");
		return -EIO;
	}

	return 0;
}

static void copy_cmd_to_buffer(struct amd_iommu *iommu,
			       struct iommu_cmd *cmd)
{
	u8 *target;
	u32 tail;

	/* Copy command to buffer */
	tail = iommu->cmd_buf_tail;
	target = iommu->cmd_buf + tail;
	memcpy(target, cmd, sizeof(*cmd));

	tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
	iommu->cmd_buf_tail = tail;

	/* Tell the IOMMU about it */
	writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
}

static void build_completion_wait(struct iommu_cmd *cmd,
				  struct amd_iommu *iommu,
				  u64 data)
{
	u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem);

	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
	cmd->data[1] = upper_32_bits(paddr);
	cmd->data[2] = lower_32_bits(data);
	cmd->data[3] = upper_32_bits(data);
	CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
}

static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0] = devid;
	CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
}

/*
 * Builds an invalidation address which is suitable for one page or multiple
 * pages. Sets the size bit (S) if more than one page is flushed.
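 *
 * Worked example (hypothetical values): flushing address=0x10000,
 * size=0x3000 covers more than one page, so end = 0x12fff and
 * msb_diff = fls64(end ^ address) - 1 = 13. The bits below bit 13 are
 * set and bits 11:0 are then cleared, giving 0x11000, and the S bit is
 * set, so the IOMMU invalidates a power-of-two range covering the
 * requested pages instead of a single 4kb page.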
 */
static inline u64 build_inv_address(u64 address, size_t size)
{
	u64 pages, end, msb_diff;

	pages = iommu_num_pages(address, size, PAGE_SIZE);

	if (pages == 1)
		return address & PAGE_MASK;

	end = address + size - 1;

	/*
	 * msb_diff would hold the index of the most significant bit that
	 * flipped between the start and end.
	 */
	msb_diff = fls64(end ^ address) - 1;

	/*
	 * Bits 63:52 are sign extended. If for some reason bit 51 is different
	 * between the start and the end, invalidate everything.
	 */
	if (unlikely(msb_diff > 51)) {
		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
	} else {
		/*
		 * The msb-bit must be clear on the address. Just set all the
		 * lower bits.
		 */
		address |= (1ull << msb_diff) - 1;
	}

	/* Clear bits 11:0 */
	address &= PAGE_MASK;

	/* Set the size bit - we flush more than one 4kb page */
	return address | CMD_INV_IOMMU_PAGES_SIZE_MASK;
}

static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
				  size_t size, u16 domid,
				  ioasid_t pasid, bool gn)
{
	u64 inv_address = build_inv_address(address, size);

	memset(cmd, 0, sizeof(*cmd));

	cmd->data[1] |= domid;
	cmd->data[2] = lower_32_bits(inv_address);
	cmd->data[3] = upper_32_bits(inv_address);
	/* PDE bit - we want to flush everything, not only the PTEs */
	cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
	if (gn) {
		cmd->data[0] |= pasid;
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
	}
	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
}

static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
				  u64 address, size_t size,
				  ioasid_t pasid, bool gn)
{
	u64 inv_address = build_inv_address(address, size);

	memset(cmd, 0, sizeof(*cmd));

	cmd->data[0] = devid;
	cmd->data[0] |= (qdep & 0xff) << 24;
	cmd->data[1] = devid;
	cmd->data[2] = lower_32_bits(inv_address);
	cmd->data[3] = upper_32_bits(inv_address);
	if (gn) {
		cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
		cmd->data[1] |= (pasid & 0xff) << 16;
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
	}

	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
}

static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, u32 pasid,
			       int status, int tag, u8 gn)
{
	memset(cmd, 0, sizeof(*cmd));

	cmd->data[0] = devid;
	if (gn) {
		cmd->data[1] = pasid;
		cmd->data[2] = CMD_INV_IOMMU_PAGES_GN_MASK;
	}
	cmd->data[3] = tag & 0x1ff;
	cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;

	CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
}

static void build_inv_all(struct iommu_cmd *cmd)
{
	memset(cmd, 0, sizeof(*cmd));
	CMD_SET_TYPE(cmd, CMD_INV_ALL);
}

static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0] = devid;
	CMD_SET_TYPE(cmd, CMD_INV_IRT);
}

/*
 * Writes the command to the IOMMU's command buffer and informs the
 * hardware about the new command.
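 *
 * A quick illustration of the free-space check below (the numbers are
 * hypothetical and assume a command buffer size of 8 KiB with the
 * 16-byte commands defined above): with head=0x0 and tail=0x1fc0,
 * next_tail is 0x1fd0 and left = (0x0 - 0x1fd0) % 0x2000 = 0x30, so
 * the command is queued; the following command would see left = 0x20
 * and has to wait until the IOMMU advances the head pointer.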
 */
static int __iommu_queue_command_sync(struct amd_iommu *iommu,
				      struct iommu_cmd *cmd,
				      bool sync)
{
	unsigned int count = 0;
	u32 left, next_tail;

	next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
again:
	left = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE;

	if (left <= 0x20) {
		/* Skip udelay() the first time around */
		if (count++) {
			if (count == LOOP_TIMEOUT) {
				pr_err("Command buffer timeout\n");
				return -EIO;
			}

			udelay(1);
		}

		/* Update head and recheck remaining space */
		iommu->cmd_buf_head = readl(iommu->mmio_base +
					    MMIO_CMD_HEAD_OFFSET);

		goto again;
	}

	copy_cmd_to_buffer(iommu, cmd);

	/* Do we need to make sure all commands are processed? */
	iommu->need_sync = sync;

	return 0;
}

static int iommu_queue_command_sync(struct amd_iommu *iommu,
				    struct iommu_cmd *cmd,
				    bool sync)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&iommu->lock, flags);
	ret = __iommu_queue_command_sync(iommu, cmd, sync);
	raw_spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}

static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
	return iommu_queue_command_sync(iommu, cmd, true);
}

/*
 * This function queues a completion wait command into the command
 * buffer of an IOMMU
 */
static int iommu_completion_wait(struct amd_iommu *iommu)
{
	struct iommu_cmd cmd;
	unsigned long flags;
	int ret;
	u64 data;

	if (!iommu->need_sync)
		return 0;

	data = atomic64_inc_return(&iommu->cmd_sem_val);
	build_completion_wait(&cmd, iommu, data);

	raw_spin_lock_irqsave(&iommu->lock, flags);

	ret = __iommu_queue_command_sync(iommu, &cmd, false);
	if (ret)
		goto out_unlock;

	ret = wait_on_sem(iommu, data);

out_unlock:
	raw_spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}

static void domain_flush_complete(struct protection_domain *domain)
{
	struct pdom_iommu_info *pdom_iommu_info;
	unsigned long i;

	lockdep_assert_held(&domain->lock);

	/*
	 * Devices of this domain are behind this IOMMU
	 * We need to wait for completion of all commands.
	 */
	xa_for_each(&domain->iommu_array, i, pdom_iommu_info)
		iommu_completion_wait(pdom_iommu_info->iommu);
}

static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_cmd cmd;

	build_inv_dte(&cmd, devid);

	return iommu_queue_command(iommu, &cmd);
}

static void iommu_flush_dte_sync(struct amd_iommu *iommu, u16 devid)
{
	int ret;

	ret = iommu_flush_dte(iommu, devid);
	if (!ret)
		iommu_completion_wait(iommu);
}

static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
{
	u32 devid;
	u16 last_bdf = iommu->pci_seg->last_bdf;

	for (devid = 0; devid <= last_bdf; ++devid)
		iommu_flush_dte(iommu, devid);

	iommu_completion_wait(iommu);
}

/*
 * This function uses heavy locking and may disable irqs for some time. But
 * this is no issue because it is only called during resume.
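 *
 * (It is invoked from amd_iommu_flush_all_caches() below when the
 * hardware does not support the INVALIDATE_ALL command.)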
 */
static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
{
	u32 dom_id;
	u16 last_bdf = iommu->pci_seg->last_bdf;

	for (dom_id = 0; dom_id <= last_bdf; ++dom_id) {
		struct iommu_cmd cmd;
		build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
				      dom_id, IOMMU_NO_PASID, false);
		iommu_queue_command(iommu, &cmd);
	}

	iommu_completion_wait(iommu);
}

static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
{
	struct iommu_cmd cmd;

	build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
			      dom_id, IOMMU_NO_PASID, false);
	iommu_queue_command(iommu, &cmd);

	iommu_completion_wait(iommu);
}

static void amd_iommu_flush_all(struct amd_iommu *iommu)
{
	struct iommu_cmd cmd;

	build_inv_all(&cmd);

	iommu_queue_command(iommu, &cmd);
	iommu_completion_wait(iommu);
}

static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_cmd cmd;

	build_inv_irt(&cmd, devid);

	iommu_queue_command(iommu, &cmd);
}

static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
{
	u32 devid;
	u16 last_bdf = iommu->pci_seg->last_bdf;

	if (iommu->irtcachedis_enabled)
		return;

	for (devid = 0; devid <= last_bdf; devid++)
		iommu_flush_irt(iommu, devid);

	iommu_completion_wait(iommu);
}

void amd_iommu_flush_all_caches(struct amd_iommu *iommu)
{
	if (check_feature(FEATURE_IA)) {
		amd_iommu_flush_all(iommu);
	} else {
		amd_iommu_flush_dte_all(iommu);
		amd_iommu_flush_irt_all(iommu);
		amd_iommu_flush_tlb_all(iommu);
	}
}

/*
 * Command send function for flushing on-device TLB
 */
static int device_flush_iotlb(struct iommu_dev_data *dev_data, u64 address,
			      size_t size, ioasid_t pasid, bool gn)
{
	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
	struct iommu_cmd cmd;
	int qdep = dev_data->ats_qdep;

	build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address,
			      size, pasid, gn);

	return iommu_queue_command(iommu, &cmd);
}

static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data)
{
	struct amd_iommu *iommu = data;

	return iommu_flush_dte(iommu, alias);
}

/*
 * Command send function for invalidating a device table entry
 */
static int device_flush_dte(struct iommu_dev_data *dev_data)
{
	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
	struct pci_dev *pdev = NULL;
	struct amd_iommu_pci_seg *pci_seg;
	u16 alias;
	int ret;

	if (dev_is_pci(dev_data->dev))
		pdev = to_pci_dev(dev_data->dev);

	if (pdev)
		ret = pci_for_each_dma_alias(pdev,
					     device_flush_dte_alias, iommu);
	else
		ret = iommu_flush_dte(iommu, dev_data->devid);
	if (ret)
		return ret;

	pci_seg = iommu->pci_seg;
	alias = pci_seg->alias_table[dev_data->devid];
	if (alias != dev_data->devid) {
		ret = iommu_flush_dte(iommu, alias);
		if (ret)
			return ret;
	}

	if (dev_data->ats_enabled) {
		/* Invalidate the entire contents of an IOTLB */
		ret = device_flush_iotlb(dev_data, 0, ~0UL,
					 IOMMU_NO_PASID, false);
	}

	return ret;
}

static int domain_flush_pages_v2(struct protection_domain *pdom,
				 u64 address, size_t size)
{
	struct iommu_dev_data *dev_data;
	struct iommu_cmd cmd;
	int ret = 0;

	lockdep_assert_held(&pdom->lock);
	list_for_each_entry(dev_data, &pdom->dev_list, list) {
		struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
		u16 domid = dev_data->gcr3_info.domid;

		build_inv_iommu_pages(&cmd, address, size,
				      domid, IOMMU_NO_PASID, true);

		ret |= iommu_queue_command(iommu, &cmd);
	}

	return ret;
}

static int domain_flush_pages_v1(struct protection_domain *pdom,
				 u64 address, size_t size)
{
	struct pdom_iommu_info *pdom_iommu_info;
	struct iommu_cmd cmd;
	int ret = 0;
	unsigned long i;

	lockdep_assert_held(&pdom->lock);

	build_inv_iommu_pages(&cmd, address, size,
			      pdom->id, IOMMU_NO_PASID, false);

	xa_for_each(&pdom->iommu_array, i, pdom_iommu_info) {
		/*
		 * Devices of this domain are behind this IOMMU.
		 * We need a TLB flush.
		 */
		ret |= iommu_queue_command(pdom_iommu_info->iommu, &cmd);
	}

	return ret;
}

/*
 * TLB invalidation function which is called from the mapping functions.
 * It flushes a range of PTEs of the domain.
 */
static void __domain_flush_pages(struct protection_domain *domain,
				 u64 address, size_t size)
{
	struct iommu_dev_data *dev_data;
	int ret = 0;
	ioasid_t pasid = IOMMU_NO_PASID;
	bool gn = false;

	lockdep_assert_held(&domain->lock);

	if (pdom_is_v2_pgtbl_mode(domain)) {
		gn = true;
		ret = domain_flush_pages_v2(domain, address, size);
	} else {
		ret = domain_flush_pages_v1(domain, address, size);
	}

	list_for_each_entry(dev_data, &domain->dev_list, list) {

		if (!dev_data->ats_enabled)
			continue;

		ret |= device_flush_iotlb(dev_data, address, size, pasid, gn);
	}

	WARN_ON(ret);
}

void amd_iommu_domain_flush_pages(struct protection_domain *domain,
				  u64 address, size_t size)
{
	lockdep_assert_held(&domain->lock);

	if (likely(!amd_iommu_np_cache)) {
		__domain_flush_pages(domain, address, size);

		/* Wait until IOMMU TLB and all device IOTLB flushes are complete */
		domain_flush_complete(domain);

		return;
	}

	/*
	 * When NpCache is on, we infer that we run in a VM and use a vIOMMU.
	 * In such setups it is best to avoid flushes of ranges which are not
	 * naturally aligned, since it would lead to flushes of unmodified
	 * PTEs. Such flushes would require the hypervisor to do more work than
	 * necessary. Therefore, perform repeated flushes of aligned ranges
	 * until the whole range is covered. Each iteration flushes the smaller
	 * of the natural alignment of the address that we flush and the
	 * greatest naturally aligned region that fits in the range.
	 */
	while (size != 0) {
		int addr_alignment = __ffs(address);
		int size_alignment = __fls(size);
		int min_alignment;
		size_t flush_size;

		/*
		 * size is always non-zero, but address might be zero, causing
		 * addr_alignment to be negative. As the casting of the
		 * argument in __ffs(address) to long might trim the high bits
		 * of the address on x86-32, cast to long when doing the check.
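		 *
		 * As a worked illustration of the loop (hypothetical
		 * values): flushing address=0x3000, size=0x5000 first
		 * flushes 0x1000 bytes at 0x3000 (the address is only
		 * 4kb aligned), then 0x4000 bytes at 0x4000, covering
		 * the whole range in two naturally aligned flushes.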
		 */
		if (likely((unsigned long)address != 0))
			min_alignment = min(addr_alignment, size_alignment);
		else
			min_alignment = size_alignment;

		flush_size = 1ul << min_alignment;

		__domain_flush_pages(domain, address, flush_size);
		address += flush_size;
		size -= flush_size;
	}

	/* Wait until IOMMU TLB and all device IOTLB flushes are complete */
	domain_flush_complete(domain);
}

/* Flush the whole IO/TLB for a given protection domain - including PDE */
static void amd_iommu_domain_flush_all(struct protection_domain *domain)
{
	amd_iommu_domain_flush_pages(domain, 0,
				     CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
}

void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data,
				     ioasid_t pasid, u64 address, size_t size)
{
	struct iommu_cmd cmd;
	struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);

	build_inv_iommu_pages(&cmd, address, size,
			      dev_data->gcr3_info.domid, pasid, true);
	iommu_queue_command(iommu, &cmd);

	if (dev_data->ats_enabled)
		device_flush_iotlb(dev_data, address, size, pasid, true);

	iommu_completion_wait(iommu);
}

static void dev_flush_pasid_all(struct iommu_dev_data *dev_data,
				ioasid_t pasid)
{
	amd_iommu_dev_flush_pasid_pages(dev_data, pasid, 0,
					CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
}

/* Flush the not present cache if it exists */
static void domain_flush_np_cache(struct protection_domain *domain,
				  dma_addr_t iova, size_t size)
{
	if (unlikely(amd_iommu_np_cache)) {
		unsigned long flags;

		spin_lock_irqsave(&domain->lock, flags);
		amd_iommu_domain_flush_pages(domain, iova, size);
		spin_unlock_irqrestore(&domain->lock, flags);
	}
}


/*
 * This function flushes the DTEs for all devices in the domain.
 */
void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
{
	struct iommu_dev_data *dev_data;

	lockdep_assert_held(&domain->lock);

	list_for_each_entry(dev_data, &domain->dev_list, list) {
		struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);

		set_dte_entry(iommu, dev_data);
		clone_aliases(iommu, dev_data->dev);
	}

	list_for_each_entry(dev_data, &domain->dev_list, list)
		device_flush_dte(dev_data);

	domain_flush_complete(domain);
}

int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag)
{
	struct iommu_dev_data *dev_data;
	struct amd_iommu *iommu;
	struct iommu_cmd cmd;

	dev_data = dev_iommu_priv_get(dev);
	iommu = get_amd_iommu_from_dev(dev);

	build_complete_ppr(&cmd, dev_data->devid, pasid, status,
			   tag, dev_data->pri_tlp);

	return iommu_queue_command(iommu, &cmd);
}

/****************************************************************************
 *
 * The next functions belong to the domain allocation. A domain is
 * allocated for every IOMMU as the default domain. If device isolation
 * is enabled, every device gets its own domain. The most important thing
 * about domains is the page table mapping the DMA address space they
 * contain.
 *
 ****************************************************************************/

static int pdom_id_alloc(void)
{
	return ida_alloc_range(&pdom_ids, 1, MAX_DOMAIN_ID - 1, GFP_ATOMIC);
}

static void pdom_id_free(int id)
{
	ida_free(&pdom_ids, id);
}

static void free_gcr3_tbl_level1(u64 *tbl)
{
	u64 *ptr;
	int i;

	for (i = 0; i < 512; ++i) {
		if (!(tbl[i] & GCR3_VALID))
			continue;

		ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);

		iommu_free_page(ptr);
	}
}

static void free_gcr3_tbl_level2(u64 *tbl)
{
	u64 *ptr;
	int i;

	for (i = 0; i < 512; ++i) {
		if (!(tbl[i] & GCR3_VALID))
			continue;

		ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);

		free_gcr3_tbl_level1(ptr);
	}
}

static void free_gcr3_table(struct gcr3_tbl_info *gcr3_info)
{
	if (gcr3_info->glx == 2)
		free_gcr3_tbl_level2(gcr3_info->gcr3_tbl);
	else if (gcr3_info->glx == 1)
		free_gcr3_tbl_level1(gcr3_info->gcr3_tbl);
	else
		WARN_ON_ONCE(gcr3_info->glx != 0);

	gcr3_info->glx = 0;

	/* Free per device domain ID */
	pdom_id_free(gcr3_info->domid);

	iommu_free_page(gcr3_info->gcr3_tbl);
	gcr3_info->gcr3_tbl = NULL;
}

/*
 * Number of GCR3 table levels required. Each level is a 4-Kbyte page and
 * can contain up to 512 entries.
 */
static int get_gcr3_levels(int pasids)
{
	int levels;

	if (pasids == -1)
		return amd_iommu_max_glx_val;

	levels = get_count_order(pasids);

	return levels ? (DIV_ROUND_UP(levels, 9) - 1) : levels;
}
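
/*
 * Worked example for get_gcr3_levels() (hypothetical PASID counts): a
 * device exposing 512 PASIDs needs get_count_order(512) = 9 bits, so a
 * single-level table (glx = 0) is enough; a device exposing the full
 * 2^16 PASIDs needs 16 bits and therefore a two-level table (glx = 1).
 */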

static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info,
			    struct amd_iommu *iommu, int pasids)
{
	int levels = get_gcr3_levels(pasids);
	int nid = iommu ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE;
	int domid;

	if (levels > amd_iommu_max_glx_val)
		return -EINVAL;

	if (gcr3_info->gcr3_tbl)
		return -EBUSY;

	/* Allocate per device domain ID */
	domid = pdom_id_alloc();
	if (domid <= 0)
		return -ENOSPC;
	gcr3_info->domid = domid;

	gcr3_info->gcr3_tbl = iommu_alloc_page_node(nid, GFP_ATOMIC);
	if (gcr3_info->gcr3_tbl == NULL) {
		pdom_id_free(domid);
		return -ENOMEM;
	}

	gcr3_info->glx = levels;

	return 0;
}

static u64 *__get_gcr3_pte(struct gcr3_tbl_info *gcr3_info,
			   ioasid_t pasid, bool alloc)
{
	int index;
	u64 *pte;
	u64 *root = gcr3_info->gcr3_tbl;
	int level = gcr3_info->glx;

	while (true) {

		index = (pasid >> (9 * level)) & 0x1ff;
		pte   = &root[index];

		if (level == 0)
			break;

		if (!(*pte & GCR3_VALID)) {
			if (!alloc)
				return NULL;

			root = (void *)get_zeroed_page(GFP_ATOMIC);
			if (root == NULL)
				return NULL;

			*pte = iommu_virt_to_phys(root) | GCR3_VALID;
		}

		root = iommu_phys_to_virt(*pte & PAGE_MASK);

		level -= 1;
	}

	return pte;
}

static int update_gcr3(struct iommu_dev_data *dev_data,
		       ioasid_t pasid, unsigned long gcr3, bool set)
{
	struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
	u64 *pte;

	pte = __get_gcr3_pte(gcr3_info, pasid, true);
	if (pte == NULL)
		return -ENOMEM;

	if (set)
		*pte = (gcr3 & PAGE_MASK) | GCR3_VALID;
	else
		*pte = 0;

	dev_flush_pasid_all(dev_data, pasid);
	return 0;
}

int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid,
		       unsigned long gcr3)
{
	struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
	int ret;

	iommu_group_mutex_assert(dev_data->dev);

	ret = update_gcr3(dev_data, pasid, gcr3, true);
	if (ret)
		return ret;

	gcr3_info->pasid_cnt++;
	return ret;
}

int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid)
{
	struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
	int ret;

	iommu_group_mutex_assert(dev_data->dev);

	ret = update_gcr3(dev_data, pasid, 0, false);
	if (ret)
		return ret;

	gcr3_info->pasid_cnt--;
	return ret;
}

static void make_clear_dte(struct iommu_dev_data *dev_data, struct dev_table_entry *ptr,
			   struct dev_table_entry *new)
{
	/* All existing DTE must have V bit set */
	new->data128[0] = DTE_FLAG_V;
	new->data128[1] = 0;
}

/*
 * Note:
 * The old values for the GCR3 table and GPT have been cleared by the caller.
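 *
 * The GCR3 table root pointer is a 4-Kbyte-aligned physical address that
 * is split across three DTE fields below. As a worked example (with a
 * hypothetical address): for gcr3 = 0x100000000, bits 14:12 are 0,
 * bits 30:15 are 0 and bits 51:31 are 0x2, so only the DTE_GCR3_51_31
 * field ends up non-zero.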
 */
static void set_dte_gcr3_table(struct amd_iommu *iommu,
			       struct iommu_dev_data *dev_data,
			       struct dev_table_entry *target)
{
	struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
	u64 gcr3;

	if (!gcr3_info->gcr3_tbl)
		return;

	pr_debug("%s: devid=%#x, glx=%#x, gcr3_tbl=%#llx\n",
		 __func__, dev_data->devid, gcr3_info->glx,
		 (unsigned long long)gcr3_info->gcr3_tbl);

	gcr3 = iommu_virt_to_phys(gcr3_info->gcr3_tbl);

	target->data[0] |= DTE_FLAG_GV |
			   FIELD_PREP(DTE_GLX, gcr3_info->glx) |
			   FIELD_PREP(DTE_GCR3_14_12, gcr3 >> 12);
	if (pdom_is_v2_pgtbl_mode(dev_data->domain))
		target->data[0] |= DTE_FLAG_GIOV;

	target->data[1] |= FIELD_PREP(DTE_GCR3_30_15, gcr3 >> 15) |
			   FIELD_PREP(DTE_GCR3_51_31, gcr3 >> 31);

	/* Guest page table can only support 4 and 5 levels  */
	if (amd_iommu_gpt_level == PAGE_MODE_5_LEVEL)
		target->data[2] |= FIELD_PREP(DTE_GPT_LEVEL_MASK, GUEST_PGTABLE_5_LEVEL);
	else
		target->data[2] |= FIELD_PREP(DTE_GPT_LEVEL_MASK, GUEST_PGTABLE_4_LEVEL);
}

static void set_dte_entry(struct amd_iommu *iommu,
			  struct iommu_dev_data *dev_data)
{
	u16 domid;
	u32 old_domid;
	struct dev_table_entry *initial_dte;
	struct dev_table_entry new = {};
	struct protection_domain *domain = dev_data->domain;
	struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
	struct dev_table_entry *dte = &get_dev_table(iommu)[dev_data->devid];

	if (gcr3_info && gcr3_info->gcr3_tbl)
		domid = dev_data->gcr3_info.domid;
	else
		domid = domain->id;

	make_clear_dte(dev_data, dte, &new);

	if (domain->iop.mode != PAGE_MODE_NONE)
		new.data[0] |= iommu_virt_to_phys(domain->iop.root);

	new.data[0] |= (domain->iop.mode & DEV_ENTRY_MODE_MASK)
		       << DEV_ENTRY_MODE_SHIFT;

	new.data[0] |= DTE_FLAG_IR | DTE_FLAG_IW;

	/*
	 * When SNP is enabled, we can only support TV=1 with non-zero domain ID.
	 * This is prevented by the SNP-enable and IOMMU_DOMAIN_IDENTITY check in
	 * do_iommu_domain_alloc().
	 */
	WARN_ON(amd_iommu_snp_en && (domid == 0));
	new.data[0] |= DTE_FLAG_TV;

	if (dev_data->ppr)
		new.data[0] |= 1ULL << DEV_ENTRY_PPR;

	if (domain->dirty_tracking)
		new.data[0] |= DTE_FLAG_HAD;

	if (dev_data->ats_enabled)
		new.data[1] |= DTE_FLAG_IOTLB;

	old_domid = READ_ONCE(dte->data[1]) & DEV_DOMID_MASK;
	new.data[1] |= domid;

	/*
	 * Restore cached persistent DTE bits, which can be set by information
	 * in the IVRS table. See set_dev_entry_from_acpi().
2076 */ 2077 initial_dte = amd_iommu_get_ivhd_dte_flags(iommu->pci_seg->id, dev_data->devid); 2078 if (initial_dte) { 2079 new.data128[0] |= initial_dte->data128[0]; 2080 new.data128[1] |= initial_dte->data128[1]; 2081 } 2082 2083 set_dte_gcr3_table(iommu, dev_data, &new); 2084 2085 update_dte256(iommu, dev_data, &new); 2086 2087 /* 2088 * A kdump kernel might be replacing a domain ID that was copied from 2089 * the previous kernel--if so, it needs to flush the translation cache 2090 * entries for the old domain ID that is being overwritten 2091 */ 2092 if (old_domid) { 2093 amd_iommu_flush_tlb_domid(iommu, old_domid); 2094 } 2095 } 2096 2097 /* 2098 * Clear DMA-remap related flags to block all DMA (blocked domain) 2099 */ 2100 static void clear_dte_entry(struct amd_iommu *iommu, struct iommu_dev_data *dev_data) 2101 { 2102 struct dev_table_entry new = {}; 2103 struct dev_table_entry *dte = &get_dev_table(iommu)[dev_data->devid]; 2104 2105 make_clear_dte(dev_data, dte, &new); 2106 update_dte256(iommu, dev_data, &new); 2107 } 2108 2109 /* Update and flush DTE for the given device */ 2110 static void dev_update_dte(struct iommu_dev_data *dev_data, bool set) 2111 { 2112 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev); 2113 2114 if (set) 2115 set_dte_entry(iommu, dev_data); 2116 else 2117 clear_dte_entry(iommu, dev_data); 2118 2119 clone_aliases(iommu, dev_data->dev); 2120 device_flush_dte(dev_data); 2121 iommu_completion_wait(iommu); 2122 } 2123 2124 /* 2125 * If domain is SVA capable then initialize GCR3 table. Also if domain is 2126 * in v2 page table mode then update GCR3[0]. 2127 */ 2128 static int init_gcr3_table(struct iommu_dev_data *dev_data, 2129 struct protection_domain *pdom) 2130 { 2131 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); 2132 int max_pasids = dev_data->max_pasids; 2133 int ret = 0; 2134 2135 /* 2136 * If domain is in pt mode then setup GCR3 table only if device 2137 * is PASID capable 2138 */ 2139 if (pdom_is_in_pt_mode(pdom) && !pdev_pasid_supported(dev_data)) 2140 return ret; 2141 2142 /* 2143 * By default, setup GCR3 table to support MAX PASIDs 2144 * supported by the device/IOMMU. 2145 */ 2146 ret = setup_gcr3_table(&dev_data->gcr3_info, iommu, 2147 max_pasids > 0 ?
max_pasids : 1); 2148 if (ret) 2149 return ret; 2150 2151 /* Setup GCR3[0] only if domain is setup with v2 page table mode */ 2152 if (!pdom_is_v2_pgtbl_mode(pdom)) 2153 return ret; 2154 2155 ret = update_gcr3(dev_data, 0, iommu_virt_to_phys(pdom->iop.pgd), true); 2156 if (ret) 2157 free_gcr3_table(&dev_data->gcr3_info); 2158 2159 return ret; 2160 } 2161 2162 static void destroy_gcr3_table(struct iommu_dev_data *dev_data, 2163 struct protection_domain *pdom) 2164 { 2165 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; 2166 2167 if (pdom_is_v2_pgtbl_mode(pdom)) 2168 update_gcr3(dev_data, 0, 0, false); 2169 2170 if (gcr3_info->gcr3_tbl == NULL) 2171 return; 2172 2173 free_gcr3_table(gcr3_info); 2174 } 2175 2176 static int pdom_attach_iommu(struct amd_iommu *iommu, 2177 struct protection_domain *pdom) 2178 { 2179 struct pdom_iommu_info *pdom_iommu_info, *curr; 2180 unsigned long flags; 2181 int ret = 0; 2182 2183 spin_lock_irqsave(&pdom->lock, flags); 2184 2185 pdom_iommu_info = xa_load(&pdom->iommu_array, iommu->index); 2186 if (pdom_iommu_info) { 2187 pdom_iommu_info->refcnt++; 2188 goto out_unlock; 2189 } 2190 2191 pdom_iommu_info = kzalloc(sizeof(*pdom_iommu_info), GFP_ATOMIC); 2192 if (!pdom_iommu_info) { 2193 ret = -ENOMEM; 2194 goto out_unlock; 2195 } 2196 2197 pdom_iommu_info->iommu = iommu; 2198 pdom_iommu_info->refcnt = 1; 2199 2200 curr = xa_cmpxchg(&pdom->iommu_array, iommu->index, 2201 NULL, pdom_iommu_info, GFP_ATOMIC); 2202 if (curr) { 2203 kfree(pdom_iommu_info); 2204 ret = -ENOSPC; 2205 goto out_unlock; 2206 } 2207 2208 out_unlock: 2209 spin_unlock_irqrestore(&pdom->lock, flags); 2210 return ret; 2211 } 2212 2213 static void pdom_detach_iommu(struct amd_iommu *iommu, 2214 struct protection_domain *pdom) 2215 { 2216 struct pdom_iommu_info *pdom_iommu_info; 2217 unsigned long flags; 2218 2219 spin_lock_irqsave(&pdom->lock, flags); 2220 2221 pdom_iommu_info = xa_load(&pdom->iommu_array, iommu->index); 2222 if (!pdom_iommu_info) { 2223 spin_unlock_irqrestore(&pdom->lock, flags); 2224 return; 2225 } 2226 2227 pdom_iommu_info->refcnt--; 2228 if (pdom_iommu_info->refcnt == 0) { 2229 xa_erase(&pdom->iommu_array, iommu->index); 2230 kfree(pdom_iommu_info); 2231 } 2232 2233 spin_unlock_irqrestore(&pdom->lock, flags); 2234 } 2235 2236 /* 2237 * If a device is not yet associated with a domain, this function makes the 2238 * device visible in the domain 2239 */ 2240 static int attach_device(struct device *dev, 2241 struct protection_domain *domain) 2242 { 2243 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); 2244 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); 2245 struct pci_dev *pdev; 2246 unsigned long flags; 2247 int ret = 0; 2248 2249 mutex_lock(&dev_data->mutex); 2250 2251 if (dev_data->domain != NULL) { 2252 ret = -EBUSY; 2253 goto out; 2254 } 2255 2256 /* Do reference counting */ 2257 ret = pdom_attach_iommu(iommu, domain); 2258 if (ret) 2259 goto out; 2260 2261 /* Setup GCR3 table */ 2262 if (pdom_is_sva_capable(domain)) { 2263 ret = init_gcr3_table(dev_data, domain); 2264 if (ret) { 2265 pdom_detach_iommu(iommu, domain); 2266 goto out; 2267 } 2268 } 2269 2270 pdev = dev_is_pci(dev_data->dev) ? to_pci_dev(dev_data->dev) : NULL; 2271 if (pdev && pdom_is_sva_capable(domain)) { 2272 pdev_enable_caps(pdev); 2273 2274 /* 2275 * Device can continue to function even if IOPF 2276 * enablement failed. Hence in error path just 2277 * disable device PRI support. 
2278 */ 2279 if (amd_iommu_iopf_add_device(iommu, dev_data)) 2280 pdev_disable_cap_pri(pdev); 2281 } else if (pdev) { 2282 pdev_enable_cap_ats(pdev); 2283 } 2284 2285 /* Update data structures */ 2286 dev_data->domain = domain; 2287 spin_lock_irqsave(&domain->lock, flags); 2288 list_add(&dev_data->list, &domain->dev_list); 2289 spin_unlock_irqrestore(&domain->lock, flags); 2290 2291 /* Update device table */ 2292 dev_update_dte(dev_data, true); 2293 2294 out: 2295 mutex_unlock(&dev_data->mutex); 2296 2297 return ret; 2298 } 2299 2300 /* 2301 * Removes a device from a protection domain (with devtable_lock held) 2302 */ 2303 static void detach_device(struct device *dev) 2304 { 2305 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); 2306 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); 2307 struct protection_domain *domain = dev_data->domain; 2308 unsigned long flags; 2309 2310 mutex_lock(&dev_data->mutex); 2311 2312 /* 2313 * First check if the device is still attached. It might already 2314 * be detached from its domain because the generic 2315 * iommu_detach_group code detached it and we try again here in 2316 * our alias handling. 2317 */ 2318 if (WARN_ON(!dev_data->domain)) 2319 goto out; 2320 2321 /* Remove IOPF handler */ 2322 if (dev_data->ppr) { 2323 iopf_queue_flush_dev(dev); 2324 amd_iommu_iopf_remove_device(iommu, dev_data); 2325 } 2326 2327 if (dev_is_pci(dev)) 2328 pdev_disable_caps(to_pci_dev(dev)); 2329 2330 /* Clear DTE and flush the entry */ 2331 dev_update_dte(dev_data, false); 2332 2333 /* Flush IOTLB and wait for the flushes to finish */ 2334 spin_lock_irqsave(&domain->lock, flags); 2335 amd_iommu_domain_flush_all(domain); 2336 list_del(&dev_data->list); 2337 spin_unlock_irqrestore(&domain->lock, flags); 2338 2339 /* Clear GCR3 table */ 2340 if (pdom_is_sva_capable(domain)) 2341 destroy_gcr3_table(dev_data, domain); 2342 2343 /* Update data structures */ 2344 dev_data->domain = NULL; 2345 2346 /* decrease reference counters - needs to happen after the flushes */ 2347 pdom_detach_iommu(iommu, domain); 2348 2349 out: 2350 mutex_unlock(&dev_data->mutex); 2351 } 2352 2353 static struct iommu_device *amd_iommu_probe_device(struct device *dev) 2354 { 2355 struct iommu_device *iommu_dev; 2356 struct amd_iommu *iommu; 2357 struct iommu_dev_data *dev_data; 2358 int ret; 2359 2360 if (!check_device(dev)) 2361 return ERR_PTR(-ENODEV); 2362 2363 iommu = rlookup_amd_iommu(dev); 2364 if (!iommu) 2365 return ERR_PTR(-ENODEV); 2366 2367 /* Not registered yet? */ 2368 if (!iommu->iommu.ops) 2369 return ERR_PTR(-ENODEV); 2370 2371 if (dev_iommu_priv_get(dev)) 2372 return &iommu->iommu; 2373 2374 ret = iommu_init_device(iommu, dev); 2375 if (ret) { 2376 dev_err(dev, "Failed to initialize - trying to proceed anyway\n"); 2377 iommu_dev = ERR_PTR(ret); 2378 iommu_ignore_device(iommu, dev); 2379 goto out_err; 2380 } 2381 2382 amd_iommu_set_pci_msi_domain(dev, iommu); 2383 iommu_dev = &iommu->iommu; 2384 2385 /* 2386 * If IOMMU and device supports PASID then it will contain max 2387 * supported PASIDs, else it will be zero. 
2388 */ 2389 dev_data = dev_iommu_priv_get(dev); 2390 if (amd_iommu_pasid_supported() && dev_is_pci(dev) && 2391 pdev_pasid_supported(dev_data)) { 2392 dev_data->max_pasids = min_t(u32, iommu->iommu.max_pasids, 2393 pci_max_pasids(to_pci_dev(dev))); 2394 } 2395 2396 out_err: 2397 2398 iommu_completion_wait(iommu); 2399 2400 if (FEATURE_NUM_INT_REMAP_SUP_2K(amd_iommu_efr2)) 2401 dev_data->max_irqs = MAX_IRQS_PER_TABLE_2K; 2402 else 2403 dev_data->max_irqs = MAX_IRQS_PER_TABLE_512; 2404 2405 if (dev_is_pci(dev)) 2406 pci_prepare_ats(to_pci_dev(dev), PAGE_SHIFT); 2407 2408 return iommu_dev; 2409 } 2410 2411 static void amd_iommu_release_device(struct device *dev) 2412 { 2413 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); 2414 2415 WARN_ON(dev_data->domain); 2416 2417 /* 2418 * We keep dev_data around for unplugged devices and reuse it when the 2419 * device is re-plugged - not doing so would introduce a ton of races. 2420 */ 2421 } 2422 2423 static struct iommu_group *amd_iommu_device_group(struct device *dev) 2424 { 2425 if (dev_is_pci(dev)) 2426 return pci_device_group(dev); 2427 2428 return acpihid_device_group(dev); 2429 } 2430 2431 /***************************************************************************** 2432 * 2433 * The following functions belong to the exported interface of AMD IOMMU 2434 * 2435 * This interface allows access to lower level functions of the IOMMU 2436 * like protection domain handling and assignment of devices to domains 2437 * which is not possible with the dma_ops interface. 2438 * 2439 *****************************************************************************/ 2440 2441 static void protection_domain_init(struct protection_domain *domain) 2442 { 2443 spin_lock_init(&domain->lock); 2444 INIT_LIST_HEAD(&domain->dev_list); 2445 INIT_LIST_HEAD(&domain->dev_data_list); 2446 xa_init(&domain->iommu_array); 2447 } 2448 2449 struct protection_domain *protection_domain_alloc(void) 2450 { 2451 struct protection_domain *domain; 2452 int domid; 2453 2454 domain = kzalloc(sizeof(*domain), GFP_KERNEL); 2455 if (!domain) 2456 return NULL; 2457 2458 domid = pdom_id_alloc(); 2459 if (domid <= 0) { 2460 kfree(domain); 2461 return NULL; 2462 } 2463 domain->id = domid; 2464 2465 protection_domain_init(domain); 2466 2467 return domain; 2468 } 2469 2470 static int pdom_setup_pgtable(struct protection_domain *domain, 2471 struct device *dev) 2472 { 2473 struct io_pgtable_ops *pgtbl_ops; 2474 enum io_pgtable_fmt fmt; 2475 2476 switch (domain->pd_mode) { 2477 case PD_MODE_V1: 2478 fmt = AMD_IOMMU_V1; 2479 break; 2480 case PD_MODE_V2: 2481 fmt = AMD_IOMMU_V2; 2482 break; 2483 } 2484 2485 domain->iop.pgtbl.cfg.amd.nid = dev_to_node(dev); 2486 pgtbl_ops = alloc_io_pgtable_ops(fmt, &domain->iop.pgtbl.cfg, domain); 2487 if (!pgtbl_ops) 2488 return -ENOMEM; 2489 2490 return 0; 2491 } 2492 2493 static inline u64 dma_max_address(enum protection_domain_mode pgtable) 2494 { 2495 if (pgtable == PD_MODE_V1) 2496 return ~0ULL; 2497 2498 /* V2 with 4/5 level page table */ 2499 return ((1ULL << PM_LEVEL_SHIFT(amd_iommu_gpt_level)) - 1); 2500 } 2501 2502 static bool amd_iommu_hd_support(struct amd_iommu *iommu) 2503 { 2504 return iommu && (iommu->features & FEATURE_HDSUP); 2505 } 2506 2507 static struct iommu_domain * 2508 do_iommu_domain_alloc(struct device *dev, u32 flags, 2509 enum protection_domain_mode pgtable) 2510 { 2511 bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING; 2512 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev); 2513 struct protection_domain *domain;
2514 int ret; 2515 2516 domain = protection_domain_alloc(); 2517 if (!domain) 2518 return ERR_PTR(-ENOMEM); 2519 2520 domain->pd_mode = pgtable; 2521 ret = pdom_setup_pgtable(domain, dev); 2522 if (ret) { 2523 pdom_id_free(domain->id); 2524 kfree(domain); 2525 return ERR_PTR(ret); 2526 } 2527 2528 domain->domain.geometry.aperture_start = 0; 2529 domain->domain.geometry.aperture_end = dma_max_address(pgtable); 2530 domain->domain.geometry.force_aperture = true; 2531 domain->domain.pgsize_bitmap = domain->iop.pgtbl.cfg.pgsize_bitmap; 2532 2533 domain->domain.type = IOMMU_DOMAIN_UNMANAGED; 2534 domain->domain.ops = iommu->iommu.ops->default_domain_ops; 2535 2536 if (dirty_tracking) 2537 domain->domain.dirty_ops = &amd_dirty_ops; 2538 2539 return &domain->domain; 2540 } 2541 2542 static struct iommu_domain * 2543 amd_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags, 2544 const struct iommu_user_data *user_data) 2545 2546 { 2547 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev); 2548 const u32 supported_flags = IOMMU_HWPT_ALLOC_DIRTY_TRACKING | 2549 IOMMU_HWPT_ALLOC_PASID; 2550 2551 if ((flags & ~supported_flags) || user_data) 2552 return ERR_PTR(-EOPNOTSUPP); 2553 2554 switch (flags & supported_flags) { 2555 case IOMMU_HWPT_ALLOC_DIRTY_TRACKING: 2556 /* Allocate domain with v1 page table for dirty tracking */ 2557 if (!amd_iommu_hd_support(iommu)) 2558 break; 2559 return do_iommu_domain_alloc(dev, flags, PD_MODE_V1); 2560 case IOMMU_HWPT_ALLOC_PASID: 2561 /* Allocate domain with v2 page table if IOMMU supports PASID. */ 2562 if (!amd_iommu_pasid_supported()) 2563 break; 2564 return do_iommu_domain_alloc(dev, flags, PD_MODE_V2); 2565 case 0: 2566 /* If nothing specific is required use the kernel commandline default */ 2567 return do_iommu_domain_alloc(dev, 0, amd_iommu_pgtable); 2568 default: 2569 break; 2570 } 2571 return ERR_PTR(-EOPNOTSUPP); 2572 } 2573 2574 void amd_iommu_domain_free(struct iommu_domain *dom) 2575 { 2576 struct protection_domain *domain = to_pdomain(dom); 2577 2578 WARN_ON(!list_empty(&domain->dev_list)); 2579 if (domain->domain.type & __IOMMU_DOMAIN_PAGING) 2580 free_io_pgtable_ops(&domain->iop.pgtbl.ops); 2581 pdom_id_free(domain->id); 2582 kfree(domain); 2583 } 2584 2585 static int blocked_domain_attach_device(struct iommu_domain *domain, 2586 struct device *dev) 2587 { 2588 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); 2589 2590 if (dev_data->domain) 2591 detach_device(dev); 2592 2593 /* Clear DTE and flush the entry */ 2594 mutex_lock(&dev_data->mutex); 2595 dev_update_dte(dev_data, false); 2596 mutex_unlock(&dev_data->mutex); 2597 2598 return 0; 2599 } 2600 2601 static int blocked_domain_set_dev_pasid(struct iommu_domain *domain, 2602 struct device *dev, ioasid_t pasid, 2603 struct iommu_domain *old) 2604 { 2605 amd_iommu_remove_dev_pasid(dev, pasid, old); 2606 return 0; 2607 } 2608 2609 static struct iommu_domain blocked_domain = { 2610 .type = IOMMU_DOMAIN_BLOCKED, 2611 .ops = &(const struct iommu_domain_ops) { 2612 .attach_dev = blocked_domain_attach_device, 2613 .set_dev_pasid = blocked_domain_set_dev_pasid, 2614 } 2615 }; 2616 2617 static struct protection_domain identity_domain; 2618 2619 static const struct iommu_domain_ops identity_domain_ops = { 2620 .attach_dev = amd_iommu_attach_device, 2621 }; 2622 2623 void amd_iommu_init_identity_domain(void) 2624 { 2625 struct iommu_domain *domain = &identity_domain.domain; 2626 2627 domain->type = IOMMU_DOMAIN_IDENTITY; 2628 domain->ops = &identity_domain_ops; 2629 domain->owner = 
&amd_iommu_ops; 2630 2631 identity_domain.id = pdom_id_alloc(); 2632 2633 protection_domain_init(&identity_domain); 2634 } 2635 2636 /* Same as blocked domain except it supports only ops->attach_dev() */ 2637 static struct iommu_domain release_domain = { 2638 .type = IOMMU_DOMAIN_BLOCKED, 2639 .ops = &(const struct iommu_domain_ops) { 2640 .attach_dev = blocked_domain_attach_device, 2641 } 2642 }; 2643 2644 static int amd_iommu_attach_device(struct iommu_domain *dom, 2645 struct device *dev) 2646 { 2647 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); 2648 struct protection_domain *domain = to_pdomain(dom); 2649 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev); 2650 int ret; 2651 2652 /* 2653 * Skip attach device to domain if new domain is same as 2654 * the device's current domain 2655 */ 2656 if (dev_data->domain == domain) 2657 return 0; 2658 2659 dev_data->defer_attach = false; 2660 2661 /* 2662 * Restrict to devices with compatible IOMMU hardware support 2663 * when enforcement of dirty tracking is enabled. 2664 */ 2665 if (dom->dirty_ops && !amd_iommu_hd_support(iommu)) 2666 return -EINVAL; 2667 2668 if (dev_data->domain) 2669 detach_device(dev); 2670 2671 ret = attach_device(dev, domain); 2672 2673 #ifdef CONFIG_IRQ_REMAP 2674 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) { 2675 if (dom->type == IOMMU_DOMAIN_UNMANAGED) 2676 dev_data->use_vapic = 1; 2677 else 2678 dev_data->use_vapic = 0; 2679 } 2680 #endif 2681 2682 return ret; 2683 } 2684 2685 static int amd_iommu_iotlb_sync_map(struct iommu_domain *dom, 2686 unsigned long iova, size_t size) 2687 { 2688 struct protection_domain *domain = to_pdomain(dom); 2689 struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops; 2690 2691 if (ops->map_pages) 2692 domain_flush_np_cache(domain, iova, size); 2693 return 0; 2694 } 2695 2696 static int amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova, 2697 phys_addr_t paddr, size_t pgsize, size_t pgcount, 2698 int iommu_prot, gfp_t gfp, size_t *mapped) 2699 { 2700 struct protection_domain *domain = to_pdomain(dom); 2701 struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops; 2702 int prot = 0; 2703 int ret = -EINVAL; 2704 2705 if ((domain->pd_mode == PD_MODE_V1) && 2706 (domain->iop.mode == PAGE_MODE_NONE)) 2707 return -EINVAL; 2708 2709 if (iommu_prot & IOMMU_READ) 2710 prot |= IOMMU_PROT_IR; 2711 if (iommu_prot & IOMMU_WRITE) 2712 prot |= IOMMU_PROT_IW; 2713 2714 if (ops->map_pages) { 2715 ret = ops->map_pages(ops, iova, paddr, pgsize, 2716 pgcount, prot, gfp, mapped); 2717 } 2718 2719 return ret; 2720 } 2721 2722 static void amd_iommu_iotlb_gather_add_page(struct iommu_domain *domain, 2723 struct iommu_iotlb_gather *gather, 2724 unsigned long iova, size_t size) 2725 { 2726 /* 2727 * AMD's IOMMU can flush as many pages as necessary in a single flush. 2728 * Unless we run in a virtual machine, which can be inferred according 2729 * to whether "non-present cache" is on, it is probably best to prefer 2730 * (potentially) too extensive TLB flushing (i.e., more misses) over 2731 * multiple TLB flushes (i.e., more flushes). For virtual machines the 2732 * hypervisor needs to synchronize the host IOMMU PTEs with those of 2733 * the guest, and the trade-off is different: unnecessary TLB flushes 2734 * should be avoided.
2735 */ 2736 if (amd_iommu_np_cache && 2737 iommu_iotlb_gather_is_disjoint(gather, iova, size)) 2738 iommu_iotlb_sync(domain, gather); 2739 2740 iommu_iotlb_gather_add_range(gather, iova, size); 2741 } 2742 2743 static size_t amd_iommu_unmap_pages(struct iommu_domain *dom, unsigned long iova, 2744 size_t pgsize, size_t pgcount, 2745 struct iommu_iotlb_gather *gather) 2746 { 2747 struct protection_domain *domain = to_pdomain(dom); 2748 struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops; 2749 size_t r; 2750 2751 if ((domain->pd_mode == PD_MODE_V1) && 2752 (domain->iop.mode == PAGE_MODE_NONE)) 2753 return 0; 2754 2755 r = (ops->unmap_pages) ? ops->unmap_pages(ops, iova, pgsize, pgcount, NULL) : 0; 2756 2757 if (r) 2758 amd_iommu_iotlb_gather_add_page(dom, gather, iova, r); 2759 2760 return r; 2761 } 2762 2763 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, 2764 dma_addr_t iova) 2765 { 2766 struct protection_domain *domain = to_pdomain(dom); 2767 struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops; 2768 2769 return ops->iova_to_phys(ops, iova); 2770 } 2771 2772 static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap) 2773 { 2774 switch (cap) { 2775 case IOMMU_CAP_CACHE_COHERENCY: 2776 return true; 2777 case IOMMU_CAP_NOEXEC: 2778 return false; 2779 case IOMMU_CAP_PRE_BOOT_PROTECTION: 2780 return amdr_ivrs_remap_support; 2781 case IOMMU_CAP_ENFORCE_CACHE_COHERENCY: 2782 return true; 2783 case IOMMU_CAP_DEFERRED_FLUSH: 2784 return true; 2785 case IOMMU_CAP_DIRTY_TRACKING: { 2786 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev); 2787 2788 return amd_iommu_hd_support(iommu); 2789 } 2790 default: 2791 break; 2792 } 2793 2794 return false; 2795 } 2796 2797 static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain, 2798 bool enable) 2799 { 2800 struct protection_domain *pdomain = to_pdomain(domain); 2801 struct dev_table_entry *dte; 2802 struct iommu_dev_data *dev_data; 2803 bool domain_flush = false; 2804 struct amd_iommu *iommu; 2805 unsigned long flags; 2806 u64 new; 2807 2808 spin_lock_irqsave(&pdomain->lock, flags); 2809 if (!(pdomain->dirty_tracking ^ enable)) { 2810 spin_unlock_irqrestore(&pdomain->lock, flags); 2811 return 0; 2812 } 2813 2814 list_for_each_entry(dev_data, &pdomain->dev_list, list) { 2815 spin_lock(&dev_data->dte_lock); 2816 iommu = get_amd_iommu_from_dev_data(dev_data); 2817 dte = &get_dev_table(iommu)[dev_data->devid]; 2818 new = dte->data[0]; 2819 new = (enable ? 
new | DTE_FLAG_HAD : new & ~DTE_FLAG_HAD); 2820 dte->data[0] = new; 2821 spin_unlock(&dev_data->dte_lock); 2822 2823 /* Flush device DTE */ 2824 device_flush_dte(dev_data); 2825 domain_flush = true; 2826 } 2827 2828 /* Flush IOTLB to mark IOPTE dirty on the next translation(s) */ 2829 if (domain_flush) 2830 amd_iommu_domain_flush_all(pdomain); 2831 2832 pdomain->dirty_tracking = enable; 2833 spin_unlock_irqrestore(&pdomain->lock, flags); 2834 2835 return 0; 2836 } 2837 2838 static int amd_iommu_read_and_clear_dirty(struct iommu_domain *domain, 2839 unsigned long iova, size_t size, 2840 unsigned long flags, 2841 struct iommu_dirty_bitmap *dirty) 2842 { 2843 struct protection_domain *pdomain = to_pdomain(domain); 2844 struct io_pgtable_ops *ops = &pdomain->iop.pgtbl.ops; 2845 unsigned long lflags; 2846 2847 if (!ops || !ops->read_and_clear_dirty) 2848 return -EOPNOTSUPP; 2849 2850 spin_lock_irqsave(&pdomain->lock, lflags); 2851 if (!pdomain->dirty_tracking && dirty->bitmap) { 2852 spin_unlock_irqrestore(&pdomain->lock, lflags); 2853 return -EINVAL; 2854 } 2855 spin_unlock_irqrestore(&pdomain->lock, lflags); 2856 2857 return ops->read_and_clear_dirty(ops, iova, size, flags, dirty); 2858 } 2859 2860 static void amd_iommu_get_resv_regions(struct device *dev, 2861 struct list_head *head) 2862 { 2863 struct iommu_resv_region *region; 2864 struct unity_map_entry *entry; 2865 struct amd_iommu *iommu; 2866 struct amd_iommu_pci_seg *pci_seg; 2867 int devid, sbdf; 2868 2869 sbdf = get_device_sbdf_id(dev); 2870 if (sbdf < 0) 2871 return; 2872 2873 devid = PCI_SBDF_TO_DEVID(sbdf); 2874 iommu = get_amd_iommu_from_dev(dev); 2875 pci_seg = iommu->pci_seg; 2876 2877 list_for_each_entry(entry, &pci_seg->unity_map, list) { 2878 int type, prot = 0; 2879 size_t length; 2880 2881 if (devid < entry->devid_start || devid > entry->devid_end) 2882 continue; 2883 2884 type = IOMMU_RESV_DIRECT; 2885 length = entry->address_end - entry->address_start; 2886 if (entry->prot & IOMMU_PROT_IR) 2887 prot |= IOMMU_READ; 2888 if (entry->prot & IOMMU_PROT_IW) 2889 prot |= IOMMU_WRITE; 2890 if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE) 2891 /* Exclusion range */ 2892 type = IOMMU_RESV_RESERVED; 2893 2894 region = iommu_alloc_resv_region(entry->address_start, 2895 length, prot, type, 2896 GFP_KERNEL); 2897 if (!region) { 2898 dev_err(dev, "Out of memory allocating dm-regions\n"); 2899 return; 2900 } 2901 list_add_tail(®ion->list, head); 2902 } 2903 2904 region = iommu_alloc_resv_region(MSI_RANGE_START, 2905 MSI_RANGE_END - MSI_RANGE_START + 1, 2906 0, IOMMU_RESV_MSI, GFP_KERNEL); 2907 if (!region) 2908 return; 2909 list_add_tail(®ion->list, head); 2910 2911 region = iommu_alloc_resv_region(HT_RANGE_START, 2912 HT_RANGE_END - HT_RANGE_START + 1, 2913 0, IOMMU_RESV_RESERVED, GFP_KERNEL); 2914 if (!region) 2915 return; 2916 list_add_tail(®ion->list, head); 2917 } 2918 2919 static bool amd_iommu_is_attach_deferred(struct device *dev) 2920 { 2921 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); 2922 2923 return dev_data->defer_attach; 2924 } 2925 2926 static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain) 2927 { 2928 struct protection_domain *dom = to_pdomain(domain); 2929 unsigned long flags; 2930 2931 spin_lock_irqsave(&dom->lock, flags); 2932 amd_iommu_domain_flush_all(dom); 2933 spin_unlock_irqrestore(&dom->lock, flags); 2934 } 2935 2936 static void amd_iommu_iotlb_sync(struct iommu_domain *domain, 2937 struct iommu_iotlb_gather *gather) 2938 { 2939 struct protection_domain *dom = to_pdomain(domain); 
2940 unsigned long flags; 2941 2942 spin_lock_irqsave(&dom->lock, flags); 2943 amd_iommu_domain_flush_pages(dom, gather->start, 2944 gather->end - gather->start + 1); 2945 spin_unlock_irqrestore(&dom->lock, flags); 2946 } 2947 2948 static int amd_iommu_def_domain_type(struct device *dev) 2949 { 2950 struct iommu_dev_data *dev_data; 2951 2952 dev_data = dev_iommu_priv_get(dev); 2953 if (!dev_data) 2954 return 0; 2955 2956 /* Always use DMA domain for untrusted device */ 2957 if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted) 2958 return IOMMU_DOMAIN_DMA; 2959 2960 /* 2961 * Do not identity map IOMMUv2 capable devices when: 2962 * - memory encryption is active, because some of those devices 2963 * (AMD GPUs) don't have the encryption bit in their DMA-mask 2964 * and require remapping. 2965 * - SNP is enabled, because it prohibits DTE[Mode]=0. 2966 */ 2967 if (pdev_pasid_supported(dev_data) && 2968 !cc_platform_has(CC_ATTR_MEM_ENCRYPT) && 2969 !amd_iommu_snp_en) { 2970 return IOMMU_DOMAIN_IDENTITY; 2971 } 2972 2973 return 0; 2974 } 2975 2976 static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain) 2977 { 2978 /* IOMMU_PTE_FC is always set */ 2979 return true; 2980 } 2981 2982 static const struct iommu_dirty_ops amd_dirty_ops = { 2983 .set_dirty_tracking = amd_iommu_set_dirty_tracking, 2984 .read_and_clear_dirty = amd_iommu_read_and_clear_dirty, 2985 }; 2986 2987 static int amd_iommu_dev_enable_feature(struct device *dev, 2988 enum iommu_dev_features feat) 2989 { 2990 int ret = 0; 2991 2992 switch (feat) { 2993 case IOMMU_DEV_FEAT_IOPF: 2994 case IOMMU_DEV_FEAT_SVA: 2995 break; 2996 default: 2997 ret = -EINVAL; 2998 break; 2999 } 3000 return ret; 3001 } 3002 3003 static int amd_iommu_dev_disable_feature(struct device *dev, 3004 enum iommu_dev_features feat) 3005 { 3006 int ret = 0; 3007 3008 switch (feat) { 3009 case IOMMU_DEV_FEAT_IOPF: 3010 case IOMMU_DEV_FEAT_SVA: 3011 break; 3012 default: 3013 ret = -EINVAL; 3014 break; 3015 } 3016 return ret; 3017 } 3018 3019 const struct iommu_ops amd_iommu_ops = { 3020 .capable = amd_iommu_capable, 3021 .blocked_domain = &blocked_domain, 3022 .release_domain = &release_domain, 3023 .identity_domain = &identity_domain.domain, 3024 .domain_alloc_paging_flags = amd_iommu_domain_alloc_paging_flags, 3025 .domain_alloc_sva = amd_iommu_domain_alloc_sva, 3026 .probe_device = amd_iommu_probe_device, 3027 .release_device = amd_iommu_release_device, 3028 .device_group = amd_iommu_device_group, 3029 .get_resv_regions = amd_iommu_get_resv_regions, 3030 .is_attach_deferred = amd_iommu_is_attach_deferred, 3031 .def_domain_type = amd_iommu_def_domain_type, 3032 .dev_enable_feat = amd_iommu_dev_enable_feature, 3033 .dev_disable_feat = amd_iommu_dev_disable_feature, 3034 .page_response = amd_iommu_page_response, 3035 .default_domain_ops = &(const struct iommu_domain_ops) { 3036 .attach_dev = amd_iommu_attach_device, 3037 .map_pages = amd_iommu_map_pages, 3038 .unmap_pages = amd_iommu_unmap_pages, 3039 .iotlb_sync_map = amd_iommu_iotlb_sync_map, 3040 .iova_to_phys = amd_iommu_iova_to_phys, 3041 .flush_iotlb_all = amd_iommu_flush_iotlb_all, 3042 .iotlb_sync = amd_iommu_iotlb_sync, 3043 .free = amd_iommu_domain_free, 3044 .enforce_cache_coherency = amd_iommu_enforce_cache_coherency, 3045 } 3046 }; 3047 3048 #ifdef CONFIG_IRQ_REMAP 3049 3050 /***************************************************************************** 3051 * 3052 * Interrupt Remapping Implementation 3053 * 3054 *****************************************************************************/ 
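/* Two IRTE formats are handled by the code below: the 32-bit format (union irte, driven by irte_32_ops) and the 128-bit guest-virtual-APIC capable format (struct irte_ga, driven by irte_128_ops). The format in effect follows AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir); see get_irq_table_size() and irq_remapping_alloc() for where the choice is made. */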
3055 3056 static struct irq_chip amd_ir_chip; 3057 static DEFINE_SPINLOCK(iommu_table_lock); 3058 3059 static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid) 3060 { 3061 int ret; 3062 u64 data; 3063 unsigned long flags; 3064 struct iommu_cmd cmd, cmd2; 3065 3066 if (iommu->irtcachedis_enabled) 3067 return; 3068 3069 build_inv_irt(&cmd, devid); 3070 data = atomic64_inc_return(&iommu->cmd_sem_val); 3071 build_completion_wait(&cmd2, iommu, data); 3072 3073 raw_spin_lock_irqsave(&iommu->lock, flags); 3074 ret = __iommu_queue_command_sync(iommu, &cmd, true); 3075 if (ret) 3076 goto out; 3077 ret = __iommu_queue_command_sync(iommu, &cmd2, false); 3078 if (ret) 3079 goto out; 3080 wait_on_sem(iommu, data); 3081 out: 3082 raw_spin_unlock_irqrestore(&iommu->lock, flags); 3083 } 3084 3085 static inline u8 iommu_get_int_tablen(struct iommu_dev_data *dev_data) 3086 { 3087 if (dev_data && dev_data->max_irqs == MAX_IRQS_PER_TABLE_2K) 3088 return DTE_INTTABLEN_2K; 3089 return DTE_INTTABLEN_512; 3090 } 3091 3092 static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid, 3093 struct irq_remap_table *table) 3094 { 3095 u64 new; 3096 struct dev_table_entry *dte = &get_dev_table(iommu)[devid]; 3097 struct iommu_dev_data *dev_data = search_dev_data(iommu, devid); 3098 3099 if (dev_data) 3100 spin_lock(&dev_data->dte_lock); 3101 3102 new = READ_ONCE(dte->data[2]); 3103 new &= ~DTE_IRQ_PHYS_ADDR_MASK; 3104 new |= iommu_virt_to_phys(table->table); 3105 new |= DTE_IRQ_REMAP_INTCTL; 3106 new |= iommu_get_int_tablen(dev_data); 3107 new |= DTE_IRQ_REMAP_ENABLE; 3108 WRITE_ONCE(dte->data[2], new); 3109 3110 if (dev_data) 3111 spin_unlock(&dev_data->dte_lock); 3112 } 3113 3114 static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid) 3115 { 3116 struct irq_remap_table *table; 3117 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; 3118 3119 if (WARN_ONCE(!pci_seg->rlookup_table[devid], 3120 "%s: no iommu for devid %x:%x\n", 3121 __func__, pci_seg->id, devid)) 3122 return NULL; 3123 3124 table = pci_seg->irq_lookup_table[devid]; 3125 if (WARN_ONCE(!table, "%s: no table for devid %x:%x\n", 3126 __func__, pci_seg->id, devid)) 3127 return NULL; 3128 3129 return table; 3130 } 3131 3132 static struct irq_remap_table *__alloc_irq_table(int nid, int order) 3133 { 3134 struct irq_remap_table *table; 3135 3136 table = kzalloc(sizeof(*table), GFP_KERNEL); 3137 if (!table) 3138 return NULL; 3139 3140 table->table = iommu_alloc_pages_node(nid, GFP_KERNEL, order); 3141 if (!table->table) { 3142 kfree(table); 3143 return NULL; 3144 } 3145 raw_spin_lock_init(&table->lock); 3146 3147 return table; 3148 } 3149 3150 static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid, 3151 struct irq_remap_table *table) 3152 { 3153 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; 3154 3155 pci_seg->irq_lookup_table[devid] = table; 3156 set_dte_irq_entry(iommu, devid, table); 3157 iommu_flush_dte(iommu, devid); 3158 } 3159 3160 static int set_remap_table_entry_alias(struct pci_dev *pdev, u16 alias, 3161 void *data) 3162 { 3163 struct irq_remap_table *table = data; 3164 struct amd_iommu_pci_seg *pci_seg; 3165 struct amd_iommu *iommu = rlookup_amd_iommu(&pdev->dev); 3166 3167 if (!iommu) 3168 return -EINVAL; 3169 3170 pci_seg = iommu->pci_seg; 3171 pci_seg->irq_lookup_table[alias] = table; 3172 set_dte_irq_entry(iommu, alias, table); 3173 iommu_flush_dte(pci_seg->rlookup_table[alias], alias); 3174 3175 return 0; 3176 } 3177 3178 static inline size_t get_irq_table_size(unsigned int 
max_irqs) 3179 { 3180 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir)) 3181 return max_irqs * sizeof(u32); 3182 3183 return max_irqs * (sizeof(u64) * 2); 3184 } 3185 3186 static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu, 3187 u16 devid, struct pci_dev *pdev, 3188 unsigned int max_irqs) 3189 { 3190 struct irq_remap_table *table = NULL; 3191 struct irq_remap_table *new_table = NULL; 3192 struct amd_iommu_pci_seg *pci_seg; 3193 unsigned long flags; 3194 int order = get_order(get_irq_table_size(max_irqs)); 3195 int nid = iommu && iommu->dev ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE; 3196 u16 alias; 3197 3198 spin_lock_irqsave(&iommu_table_lock, flags); 3199 3200 pci_seg = iommu->pci_seg; 3201 table = pci_seg->irq_lookup_table[devid]; 3202 if (table) 3203 goto out_unlock; 3204 3205 alias = pci_seg->alias_table[devid]; 3206 table = pci_seg->irq_lookup_table[alias]; 3207 if (table) { 3208 set_remap_table_entry(iommu, devid, table); 3209 goto out_wait; 3210 } 3211 spin_unlock_irqrestore(&iommu_table_lock, flags); 3212 3213 /* Nothing there yet, allocate new irq remapping table */ 3214 new_table = __alloc_irq_table(nid, order); 3215 if (!new_table) 3216 return NULL; 3217 3218 spin_lock_irqsave(&iommu_table_lock, flags); 3219 3220 table = pci_seg->irq_lookup_table[devid]; 3221 if (table) 3222 goto out_unlock; 3223 3224 table = pci_seg->irq_lookup_table[alias]; 3225 if (table) { 3226 set_remap_table_entry(iommu, devid, table); 3227 goto out_wait; 3228 } 3229 3230 table = new_table; 3231 new_table = NULL; 3232 3233 if (pdev) 3234 pci_for_each_dma_alias(pdev, set_remap_table_entry_alias, 3235 table); 3236 else 3237 set_remap_table_entry(iommu, devid, table); 3238 3239 if (devid != alias) 3240 set_remap_table_entry(iommu, alias, table); 3241 3242 out_wait: 3243 iommu_completion_wait(iommu); 3244 3245 out_unlock: 3246 spin_unlock_irqrestore(&iommu_table_lock, flags); 3247 3248 if (new_table) { 3249 iommu_free_pages(new_table->table, order); 3250 kfree(new_table); 3251 } 3252 return table; 3253 } 3254 3255 static int alloc_irq_index(struct amd_iommu *iommu, u16 devid, int count, 3256 bool align, struct pci_dev *pdev, 3257 unsigned long max_irqs) 3258 { 3259 struct irq_remap_table *table; 3260 int index, c, alignment = 1; 3261 unsigned long flags; 3262 3263 table = alloc_irq_table(iommu, devid, pdev, max_irqs); 3264 if (!table) 3265 return -ENODEV; 3266 3267 if (align) 3268 alignment = roundup_pow_of_two(count); 3269 3270 raw_spin_lock_irqsave(&table->lock, flags); 3271 3272 /* Scan table for free entries */ 3273 for (index = ALIGN(table->min_index, alignment), c = 0; 3274 index < max_irqs;) { 3275 if (!iommu->irte_ops->is_allocated(table, index)) { 3276 c += 1; 3277 } else { 3278 c = 0; 3279 index = ALIGN(index + 1, alignment); 3280 continue; 3281 } 3282 3283 if (c == count) { 3284 for (; c != 0; --c) 3285 iommu->irte_ops->set_allocated(table, index - c + 1); 3286 3287 index -= count - 1; 3288 goto out; 3289 } 3290 3291 index++; 3292 } 3293 3294 index = -ENOSPC; 3295 3296 out: 3297 raw_spin_unlock_irqrestore(&table->lock, flags); 3298 3299 return index; 3300 } 3301 3302 static int __modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index, 3303 struct irte_ga *irte) 3304 { 3305 struct irq_remap_table *table; 3306 struct irte_ga *entry; 3307 unsigned long flags; 3308 u128 old; 3309 3310 table = get_irq_table(iommu, devid); 3311 if (!table) 3312 return -ENOMEM; 3313 3314 raw_spin_lock_irqsave(&table->lock, flags); 3315 3316 entry = (struct irte_ga *)table->table; 3317 entry = 
&entry[index]; 3318 3319 /* 3320 * We use cmpxchg16 to atomically update the 128-bit IRTE, 3321 * and it cannot be updated by the hardware or other processors 3322 * behind us, so the return value of cmpxchg16 should be the 3323 * same as the old value. 3324 */ 3325 old = entry->irte; 3326 WARN_ON(!try_cmpxchg128(&entry->irte, &old, irte->irte)); 3327 3328 raw_spin_unlock_irqrestore(&table->lock, flags); 3329 3330 return 0; 3331 } 3332 3333 static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index, 3334 struct irte_ga *irte) 3335 { 3336 bool ret; 3337 3338 ret = __modify_irte_ga(iommu, devid, index, irte); 3339 if (ret) 3340 return ret; 3341 3342 iommu_flush_irt_and_complete(iommu, devid); 3343 3344 return 0; 3345 } 3346 3347 static int modify_irte(struct amd_iommu *iommu, 3348 u16 devid, int index, union irte *irte) 3349 { 3350 struct irq_remap_table *table; 3351 unsigned long flags; 3352 3353 table = get_irq_table(iommu, devid); 3354 if (!table) 3355 return -ENOMEM; 3356 3357 raw_spin_lock_irqsave(&table->lock, flags); 3358 table->table[index] = irte->val; 3359 raw_spin_unlock_irqrestore(&table->lock, flags); 3360 3361 iommu_flush_irt_and_complete(iommu, devid); 3362 3363 return 0; 3364 } 3365 3366 static void free_irte(struct amd_iommu *iommu, u16 devid, int index) 3367 { 3368 struct irq_remap_table *table; 3369 unsigned long flags; 3370 3371 table = get_irq_table(iommu, devid); 3372 if (!table) 3373 return; 3374 3375 raw_spin_lock_irqsave(&table->lock, flags); 3376 iommu->irte_ops->clear_allocated(table, index); 3377 raw_spin_unlock_irqrestore(&table->lock, flags); 3378 3379 iommu_flush_irt_and_complete(iommu, devid); 3380 } 3381 3382 static void irte_prepare(void *entry, 3383 u32 delivery_mode, bool dest_mode, 3384 u8 vector, u32 dest_apicid, int devid) 3385 { 3386 union irte *irte = (union irte *) entry; 3387 3388 irte->val = 0; 3389 irte->fields.vector = vector; 3390 irte->fields.int_type = delivery_mode; 3391 irte->fields.destination = dest_apicid; 3392 irte->fields.dm = dest_mode; 3393 irte->fields.valid = 1; 3394 } 3395 3396 static void irte_ga_prepare(void *entry, 3397 u32 delivery_mode, bool dest_mode, 3398 u8 vector, u32 dest_apicid, int devid) 3399 { 3400 struct irte_ga *irte = (struct irte_ga *) entry; 3401 3402 irte->lo.val = 0; 3403 irte->hi.val = 0; 3404 irte->lo.fields_remap.int_type = delivery_mode; 3405 irte->lo.fields_remap.dm = dest_mode; 3406 irte->hi.fields.vector = vector; 3407 irte->lo.fields_remap.destination = APICID_TO_IRTE_DEST_LO(dest_apicid); 3408 irte->hi.fields.destination = APICID_TO_IRTE_DEST_HI(dest_apicid); 3409 irte->lo.fields_remap.valid = 1; 3410 } 3411 3412 static void irte_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) 3413 { 3414 union irte *irte = (union irte *) entry; 3415 3416 irte->fields.valid = 1; 3417 modify_irte(iommu, devid, index, irte); 3418 } 3419 3420 static void irte_ga_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) 3421 { 3422 struct irte_ga *irte = (struct irte_ga *) entry; 3423 3424 irte->lo.fields_remap.valid = 1; 3425 modify_irte_ga(iommu, devid, index, irte); 3426 } 3427 3428 static void irte_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) 3429 { 3430 union irte *irte = (union irte *) entry; 3431 3432 irte->fields.valid = 0; 3433 modify_irte(iommu, devid, index, irte); 3434 } 3435 3436 static void irte_ga_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) 3437 { 3438 struct irte_ga *irte = (struct irte_ga *) entry; 3439 3440 
irte->lo.fields_remap.valid = 0; 3441 modify_irte_ga(iommu, devid, index, irte); 3442 } 3443 3444 static void irte_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index, 3445 u8 vector, u32 dest_apicid) 3446 { 3447 union irte *irte = (union irte *) entry; 3448 3449 irte->fields.vector = vector; 3450 irte->fields.destination = dest_apicid; 3451 modify_irte(iommu, devid, index, irte); 3452 } 3453 3454 static void irte_ga_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index, 3455 u8 vector, u32 dest_apicid) 3456 { 3457 struct irte_ga *irte = (struct irte_ga *) entry; 3458 3459 if (!irte->lo.fields_remap.guest_mode) { 3460 irte->hi.fields.vector = vector; 3461 irte->lo.fields_remap.destination = 3462 APICID_TO_IRTE_DEST_LO(dest_apicid); 3463 irte->hi.fields.destination = 3464 APICID_TO_IRTE_DEST_HI(dest_apicid); 3465 modify_irte_ga(iommu, devid, index, irte); 3466 } 3467 } 3468 3469 #define IRTE_ALLOCATED (~1U) 3470 static void irte_set_allocated(struct irq_remap_table *table, int index) 3471 { 3472 table->table[index] = IRTE_ALLOCATED; 3473 } 3474 3475 static void irte_ga_set_allocated(struct irq_remap_table *table, int index) 3476 { 3477 struct irte_ga *ptr = (struct irte_ga *)table->table; 3478 struct irte_ga *irte = &ptr[index]; 3479 3480 memset(&irte->lo.val, 0, sizeof(u64)); 3481 memset(&irte->hi.val, 0, sizeof(u64)); 3482 irte->hi.fields.vector = 0xff; 3483 } 3484 3485 static bool irte_is_allocated(struct irq_remap_table *table, int index) 3486 { 3487 union irte *ptr = (union irte *)table->table; 3488 union irte *irte = &ptr[index]; 3489 3490 return irte->val != 0; 3491 } 3492 3493 static bool irte_ga_is_allocated(struct irq_remap_table *table, int index) 3494 { 3495 struct irte_ga *ptr = (struct irte_ga *)table->table; 3496 struct irte_ga *irte = &ptr[index]; 3497 3498 return irte->hi.fields.vector != 0; 3499 } 3500 3501 static void irte_clear_allocated(struct irq_remap_table *table, int index) 3502 { 3503 table->table[index] = 0; 3504 } 3505 3506 static void irte_ga_clear_allocated(struct irq_remap_table *table, int index) 3507 { 3508 struct irte_ga *ptr = (struct irte_ga *)table->table; 3509 struct irte_ga *irte = &ptr[index]; 3510 3511 memset(&irte->lo.val, 0, sizeof(u64)); 3512 memset(&irte->hi.val, 0, sizeof(u64)); 3513 } 3514 3515 static int get_devid(struct irq_alloc_info *info) 3516 { 3517 switch (info->type) { 3518 case X86_IRQ_ALLOC_TYPE_IOAPIC: 3519 return get_ioapic_devid(info->devid); 3520 case X86_IRQ_ALLOC_TYPE_HPET: 3521 return get_hpet_devid(info->devid); 3522 case X86_IRQ_ALLOC_TYPE_PCI_MSI: 3523 case X86_IRQ_ALLOC_TYPE_PCI_MSIX: 3524 return get_device_sbdf_id(msi_desc_to_dev(info->desc)); 3525 default: 3526 WARN_ON_ONCE(1); 3527 return -1; 3528 } 3529 } 3530 3531 struct irq_remap_ops amd_iommu_irq_ops = { 3532 .prepare = amd_iommu_prepare, 3533 .enable = amd_iommu_enable, 3534 .disable = amd_iommu_disable, 3535 .reenable = amd_iommu_reenable, 3536 .enable_faulting = amd_iommu_enable_faulting, 3537 }; 3538 3539 static void fill_msi_msg(struct msi_msg *msg, u32 index) 3540 { 3541 msg->data = index; 3542 msg->address_lo = 0; 3543 msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW; 3544 /* 3545 * The struct msi_msg.dest_mode_logical is used to set the DM bit 3546 * in MSI Message Address Register. For devices with 2K int-remap support, 3547 * this bit must be set to 1 regardless of the actual destination 3548 * mode, which is signified by the IRTE[DM].
3549 */ 3550 if (FEATURE_NUM_INT_REMAP_SUP_2K(amd_iommu_efr2)) 3551 msg->arch_addr_lo.dest_mode_logical = true; 3552 msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH; 3553 } 3554 3555 static void irq_remapping_prepare_irte(struct amd_ir_data *data, 3556 struct irq_cfg *irq_cfg, 3557 struct irq_alloc_info *info, 3558 int devid, int index, int sub_handle) 3559 { 3560 struct irq_2_irte *irte_info = &data->irq_2_irte; 3561 struct amd_iommu *iommu = data->iommu; 3562 3563 if (!iommu) 3564 return; 3565 3566 data->irq_2_irte.devid = devid; 3567 data->irq_2_irte.index = index + sub_handle; 3568 iommu->irte_ops->prepare(data->entry, APIC_DELIVERY_MODE_FIXED, 3569 apic->dest_mode_logical, irq_cfg->vector, 3570 irq_cfg->dest_apicid, devid); 3571 3572 switch (info->type) { 3573 case X86_IRQ_ALLOC_TYPE_IOAPIC: 3574 case X86_IRQ_ALLOC_TYPE_HPET: 3575 case X86_IRQ_ALLOC_TYPE_PCI_MSI: 3576 case X86_IRQ_ALLOC_TYPE_PCI_MSIX: 3577 fill_msi_msg(&data->msi_entry, irte_info->index); 3578 break; 3579 3580 default: 3581 BUG_ON(1); 3582 break; 3583 } 3584 } 3585 3586 struct amd_irte_ops irte_32_ops = { 3587 .prepare = irte_prepare, 3588 .activate = irte_activate, 3589 .deactivate = irte_deactivate, 3590 .set_affinity = irte_set_affinity, 3591 .set_allocated = irte_set_allocated, 3592 .is_allocated = irte_is_allocated, 3593 .clear_allocated = irte_clear_allocated, 3594 }; 3595 3596 struct amd_irte_ops irte_128_ops = { 3597 .prepare = irte_ga_prepare, 3598 .activate = irte_ga_activate, 3599 .deactivate = irte_ga_deactivate, 3600 .set_affinity = irte_ga_set_affinity, 3601 .set_allocated = irte_ga_set_allocated, 3602 .is_allocated = irte_ga_is_allocated, 3603 .clear_allocated = irte_ga_clear_allocated, 3604 }; 3605 3606 static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq, 3607 unsigned int nr_irqs, void *arg) 3608 { 3609 struct irq_alloc_info *info = arg; 3610 struct irq_data *irq_data; 3611 struct amd_ir_data *data = NULL; 3612 struct amd_iommu *iommu; 3613 struct irq_cfg *cfg; 3614 struct iommu_dev_data *dev_data; 3615 unsigned long max_irqs; 3616 int i, ret, devid, seg, sbdf; 3617 int index; 3618 3619 if (!info) 3620 return -EINVAL; 3621 if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI) 3622 return -EINVAL; 3623 3624 sbdf = get_devid(info); 3625 if (sbdf < 0) 3626 return -EINVAL; 3627 3628 seg = PCI_SBDF_TO_SEGID(sbdf); 3629 devid = PCI_SBDF_TO_DEVID(sbdf); 3630 iommu = __rlookup_amd_iommu(seg, devid); 3631 if (!iommu) 3632 return -EINVAL; 3633 3634 dev_data = search_dev_data(iommu, devid); 3635 max_irqs = dev_data ? dev_data->max_irqs : MAX_IRQS_PER_TABLE_512; 3636 3637 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); 3638 if (ret < 0) 3639 return ret; 3640 3641 if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) { 3642 struct irq_remap_table *table; 3643 3644 table = alloc_irq_table(iommu, devid, NULL, max_irqs); 3645 if (table) { 3646 if (!table->min_index) { 3647 /* 3648 * Keep the first 32 indexes free for IOAPIC 3649 * interrupts. 
3650 */ 3651 table->min_index = 32; 3652 for (i = 0; i < 32; ++i) 3653 iommu->irte_ops->set_allocated(table, i); 3654 } 3655 WARN_ON(table->min_index != 32); 3656 index = info->ioapic.pin; 3657 } else { 3658 index = -ENOMEM; 3659 } 3660 } else if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI || 3661 info->type == X86_IRQ_ALLOC_TYPE_PCI_MSIX) { 3662 bool align = (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI); 3663 3664 index = alloc_irq_index(iommu, devid, nr_irqs, align, 3665 msi_desc_to_pci_dev(info->desc), 3666 max_irqs); 3667 } else { 3668 index = alloc_irq_index(iommu, devid, nr_irqs, false, NULL, 3669 max_irqs); 3670 } 3671 3672 if (index < 0) { 3673 pr_warn("Failed to allocate IRTE\n"); 3674 ret = index; 3675 goto out_free_parent; 3676 } 3677 3678 for (i = 0; i < nr_irqs; i++) { 3679 irq_data = irq_domain_get_irq_data(domain, virq + i); 3680 cfg = irq_data ? irqd_cfg(irq_data) : NULL; 3681 if (!cfg) { 3682 ret = -EINVAL; 3683 goto out_free_data; 3684 } 3685 3686 ret = -ENOMEM; 3687 data = kzalloc(sizeof(*data), GFP_KERNEL); 3688 if (!data) 3689 goto out_free_data; 3690 3691 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir)) 3692 data->entry = kzalloc(sizeof(union irte), GFP_KERNEL); 3693 else 3694 data->entry = kzalloc(sizeof(struct irte_ga), 3695 GFP_KERNEL); 3696 if (!data->entry) { 3697 kfree(data); 3698 goto out_free_data; 3699 } 3700 3701 data->iommu = iommu; 3702 irq_data->hwirq = (devid << 16) + i; 3703 irq_data->chip_data = data; 3704 irq_data->chip = &amd_ir_chip; 3705 irq_remapping_prepare_irte(data, cfg, info, devid, index, i); 3706 } 3707 3708 return 0; 3709 3710 out_free_data: 3711 for (i--; i >= 0; i--) { 3712 irq_data = irq_domain_get_irq_data(domain, virq + i); 3713 if (irq_data) 3714 kfree(irq_data->chip_data); 3715 } 3716 for (i = 0; i < nr_irqs; i++) 3717 free_irte(iommu, devid, index + i); 3718 out_free_parent: 3719 irq_domain_free_irqs_common(domain, virq, nr_irqs); 3720 return ret; 3721 } 3722 3723 static void irq_remapping_free(struct irq_domain *domain, unsigned int virq, 3724 unsigned int nr_irqs) 3725 { 3726 struct irq_2_irte *irte_info; 3727 struct irq_data *irq_data; 3728 struct amd_ir_data *data; 3729 int i; 3730 3731 for (i = 0; i < nr_irqs; i++) { 3732 irq_data = irq_domain_get_irq_data(domain, virq + i); 3733 if (irq_data && irq_data->chip_data) { 3734 data = irq_data->chip_data; 3735 irte_info = &data->irq_2_irte; 3736 free_irte(data->iommu, irte_info->devid, irte_info->index); 3737 kfree(data->entry); 3738 kfree(data); 3739 } 3740 } 3741 irq_domain_free_irqs_common(domain, virq, nr_irqs); 3742 } 3743 3744 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu, 3745 struct amd_ir_data *ir_data, 3746 struct irq_2_irte *irte_info, 3747 struct irq_cfg *cfg); 3748 3749 static int irq_remapping_activate(struct irq_domain *domain, 3750 struct irq_data *irq_data, bool reserve) 3751 { 3752 struct amd_ir_data *data = irq_data->chip_data; 3753 struct irq_2_irte *irte_info = &data->irq_2_irte; 3754 struct amd_iommu *iommu = data->iommu; 3755 struct irq_cfg *cfg = irqd_cfg(irq_data); 3756 3757 if (!iommu) 3758 return 0; 3759 3760 iommu->irte_ops->activate(iommu, data->entry, irte_info->devid, 3761 irte_info->index); 3762 amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg); 3763 return 0; 3764 } 3765 3766 static void irq_remapping_deactivate(struct irq_domain *domain, 3767 struct irq_data *irq_data) 3768 { 3769 struct amd_ir_data *data = irq_data->chip_data; 3770 struct irq_2_irte *irte_info = &data->irq_2_irte; 3771 struct amd_iommu *iommu = 
data->iommu; 3772 3773 if (iommu) 3774 iommu->irte_ops->deactivate(iommu, data->entry, irte_info->devid, 3775 irte_info->index); 3776 } 3777 3778 static int irq_remapping_select(struct irq_domain *d, struct irq_fwspec *fwspec, 3779 enum irq_domain_bus_token bus_token) 3780 { 3781 struct amd_iommu *iommu; 3782 int devid = -1; 3783 3784 if (!amd_iommu_irq_remap) 3785 return 0; 3786 3787 if (x86_fwspec_is_ioapic(fwspec)) 3788 devid = get_ioapic_devid(fwspec->param[0]); 3789 else if (x86_fwspec_is_hpet(fwspec)) 3790 devid = get_hpet_devid(fwspec->param[0]); 3791 3792 if (devid < 0) 3793 return 0; 3794 iommu = __rlookup_amd_iommu((devid >> 16), (devid & 0xffff)); 3795 3796 return iommu && iommu->ir_domain == d; 3797 } 3798 3799 static const struct irq_domain_ops amd_ir_domain_ops = { 3800 .select = irq_remapping_select, 3801 .alloc = irq_remapping_alloc, 3802 .free = irq_remapping_free, 3803 .activate = irq_remapping_activate, 3804 .deactivate = irq_remapping_deactivate, 3805 }; 3806 3807 int amd_iommu_activate_guest_mode(void *data) 3808 { 3809 struct amd_ir_data *ir_data = (struct amd_ir_data *)data; 3810 struct irte_ga *entry = (struct irte_ga *) ir_data->entry; 3811 u64 valid; 3812 3813 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || !entry) 3814 return 0; 3815 3816 valid = entry->lo.fields_vapic.valid; 3817 3818 entry->lo.val = 0; 3819 entry->hi.val = 0; 3820 3821 entry->lo.fields_vapic.valid = valid; 3822 entry->lo.fields_vapic.guest_mode = 1; 3823 entry->lo.fields_vapic.ga_log_intr = 1; 3824 entry->hi.fields.ga_root_ptr = ir_data->ga_root_ptr; 3825 entry->hi.fields.vector = ir_data->ga_vector; 3826 entry->lo.fields_vapic.ga_tag = ir_data->ga_tag; 3827 3828 return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, 3829 ir_data->irq_2_irte.index, entry); 3830 } 3831 EXPORT_SYMBOL(amd_iommu_activate_guest_mode); 3832 3833 int amd_iommu_deactivate_guest_mode(void *data) 3834 { 3835 struct amd_ir_data *ir_data = (struct amd_ir_data *)data; 3836 struct irte_ga *entry = (struct irte_ga *) ir_data->entry; 3837 struct irq_cfg *cfg = ir_data->cfg; 3838 u64 valid; 3839 3840 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || 3841 !entry || !entry->lo.fields_vapic.guest_mode) 3842 return 0; 3843 3844 valid = entry->lo.fields_remap.valid; 3845 3846 entry->lo.val = 0; 3847 entry->hi.val = 0; 3848 3849 entry->lo.fields_remap.valid = valid; 3850 entry->lo.fields_remap.dm = apic->dest_mode_logical; 3851 entry->lo.fields_remap.int_type = APIC_DELIVERY_MODE_FIXED; 3852 entry->hi.fields.vector = cfg->vector; 3853 entry->lo.fields_remap.destination = 3854 APICID_TO_IRTE_DEST_LO(cfg->dest_apicid); 3855 entry->hi.fields.destination = 3856 APICID_TO_IRTE_DEST_HI(cfg->dest_apicid); 3857 3858 return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, 3859 ir_data->irq_2_irte.index, entry); 3860 } 3861 EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode); 3862 3863 static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info) 3864 { 3865 int ret; 3866 struct amd_iommu_pi_data *pi_data = vcpu_info; 3867 struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data; 3868 struct amd_ir_data *ir_data = data->chip_data; 3869 struct irq_2_irte *irte_info = &ir_data->irq_2_irte; 3870 struct iommu_dev_data *dev_data; 3871 3872 if (WARN_ON_ONCE(!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))) 3873 return -EINVAL; 3874 3875 if (ir_data->iommu == NULL) 3876 return -EINVAL; 3877 3878 dev_data = search_dev_data(ir_data->iommu, irte_info->devid); 3879 3880 /* Note: 3881 * This device has never been set up for 
guest mode. 3882 * we should not modify the IRTE 3883 */ 3884 if (!dev_data || !dev_data->use_vapic) 3885 return -EINVAL; 3886 3887 ir_data->cfg = irqd_cfg(data); 3888 pi_data->ir_data = ir_data; 3889 3890 pi_data->prev_ga_tag = ir_data->cached_ga_tag; 3891 if (pi_data->is_guest_mode) { 3892 ir_data->ga_root_ptr = (pi_data->base >> 12); 3893 ir_data->ga_vector = vcpu_pi_info->vector; 3894 ir_data->ga_tag = pi_data->ga_tag; 3895 ret = amd_iommu_activate_guest_mode(ir_data); 3896 if (!ret) 3897 ir_data->cached_ga_tag = pi_data->ga_tag; 3898 } else { 3899 ret = amd_iommu_deactivate_guest_mode(ir_data); 3900 3901 /* 3902 * This communicates the ga_tag back to the caller 3903 * so that it can do all the necessary clean up. 3904 */ 3905 if (!ret) 3906 ir_data->cached_ga_tag = 0; 3907 } 3908 3909 return ret; 3910 } 3911 3912 3913 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu, 3914 struct amd_ir_data *ir_data, 3915 struct irq_2_irte *irte_info, 3916 struct irq_cfg *cfg) 3917 { 3918 3919 /* 3920 * Atomically updates the IRTE with the new destination, vector 3921 * and flushes the interrupt entry cache. 3922 */ 3923 iommu->irte_ops->set_affinity(iommu, ir_data->entry, irte_info->devid, 3924 irte_info->index, cfg->vector, 3925 cfg->dest_apicid); 3926 } 3927 3928 static int amd_ir_set_affinity(struct irq_data *data, 3929 const struct cpumask *mask, bool force) 3930 { 3931 struct amd_ir_data *ir_data = data->chip_data; 3932 struct irq_2_irte *irte_info = &ir_data->irq_2_irte; 3933 struct irq_cfg *cfg = irqd_cfg(data); 3934 struct irq_data *parent = data->parent_data; 3935 struct amd_iommu *iommu = ir_data->iommu; 3936 int ret; 3937 3938 if (!iommu) 3939 return -ENODEV; 3940 3941 ret = parent->chip->irq_set_affinity(parent, mask, force); 3942 if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE) 3943 return ret; 3944 3945 amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg); 3946 /* 3947 * After this point, all the interrupts will start arriving 3948 * at the new destination. So, time to cleanup the previous 3949 * vector allocation. 
3950 */ 3951 vector_schedule_cleanup(cfg); 3952 3953 return IRQ_SET_MASK_OK_DONE; 3954 } 3955 3956 static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg) 3957 { 3958 struct amd_ir_data *ir_data = irq_data->chip_data; 3959 3960 *msg = ir_data->msi_entry; 3961 } 3962 3963 static struct irq_chip amd_ir_chip = { 3964 .name = "AMD-IR", 3965 .irq_ack = apic_ack_irq, 3966 .irq_set_affinity = amd_ir_set_affinity, 3967 .irq_set_vcpu_affinity = amd_ir_set_vcpu_affinity, 3968 .irq_compose_msi_msg = ir_compose_msi_msg, 3969 }; 3970 3971 static const struct msi_parent_ops amdvi_msi_parent_ops = { 3972 .supported_flags = X86_VECTOR_MSI_FLAGS_SUPPORTED | MSI_FLAG_MULTI_PCI_MSI, 3973 .prefix = "IR-", 3974 .init_dev_msi_info = msi_parent_init_dev_msi_info, 3975 }; 3976 3977 int amd_iommu_create_irq_domain(struct amd_iommu *iommu) 3978 { 3979 struct fwnode_handle *fn; 3980 3981 fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index); 3982 if (!fn) 3983 return -ENOMEM; 3984 iommu->ir_domain = irq_domain_create_hierarchy(arch_get_ir_parent_domain(), 0, 0, 3985 fn, &amd_ir_domain_ops, iommu); 3986 if (!iommu->ir_domain) { 3987 irq_domain_free_fwnode(fn); 3988 return -ENOMEM; 3989 } 3990 3991 irq_domain_update_bus_token(iommu->ir_domain, DOMAIN_BUS_AMDVI); 3992 iommu->ir_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT | 3993 IRQ_DOMAIN_FLAG_ISOLATED_MSI; 3994 iommu->ir_domain->msi_parent_ops = &amdvi_msi_parent_ops; 3995 3996 return 0; 3997 } 3998 3999 int amd_iommu_update_ga(int cpu, bool is_run, void *data) 4000 { 4001 struct amd_ir_data *ir_data = (struct amd_ir_data *)data; 4002 struct irte_ga *entry = (struct irte_ga *) ir_data->entry; 4003 4004 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || 4005 !entry || !entry->lo.fields_vapic.guest_mode) 4006 return 0; 4007 4008 if (!ir_data->iommu) 4009 return -ENODEV; 4010 4011 if (cpu >= 0) { 4012 entry->lo.fields_vapic.destination = 4013 APICID_TO_IRTE_DEST_LO(cpu); 4014 entry->hi.fields.destination = 4015 APICID_TO_IRTE_DEST_HI(cpu); 4016 } 4017 entry->lo.fields_vapic.is_run = is_run; 4018 4019 return __modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, 4020 ir_data->irq_2_irte.index, entry); 4021 } 4022 EXPORT_SYMBOL(amd_iommu_update_ga); 4023 #endif 4024