1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. 4 * Author: Joerg Roedel <jroedel@suse.de> 5 * Leo Duran <leo.duran@amd.com> 6 */ 7 8 #define pr_fmt(fmt) "AMD-Vi: " fmt 9 #define dev_fmt(fmt) pr_fmt(fmt) 10 11 #include <linux/pci.h> 12 #include <linux/acpi.h> 13 #include <linux/list.h> 14 #include <linux/bitmap.h> 15 #include <linux/syscore_ops.h> 16 #include <linux/interrupt.h> 17 #include <linux/msi.h> 18 #include <linux/irq.h> 19 #include <linux/amd-iommu.h> 20 #include <linux/export.h> 21 #include <linux/kmemleak.h> 22 #include <linux/cc_platform.h> 23 #include <linux/iopoll.h> 24 #include <asm/pci-direct.h> 25 #include <asm/iommu.h> 26 #include <asm/apic.h> 27 #include <asm/gart.h> 28 #include <asm/x86_init.h> 29 #include <asm/io_apic.h> 30 #include <asm/irq_remapping.h> 31 #include <asm/set_memory.h> 32 #include <asm/sev.h> 33 34 #include <linux/crash_dump.h> 35 36 #include "amd_iommu.h" 37 #include "../irq_remapping.h" 38 #include "../iommu-pages.h" 39 40 /* 41 * definitions for the ACPI scanning code 42 */ 43 #define IVRS_HEADER_LENGTH 48 44 45 #define ACPI_IVHD_TYPE_MAX_SUPPORTED 0x40 46 #define ACPI_IVMD_TYPE_ALL 0x20 47 #define ACPI_IVMD_TYPE 0x21 48 #define ACPI_IVMD_TYPE_RANGE 0x22 49 50 #define IVHD_DEV_ALL 0x01 51 #define IVHD_DEV_SELECT 0x02 52 #define IVHD_DEV_SELECT_RANGE_START 0x03 53 #define IVHD_DEV_RANGE_END 0x04 54 #define IVHD_DEV_ALIAS 0x42 55 #define IVHD_DEV_ALIAS_RANGE 0x43 56 #define IVHD_DEV_EXT_SELECT 0x46 57 #define IVHD_DEV_EXT_SELECT_RANGE 0x47 58 #define IVHD_DEV_SPECIAL 0x48 59 #define IVHD_DEV_ACPI_HID 0xf0 60 61 #define UID_NOT_PRESENT 0 62 #define UID_IS_INTEGER 1 63 #define UID_IS_CHARACTER 2 64 65 #define IVHD_SPECIAL_IOAPIC 1 66 #define IVHD_SPECIAL_HPET 2 67 68 #define IVHD_FLAG_HT_TUN_EN_MASK 0x01 69 #define IVHD_FLAG_PASSPW_EN_MASK 0x02 70 #define IVHD_FLAG_RESPASSPW_EN_MASK 0x04 71 #define IVHD_FLAG_ISOC_EN_MASK 0x08 72 73 #define IVMD_FLAG_EXCL_RANGE 0x08 74 #define IVMD_FLAG_IW 0x04 75 #define IVMD_FLAG_IR 0x02 76 #define IVMD_FLAG_UNITY_MAP 0x01 77 78 #define ACPI_DEVFLAG_INITPASS 0x01 79 #define ACPI_DEVFLAG_EXTINT 0x02 80 #define ACPI_DEVFLAG_NMI 0x04 81 #define ACPI_DEVFLAG_SYSMGT1 0x10 82 #define ACPI_DEVFLAG_SYSMGT2 0x20 83 #define ACPI_DEVFLAG_LINT0 0x40 84 #define ACPI_DEVFLAG_LINT1 0x80 85 #define ACPI_DEVFLAG_ATSDIS 0x10000000 86 87 #define IVRS_GET_SBDF_ID(seg, bus, dev, fn) (((seg & 0xffff) << 16) | ((bus & 0xff) << 8) \ 88 | ((dev & 0x1f) << 3) | (fn & 0x7)) 89 90 /* 91 * ACPI table definitions 92 * 93 * These data structures are laid over the table to parse the important values 94 * out of it. 95 */ 96 97 /* 98 * structure describing one IOMMU in the ACPI table. Typically followed by one 99 * or more ivhd_entrys. 100 */ 101 struct ivhd_header { 102 u8 type; 103 u8 flags; 104 u16 length; 105 u16 devid; 106 u16 cap_ptr; 107 u64 mmio_phys; 108 u16 pci_seg; 109 u16 info; 110 u32 efr_attr; 111 112 /* Following only valid on IVHD type 11h and 40h */ 113 u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */ 114 u64 efr_reg2; 115 } __attribute__((packed)); 116 117 /* 118 * A device entry describing which devices a specific IOMMU translates and 119 * which requestor ids they use. 120 */ 121 struct ivhd_entry { 122 u8 type; 123 u16 devid; 124 u8 flags; 125 struct_group(ext_hid, 126 u32 ext; 127 u32 hidh; 128 ); 129 u64 cid; 130 u8 uidf; 131 u8 uidl; 132 u8 uid; 133 } __attribute__((packed)); 134 135 /* 136 * An AMD IOMMU memory definition structure. 
It defines things like exclusion 137 * ranges for devices and regions that should be unity mapped. 138 */ 139 struct ivmd_header { 140 u8 type; 141 u8 flags; 142 u16 length; 143 u16 devid; 144 u16 aux; 145 u16 pci_seg; 146 u8 resv[6]; 147 u64 range_start; 148 u64 range_length; 149 } __attribute__((packed)); 150 151 bool amd_iommu_dump; 152 bool amd_iommu_irq_remap __read_mostly; 153 154 enum protection_domain_mode amd_iommu_pgtable = PD_MODE_V1; 155 /* Host page table level */ 156 u8 amd_iommu_hpt_level; 157 /* Guest page table level */ 158 int amd_iommu_gpt_level = PAGE_MODE_4_LEVEL; 159 160 int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC; 161 static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE; 162 163 static bool amd_iommu_detected; 164 static bool amd_iommu_disabled __initdata; 165 static bool amd_iommu_force_enable __initdata; 166 static bool amd_iommu_irtcachedis; 167 static int amd_iommu_target_ivhd_type; 168 169 /* Global EFR and EFR2 registers */ 170 u64 amd_iommu_efr; 171 u64 amd_iommu_efr2; 172 173 /* Host (v1) page table is not supported*/ 174 bool amd_iommu_hatdis; 175 176 /* SNP is enabled on the system? */ 177 bool amd_iommu_snp_en; 178 EXPORT_SYMBOL(amd_iommu_snp_en); 179 180 LIST_HEAD(amd_iommu_pci_seg_list); /* list of all PCI segments */ 181 LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the system */ 182 LIST_HEAD(amd_ivhd_dev_flags_list); /* list of all IVHD device entry settings */ 183 184 /* Number of IOMMUs present in the system */ 185 static int amd_iommus_present; 186 187 /* IOMMUs have a non-present cache? */ 188 bool amd_iommu_np_cache __read_mostly; 189 bool amd_iommu_iotlb_sup __read_mostly = true; 190 191 static bool amd_iommu_pc_present __read_mostly; 192 bool amdr_ivrs_remap_support __read_mostly; 193 194 bool amd_iommu_force_isolation __read_mostly; 195 196 unsigned long amd_iommu_pgsize_bitmap __ro_after_init = AMD_IOMMU_PGSIZES; 197 198 enum iommu_init_state { 199 IOMMU_START_STATE, 200 IOMMU_IVRS_DETECTED, 201 IOMMU_ACPI_FINISHED, 202 IOMMU_ENABLED, 203 IOMMU_PCI_INIT, 204 IOMMU_INTERRUPTS_EN, 205 IOMMU_INITIALIZED, 206 IOMMU_NOT_FOUND, 207 IOMMU_INIT_ERROR, 208 IOMMU_CMDLINE_DISABLED, 209 }; 210 211 /* Early ioapic and hpet maps from kernel command line */ 212 #define EARLY_MAP_SIZE 4 213 static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE]; 214 static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE]; 215 static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE]; 216 217 static int __initdata early_ioapic_map_size; 218 static int __initdata early_hpet_map_size; 219 static int __initdata early_acpihid_map_size; 220 221 static bool __initdata cmdline_maps; 222 223 static enum iommu_init_state init_state = IOMMU_START_STATE; 224 225 static int amd_iommu_enable_interrupts(void); 226 static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg); 227 228 static bool amd_iommu_pre_enabled = true; 229 230 static u32 amd_iommu_ivinfo __initdata; 231 232 bool translation_pre_enabled(struct amd_iommu *iommu) 233 { 234 return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED); 235 } 236 237 static void clear_translation_pre_enabled(struct amd_iommu *iommu) 238 { 239 iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED; 240 } 241 242 static void init_translation_status(struct amd_iommu *iommu) 243 { 244 u64 ctrl; 245 246 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); 247 if (ctrl & (1<<CONTROL_IOMMU_EN)) 248 iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED; 249 } 250 251 int 
amd_iommu_get_num_iommus(void) 252 { 253 return amd_iommus_present; 254 } 255 256 bool amd_iommu_ht_range_ignore(void) 257 { 258 return check_feature2(FEATURE_HT_RANGE_IGNORE); 259 } 260 261 /* 262 * Iterate through all the IOMMUs to get common EFR 263 * masks among all IOMMUs and warn if found inconsistency. 264 */ 265 static __init void get_global_efr(void) 266 { 267 struct amd_iommu *iommu; 268 269 for_each_iommu(iommu) { 270 u64 tmp = iommu->features; 271 u64 tmp2 = iommu->features2; 272 273 if (list_is_first(&iommu->list, &amd_iommu_list)) { 274 amd_iommu_efr = tmp; 275 amd_iommu_efr2 = tmp2; 276 continue; 277 } 278 279 if (amd_iommu_efr == tmp && 280 amd_iommu_efr2 == tmp2) 281 continue; 282 283 pr_err(FW_BUG 284 "Found inconsistent EFR/EFR2 %#llx,%#llx (global %#llx,%#llx) on iommu%d (%04x:%02x:%02x.%01x).\n", 285 tmp, tmp2, amd_iommu_efr, amd_iommu_efr2, 286 iommu->index, iommu->pci_seg->id, 287 PCI_BUS_NUM(iommu->devid), PCI_SLOT(iommu->devid), 288 PCI_FUNC(iommu->devid)); 289 290 amd_iommu_efr &= tmp; 291 amd_iommu_efr2 &= tmp2; 292 } 293 294 pr_info("Using global IVHD EFR:%#llx, EFR2:%#llx\n", amd_iommu_efr, amd_iommu_efr2); 295 } 296 297 /* 298 * For IVHD type 0x11/0x40, EFR is also available via IVHD. 299 * Default to IVHD EFR since it is available sooner 300 * (i.e. before PCI init). 301 */ 302 static void __init early_iommu_features_init(struct amd_iommu *iommu, 303 struct ivhd_header *h) 304 { 305 if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP) { 306 iommu->features = h->efr_reg; 307 iommu->features2 = h->efr_reg2; 308 } 309 if (amd_iommu_ivinfo & IOMMU_IVINFO_DMA_REMAP) 310 amdr_ivrs_remap_support = true; 311 } 312 313 /* Access to l1 and l2 indexed register spaces */ 314 315 static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address) 316 { 317 u32 val; 318 319 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); 320 pci_read_config_dword(iommu->dev, 0xfc, &val); 321 return val; 322 } 323 324 static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val) 325 { 326 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31)); 327 pci_write_config_dword(iommu->dev, 0xfc, val); 328 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); 329 } 330 331 static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address) 332 { 333 u32 val; 334 335 pci_write_config_dword(iommu->dev, 0xf0, address); 336 pci_read_config_dword(iommu->dev, 0xf4, &val); 337 return val; 338 } 339 340 static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val) 341 { 342 pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8)); 343 pci_write_config_dword(iommu->dev, 0xf4, val); 344 } 345 346 /**************************************************************************** 347 * 348 * AMD IOMMU MMIO register space handling functions 349 * 350 * These functions are used to program the IOMMU device registers in 351 * MMIO space required for that driver. 352 * 353 ****************************************************************************/ 354 355 /* 356 * This function set the exclusion range in the IOMMU. 
DMA accesses to the 357 * exclusion range are passed through untranslated 358 */ 359 static void iommu_set_exclusion_range(struct amd_iommu *iommu) 360 { 361 u64 start = iommu->exclusion_start & PAGE_MASK; 362 u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK; 363 u64 entry; 364 365 if (!iommu->exclusion_start) 366 return; 367 368 entry = start | MMIO_EXCL_ENABLE_MASK; 369 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET, 370 &entry, sizeof(entry)); 371 372 entry = limit; 373 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET, 374 &entry, sizeof(entry)); 375 } 376 377 static void iommu_set_cwwb_range(struct amd_iommu *iommu) 378 { 379 u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem); 380 u64 entry = start & PM_ADDR_MASK; 381 382 if (!check_feature(FEATURE_SNP)) 383 return; 384 385 /* Note: 386 * Re-purpose Exclusion base/limit registers for Completion wait 387 * write-back base/limit. 388 */ 389 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET, 390 &entry, sizeof(entry)); 391 392 /* Note: 393 * Default to 4 Kbytes, which can be specified by setting base 394 * address equal to the limit address. 395 */ 396 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET, 397 &entry, sizeof(entry)); 398 } 399 400 /* Programs the physical address of the device table into the IOMMU hardware */ 401 static void iommu_set_device_table(struct amd_iommu *iommu) 402 { 403 u64 entry; 404 u32 dev_table_size = iommu->pci_seg->dev_table_size; 405 void *dev_table = (void *)get_dev_table(iommu); 406 407 BUG_ON(iommu->mmio_base == NULL); 408 409 if (is_kdump_kernel()) 410 return; 411 412 entry = iommu_virt_to_phys(dev_table); 413 entry |= (dev_table_size >> 12) - 1; 414 memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET, 415 &entry, sizeof(entry)); 416 } 417 418 static void iommu_feature_set(struct amd_iommu *iommu, u64 val, u64 mask, u8 shift) 419 { 420 u64 ctrl; 421 422 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); 423 mask <<= shift; 424 ctrl &= ~mask; 425 ctrl |= (val << shift) & mask; 426 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); 427 } 428 429 /* Generic functions to enable/disable certain features of the IOMMU. */ 430 void iommu_feature_enable(struct amd_iommu *iommu, u8 bit) 431 { 432 iommu_feature_set(iommu, 1ULL, 1ULL, bit); 433 } 434 435 static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit) 436 { 437 iommu_feature_set(iommu, 0ULL, 1ULL, bit); 438 } 439 440 /* Function to enable the hardware */ 441 static void iommu_enable(struct amd_iommu *iommu) 442 { 443 iommu_feature_enable(iommu, CONTROL_IOMMU_EN); 444 } 445 446 static void iommu_disable(struct amd_iommu *iommu) 447 { 448 if (!iommu->mmio_base) 449 return; 450 451 /* Disable command buffer */ 452 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); 453 454 /* Disable event logging and event interrupts */ 455 iommu_feature_disable(iommu, CONTROL_EVT_INT_EN); 456 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); 457 458 /* Disable IOMMU GA_LOG */ 459 iommu_feature_disable(iommu, CONTROL_GALOG_EN); 460 iommu_feature_disable(iommu, CONTROL_GAINT_EN); 461 462 /* Disable IOMMU PPR logging */ 463 iommu_feature_disable(iommu, CONTROL_PPRLOG_EN); 464 iommu_feature_disable(iommu, CONTROL_PPRINT_EN); 465 466 /* Disable IOMMU hardware itself */ 467 iommu_feature_disable(iommu, CONTROL_IOMMU_EN); 468 469 /* Clear IRTE cache disabling bit */ 470 iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS); 471 } 472 473 /* 474 * mapping and unmapping functions for the IOMMU MMIO space. 
Each AMD IOMMU in 475 * the system has one. 476 */ 477 static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end) 478 { 479 if (!request_mem_region(address, end, "amd_iommu")) { 480 pr_err("Can not reserve memory region %llx-%llx for mmio\n", 481 address, end); 482 pr_err("This is a BIOS bug. Please contact your hardware vendor\n"); 483 return NULL; 484 } 485 486 return (u8 __iomem *)ioremap(address, end); 487 } 488 489 static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu) 490 { 491 if (iommu->mmio_base) 492 iounmap(iommu->mmio_base); 493 release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end); 494 } 495 496 static inline u32 get_ivhd_header_size(struct ivhd_header *h) 497 { 498 u32 size = 0; 499 500 switch (h->type) { 501 case 0x10: 502 size = 24; 503 break; 504 case 0x11: 505 case 0x40: 506 size = 40; 507 break; 508 } 509 return size; 510 } 511 512 /**************************************************************************** 513 * 514 * The functions below belong to the first pass of AMD IOMMU ACPI table 515 * parsing. In this pass we try to find out the highest device id this 516 * code has to handle. Upon this information the size of the shared data 517 * structures is determined later. 518 * 519 ****************************************************************************/ 520 521 /* 522 * This function calculates the length of a given IVHD entry 523 */ 524 static inline int ivhd_entry_length(u8 *ivhd) 525 { 526 u32 type = ((struct ivhd_entry *)ivhd)->type; 527 528 if (type < 0x80) { 529 return 0x04 << (*ivhd >> 6); 530 } else if (type == IVHD_DEV_ACPI_HID) { 531 /* For ACPI_HID, offset 21 is uid len */ 532 return *((u8 *)ivhd + 21) + 22; 533 } 534 return 0; 535 } 536 537 /* 538 * After reading the highest device id from the IOMMU PCI capability header 539 * this function looks if there is a higher device id defined in the ACPI table 540 */ 541 static int __init find_last_devid_from_ivhd(struct ivhd_header *h) 542 { 543 u8 *p = (void *)h, *end = (void *)h; 544 struct ivhd_entry *dev; 545 int last_devid = -EINVAL; 546 547 u32 ivhd_size = get_ivhd_header_size(h); 548 549 if (!ivhd_size) { 550 pr_err("Unsupported IVHD type %#x\n", h->type); 551 return -EINVAL; 552 } 553 554 p += ivhd_size; 555 end += h->length; 556 557 while (p < end) { 558 dev = (struct ivhd_entry *)p; 559 switch (dev->type) { 560 case IVHD_DEV_ALL: 561 /* Use maximum BDF value for DEV_ALL */ 562 return 0xffff; 563 case IVHD_DEV_SELECT: 564 case IVHD_DEV_RANGE_END: 565 case IVHD_DEV_ALIAS: 566 case IVHD_DEV_EXT_SELECT: 567 /* all the above subfield types refer to device ids */ 568 if (dev->devid > last_devid) 569 last_devid = dev->devid; 570 break; 571 default: 572 break; 573 } 574 p += ivhd_entry_length(p); 575 } 576 577 WARN_ON(p != end); 578 579 return last_devid; 580 } 581 582 static int __init check_ivrs_checksum(struct acpi_table_header *table) 583 { 584 int i; 585 u8 checksum = 0, *p = (u8 *)table; 586 587 for (i = 0; i < table->length; ++i) 588 checksum += p[i]; 589 if (checksum != 0) { 590 /* ACPI table corrupt */ 591 pr_err(FW_BUG "IVRS invalid checksum\n"); 592 return -ENODEV; 593 } 594 595 return 0; 596 } 597 598 /* 599 * Iterate over all IVHD entries in the ACPI table and find the highest device 600 * id which we need to handle. This is the first of three functions which parse 601 * the ACPI table. So we check the checksum here. 
602 */ 603 static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_seg) 604 { 605 u8 *p = (u8 *)table, *end = (u8 *)table; 606 struct ivhd_header *h; 607 int last_devid, last_bdf = 0; 608 609 p += IVRS_HEADER_LENGTH; 610 611 end += table->length; 612 while (p < end) { 613 h = (struct ivhd_header *)p; 614 if (h->pci_seg == pci_seg && 615 h->type == amd_iommu_target_ivhd_type) { 616 last_devid = find_last_devid_from_ivhd(h); 617 618 if (last_devid < 0) 619 return -EINVAL; 620 if (last_devid > last_bdf) 621 last_bdf = last_devid; 622 } 623 p += h->length; 624 } 625 WARN_ON(p != end); 626 627 return last_bdf; 628 } 629 630 /**************************************************************************** 631 * 632 * The following functions belong to the code path which parses the ACPI table 633 * the second time. In this ACPI parsing iteration we allocate IOMMU specific 634 * data structures, initialize the per PCI segment device/alias/rlookup table 635 * and also basically initialize the hardware. 636 * 637 ****************************************************************************/ 638 639 /* Allocate per PCI segment device table */ 640 static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg) 641 { 642 pci_seg->dev_table = iommu_alloc_pages_sz(GFP_KERNEL | GFP_DMA32, 643 pci_seg->dev_table_size); 644 if (!pci_seg->dev_table) 645 return -ENOMEM; 646 647 return 0; 648 } 649 650 static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg) 651 { 652 if (is_kdump_kernel()) 653 memunmap((void *)pci_seg->dev_table); 654 else 655 iommu_free_pages(pci_seg->dev_table); 656 pci_seg->dev_table = NULL; 657 } 658 659 /* Allocate per PCI segment IOMMU rlookup table. */ 660 static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg) 661 { 662 pci_seg->rlookup_table = kvcalloc(pci_seg->last_bdf + 1, 663 sizeof(*pci_seg->rlookup_table), 664 GFP_KERNEL); 665 if (pci_seg->rlookup_table == NULL) 666 return -ENOMEM; 667 668 return 0; 669 } 670 671 static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg) 672 { 673 kvfree(pci_seg->rlookup_table); 674 pci_seg->rlookup_table = NULL; 675 } 676 677 static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg) 678 { 679 pci_seg->irq_lookup_table = kvcalloc(pci_seg->last_bdf + 1, 680 sizeof(*pci_seg->irq_lookup_table), 681 GFP_KERNEL); 682 if (pci_seg->irq_lookup_table == NULL) 683 return -ENOMEM; 684 685 return 0; 686 } 687 688 static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg) 689 { 690 kvfree(pci_seg->irq_lookup_table); 691 pci_seg->irq_lookup_table = NULL; 692 } 693 694 static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg) 695 { 696 int i; 697 698 pci_seg->alias_table = kvmalloc_array(pci_seg->last_bdf + 1, 699 sizeof(*pci_seg->alias_table), 700 GFP_KERNEL); 701 if (!pci_seg->alias_table) 702 return -ENOMEM; 703 704 /* 705 * let all alias entries point to itself 706 */ 707 for (i = 0; i <= pci_seg->last_bdf; ++i) 708 pci_seg->alias_table[i] = i; 709 710 return 0; 711 } 712 713 static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg) 714 { 715 kvfree(pci_seg->alias_table); 716 pci_seg->alias_table = NULL; 717 } 718 719 static inline void *iommu_memremap(unsigned long paddr, size_t size) 720 { 721 phys_addr_t phys; 722 723 if (!paddr) 724 return NULL; 725 726 /* 727 * Obtain true physical address in kdump kernel when SME is enabled. 
728 * Currently, previous kernel with SME enabled and kdump kernel 729 * with SME support disabled is not supported. 730 */ 731 phys = __sme_clr(paddr); 732 733 if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) 734 return (__force void *)ioremap_encrypted(phys, size); 735 else 736 return memremap(phys, size, MEMREMAP_WB); 737 } 738 739 /* 740 * Allocates the command buffer. This buffer is per AMD IOMMU. We can 741 * write commands to that buffer later and the IOMMU will execute them 742 * asynchronously 743 */ 744 static int __init alloc_command_buffer(struct amd_iommu *iommu) 745 { 746 iommu->cmd_buf = iommu_alloc_pages_sz(GFP_KERNEL, CMD_BUFFER_SIZE); 747 748 return iommu->cmd_buf ? 0 : -ENOMEM; 749 } 750 751 /* 752 * Interrupt handler has processed all pending events and adjusted head 753 * and tail pointer. Reset overflow mask and restart logging again. 754 */ 755 void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type, 756 u8 cntrl_intr, u8 cntrl_log, 757 u32 status_run_mask, u32 status_overflow_mask) 758 { 759 u32 status; 760 761 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); 762 if (status & status_run_mask) 763 return; 764 765 pr_info_ratelimited("IOMMU %s log restarting\n", evt_type); 766 767 iommu_feature_disable(iommu, cntrl_log); 768 iommu_feature_disable(iommu, cntrl_intr); 769 770 writel(status_overflow_mask, iommu->mmio_base + MMIO_STATUS_OFFSET); 771 772 iommu_feature_enable(iommu, cntrl_intr); 773 iommu_feature_enable(iommu, cntrl_log); 774 } 775 776 /* 777 * This function restarts event logging in case the IOMMU experienced 778 * an event log buffer overflow. 779 */ 780 void amd_iommu_restart_event_logging(struct amd_iommu *iommu) 781 { 782 amd_iommu_restart_log(iommu, "Event", CONTROL_EVT_INT_EN, 783 CONTROL_EVT_LOG_EN, MMIO_STATUS_EVT_RUN_MASK, 784 MMIO_STATUS_EVT_OVERFLOW_MASK); 785 } 786 787 /* 788 * This function restarts event logging in case the IOMMU experienced 789 * GA log overflow. 790 */ 791 void amd_iommu_restart_ga_log(struct amd_iommu *iommu) 792 { 793 amd_iommu_restart_log(iommu, "GA", CONTROL_GAINT_EN, 794 CONTROL_GALOG_EN, MMIO_STATUS_GALOG_RUN_MASK, 795 MMIO_STATUS_GALOG_OVERFLOW_MASK); 796 } 797 798 /* 799 * This function resets the command buffer if the IOMMU stopped fetching 800 * commands from it. 801 */ 802 static void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu) 803 { 804 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); 805 806 writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); 807 writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); 808 iommu->cmd_buf_head = 0; 809 iommu->cmd_buf_tail = 0; 810 811 iommu_feature_enable(iommu, CONTROL_CMDBUF_EN); 812 } 813 814 /* 815 * This function writes the command buffer address to the hardware and 816 * enables it. 817 */ 818 static void iommu_enable_command_buffer(struct amd_iommu *iommu) 819 { 820 u64 entry; 821 822 BUG_ON(iommu->cmd_buf == NULL); 823 824 if (!is_kdump_kernel()) { 825 /* 826 * Command buffer is re-used for kdump kernel and setting 827 * of MMIO register is not required. 
828 */ 829 entry = iommu_virt_to_phys(iommu->cmd_buf); 830 entry |= MMIO_CMD_SIZE_512; 831 memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, 832 &entry, sizeof(entry)); 833 } 834 835 amd_iommu_reset_cmd_buffer(iommu); 836 } 837 838 /* 839 * This function disables the command buffer 840 */ 841 static void iommu_disable_command_buffer(struct amd_iommu *iommu) 842 { 843 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); 844 } 845 846 static void __init free_command_buffer(struct amd_iommu *iommu) 847 { 848 iommu_free_pages(iommu->cmd_buf); 849 } 850 851 void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, gfp_t gfp, 852 size_t size) 853 { 854 void *buf; 855 856 size = PAGE_ALIGN(size); 857 buf = iommu_alloc_pages_sz(gfp, size); 858 if (!buf) 859 return NULL; 860 if (check_feature(FEATURE_SNP) && 861 set_memory_4k((unsigned long)buf, size / PAGE_SIZE)) { 862 iommu_free_pages(buf); 863 return NULL; 864 } 865 866 return buf; 867 } 868 869 /* allocates the memory where the IOMMU will log its events to */ 870 static int __init alloc_event_buffer(struct amd_iommu *iommu) 871 { 872 iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL, 873 EVT_BUFFER_SIZE); 874 875 return iommu->evt_buf ? 0 : -ENOMEM; 876 } 877 878 static void iommu_enable_event_buffer(struct amd_iommu *iommu) 879 { 880 u64 entry; 881 882 BUG_ON(iommu->evt_buf == NULL); 883 884 if (!is_kdump_kernel()) { 885 /* 886 * Event buffer is re-used for kdump kernel and setting 887 * of MMIO register is not required. 888 */ 889 entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK; 890 memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET, 891 &entry, sizeof(entry)); 892 } 893 894 /* set head and tail to zero manually */ 895 writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); 896 writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); 897 898 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); 899 } 900 901 /* 902 * This function disables the event log buffer 903 */ 904 static void iommu_disable_event_buffer(struct amd_iommu *iommu) 905 { 906 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); 907 } 908 909 static void __init free_event_buffer(struct amd_iommu *iommu) 910 { 911 iommu_free_pages(iommu->evt_buf); 912 } 913 914 static void free_ga_log(struct amd_iommu *iommu) 915 { 916 #ifdef CONFIG_IRQ_REMAP 917 iommu_free_pages(iommu->ga_log); 918 iommu_free_pages(iommu->ga_log_tail); 919 #endif 920 } 921 922 #ifdef CONFIG_IRQ_REMAP 923 static int iommu_ga_log_enable(struct amd_iommu *iommu) 924 { 925 u32 status, i; 926 u64 entry; 927 928 if (!iommu->ga_log) 929 return -EINVAL; 930 931 entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512; 932 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET, 933 &entry, sizeof(entry)); 934 entry = (iommu_virt_to_phys(iommu->ga_log_tail) & 935 (BIT_ULL(52)-1)) & ~7ULL; 936 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET, 937 &entry, sizeof(entry)); 938 writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); 939 writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET); 940 941 942 iommu_feature_enable(iommu, CONTROL_GAINT_EN); 943 iommu_feature_enable(iommu, CONTROL_GALOG_EN); 944 945 for (i = 0; i < MMIO_STATUS_TIMEOUT; ++i) { 946 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); 947 if (status & (MMIO_STATUS_GALOG_RUN_MASK)) 948 break; 949 udelay(10); 950 } 951 952 if (WARN_ON(i >= MMIO_STATUS_TIMEOUT)) 953 return -EINVAL; 954 955 return 0; 956 } 957 958 static int iommu_init_ga_log(struct amd_iommu *iommu) 959 { 960 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) 961 return 0; 962 
963 iommu->ga_log = iommu_alloc_pages_sz(GFP_KERNEL, GA_LOG_SIZE); 964 if (!iommu->ga_log) 965 goto err_out; 966 967 iommu->ga_log_tail = iommu_alloc_pages_sz(GFP_KERNEL, 8); 968 if (!iommu->ga_log_tail) 969 goto err_out; 970 971 return 0; 972 err_out: 973 free_ga_log(iommu); 974 return -EINVAL; 975 } 976 #endif /* CONFIG_IRQ_REMAP */ 977 978 static int __init alloc_cwwb_sem(struct amd_iommu *iommu) 979 { 980 iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL, 1); 981 if (!iommu->cmd_sem) 982 return -ENOMEM; 983 iommu->cmd_sem_paddr = iommu_virt_to_phys((void *)iommu->cmd_sem); 984 return 0; 985 } 986 987 static int __init remap_event_buffer(struct amd_iommu *iommu) 988 { 989 u64 paddr; 990 991 pr_info_once("Re-using event buffer from the previous kernel\n"); 992 paddr = readq(iommu->mmio_base + MMIO_EVT_BUF_OFFSET) & PM_ADDR_MASK; 993 iommu->evt_buf = iommu_memremap(paddr, EVT_BUFFER_SIZE); 994 995 return iommu->evt_buf ? 0 : -ENOMEM; 996 } 997 998 static int __init remap_command_buffer(struct amd_iommu *iommu) 999 { 1000 u64 paddr; 1001 1002 pr_info_once("Re-using command buffer from the previous kernel\n"); 1003 paddr = readq(iommu->mmio_base + MMIO_CMD_BUF_OFFSET) & PM_ADDR_MASK; 1004 iommu->cmd_buf = iommu_memremap(paddr, CMD_BUFFER_SIZE); 1005 1006 return iommu->cmd_buf ? 0 : -ENOMEM; 1007 } 1008 1009 static int __init remap_or_alloc_cwwb_sem(struct amd_iommu *iommu) 1010 { 1011 u64 paddr; 1012 1013 if (check_feature(FEATURE_SNP)) { 1014 /* 1015 * When SNP is enabled, the exclusion base register is used for the 1016 * completion wait buffer (CWB) address. Read and re-use it. 1017 */ 1018 pr_info_once("Re-using CWB buffers from the previous kernel\n"); 1019 paddr = readq(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET) & PM_ADDR_MASK; 1020 iommu->cmd_sem = iommu_memremap(paddr, PAGE_SIZE); 1021 if (!iommu->cmd_sem) 1022 return -ENOMEM; 1023 iommu->cmd_sem_paddr = paddr; 1024 } else { 1025 return alloc_cwwb_sem(iommu); 1026 } 1027 1028 return 0; 1029 } 1030 1031 static int __init alloc_iommu_buffers(struct amd_iommu *iommu) 1032 { 1033 int ret; 1034 1035 /* 1036 * Reuse/Remap the previous kernel's allocated completion wait 1037 * command and event buffers for kdump boot. 
1038 */ 1039 if (is_kdump_kernel()) { 1040 ret = remap_or_alloc_cwwb_sem(iommu); 1041 if (ret) 1042 return ret; 1043 1044 ret = remap_command_buffer(iommu); 1045 if (ret) 1046 return ret; 1047 1048 ret = remap_event_buffer(iommu); 1049 if (ret) 1050 return ret; 1051 } else { 1052 ret = alloc_cwwb_sem(iommu); 1053 if (ret) 1054 return ret; 1055 1056 ret = alloc_command_buffer(iommu); 1057 if (ret) 1058 return ret; 1059 1060 ret = alloc_event_buffer(iommu); 1061 if (ret) 1062 return ret; 1063 } 1064 1065 return 0; 1066 } 1067 1068 static void __init free_cwwb_sem(struct amd_iommu *iommu) 1069 { 1070 if (iommu->cmd_sem) 1071 iommu_free_pages((void *)iommu->cmd_sem); 1072 } 1073 static void __init unmap_cwwb_sem(struct amd_iommu *iommu) 1074 { 1075 if (iommu->cmd_sem) { 1076 if (check_feature(FEATURE_SNP)) 1077 memunmap((void *)iommu->cmd_sem); 1078 else 1079 iommu_free_pages((void *)iommu->cmd_sem); 1080 } 1081 } 1082 1083 static void __init unmap_command_buffer(struct amd_iommu *iommu) 1084 { 1085 memunmap((void *)iommu->cmd_buf); 1086 } 1087 1088 static void __init unmap_event_buffer(struct amd_iommu *iommu) 1089 { 1090 memunmap(iommu->evt_buf); 1091 } 1092 1093 static void __init free_iommu_buffers(struct amd_iommu *iommu) 1094 { 1095 if (is_kdump_kernel()) { 1096 unmap_cwwb_sem(iommu); 1097 unmap_command_buffer(iommu); 1098 unmap_event_buffer(iommu); 1099 } else { 1100 free_cwwb_sem(iommu); 1101 free_command_buffer(iommu); 1102 free_event_buffer(iommu); 1103 } 1104 } 1105 1106 static void iommu_enable_xt(struct amd_iommu *iommu) 1107 { 1108 #ifdef CONFIG_IRQ_REMAP 1109 /* 1110 * XT mode (32-bit APIC destination ID) requires 1111 * GA mode (128-bit IRTE support) as a prerequisite. 1112 */ 1113 if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) && 1114 amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE) 1115 iommu_feature_enable(iommu, CONTROL_XT_EN); 1116 #endif /* CONFIG_IRQ_REMAP */ 1117 } 1118 1119 static void iommu_enable_gt(struct amd_iommu *iommu) 1120 { 1121 if (!check_feature(FEATURE_GT)) 1122 return; 1123 1124 iommu_feature_enable(iommu, CONTROL_GT_EN); 1125 } 1126 1127 /* sets a specific bit in the device table entry. */ 1128 static void set_dte_bit(struct dev_table_entry *dte, u8 bit) 1129 { 1130 int i = (bit >> 6) & 0x03; 1131 int _bit = bit & 0x3f; 1132 1133 dte->data[i] |= (1UL << _bit); 1134 } 1135 1136 static bool __reuse_device_table(struct amd_iommu *iommu) 1137 { 1138 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; 1139 u32 lo, hi, old_devtb_size; 1140 phys_addr_t old_devtb_phys; 1141 u64 entry; 1142 1143 /* Each IOMMU use separate device table with the same size */ 1144 lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET); 1145 hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4); 1146 entry = (((u64) hi) << 32) + lo; 1147 1148 old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12; 1149 if (old_devtb_size != pci_seg->dev_table_size) { 1150 pr_err("The device table size of IOMMU:%d is not expected!\n", 1151 iommu->index); 1152 return false; 1153 } 1154 1155 /* 1156 * When SME is enabled in the first kernel, the entry includes the 1157 * memory encryption mask(sme_me_mask), we must remove the memory 1158 * encryption mask to obtain the true physical address in kdump kernel. 1159 */ 1160 old_devtb_phys = __sme_clr(entry) & PAGE_MASK; 1161 1162 if (old_devtb_phys >= 0x100000000ULL) { 1163 pr_err("The address of old device table is above 4G, not trustworthy!\n"); 1164 return false; 1165 } 1166 1167 /* 1168 * Re-use the previous kernel's device table for kdump. 
1169 */ 1170 pci_seg->old_dev_tbl_cpy = iommu_memremap(old_devtb_phys, pci_seg->dev_table_size); 1171 if (pci_seg->old_dev_tbl_cpy == NULL) { 1172 pr_err("Failed to remap memory for reusing old device table!\n"); 1173 return false; 1174 } 1175 1176 return true; 1177 } 1178 1179 static bool reuse_device_table(void) 1180 { 1181 struct amd_iommu *iommu; 1182 struct amd_iommu_pci_seg *pci_seg; 1183 1184 if (!amd_iommu_pre_enabled) 1185 return false; 1186 1187 pr_warn("Translation is already enabled - trying to reuse translation structures\n"); 1188 1189 /* 1190 * All IOMMUs within PCI segment shares common device table. 1191 * Hence reuse device table only once per PCI segment. 1192 */ 1193 for_each_pci_segment(pci_seg) { 1194 for_each_iommu(iommu) { 1195 if (pci_seg->id != iommu->pci_seg->id) 1196 continue; 1197 if (!__reuse_device_table(iommu)) 1198 return false; 1199 break; 1200 } 1201 } 1202 1203 return true; 1204 } 1205 1206 struct dev_table_entry *amd_iommu_get_ivhd_dte_flags(u16 segid, u16 devid) 1207 { 1208 struct ivhd_dte_flags *e; 1209 unsigned int best_len = UINT_MAX; 1210 struct dev_table_entry *dte = NULL; 1211 1212 for_each_ivhd_dte_flags(e) { 1213 /* 1214 * Need to go through the whole list to find the smallest range, 1215 * which contains the devid. 1216 */ 1217 if ((e->segid == segid) && 1218 (e->devid_first <= devid) && (devid <= e->devid_last)) { 1219 unsigned int len = e->devid_last - e->devid_first; 1220 1221 if (len < best_len) { 1222 dte = &(e->dte); 1223 best_len = len; 1224 } 1225 } 1226 } 1227 return dte; 1228 } 1229 1230 static bool search_ivhd_dte_flags(u16 segid, u16 first, u16 last) 1231 { 1232 struct ivhd_dte_flags *e; 1233 1234 for_each_ivhd_dte_flags(e) { 1235 if ((e->segid == segid) && 1236 (e->devid_first == first) && 1237 (e->devid_last == last)) 1238 return true; 1239 } 1240 return false; 1241 } 1242 1243 /* 1244 * This function takes the device specific flags read from the ACPI 1245 * table and sets up the device table entry with that information 1246 */ 1247 static void __init 1248 set_dev_entry_from_acpi_range(struct amd_iommu *iommu, u16 first, u16 last, 1249 u32 flags, u32 ext_flags) 1250 { 1251 int i; 1252 struct dev_table_entry dte = {}; 1253 1254 /* Parse IVHD DTE setting flags and store information */ 1255 if (flags) { 1256 struct ivhd_dte_flags *d; 1257 1258 if (search_ivhd_dte_flags(iommu->pci_seg->id, first, last)) 1259 return; 1260 1261 d = kzalloc(sizeof(struct ivhd_dte_flags), GFP_KERNEL); 1262 if (!d) 1263 return; 1264 1265 pr_debug("%s: devid range %#x:%#x\n", __func__, first, last); 1266 1267 if (flags & ACPI_DEVFLAG_INITPASS) 1268 set_dte_bit(&dte, DEV_ENTRY_INIT_PASS); 1269 if (flags & ACPI_DEVFLAG_EXTINT) 1270 set_dte_bit(&dte, DEV_ENTRY_EINT_PASS); 1271 if (flags & ACPI_DEVFLAG_NMI) 1272 set_dte_bit(&dte, DEV_ENTRY_NMI_PASS); 1273 if (flags & ACPI_DEVFLAG_SYSMGT1) 1274 set_dte_bit(&dte, DEV_ENTRY_SYSMGT1); 1275 if (flags & ACPI_DEVFLAG_SYSMGT2) 1276 set_dte_bit(&dte, DEV_ENTRY_SYSMGT2); 1277 if (flags & ACPI_DEVFLAG_LINT0) 1278 set_dte_bit(&dte, DEV_ENTRY_LINT0_PASS); 1279 if (flags & ACPI_DEVFLAG_LINT1) 1280 set_dte_bit(&dte, DEV_ENTRY_LINT1_PASS); 1281 1282 /* Apply erratum 63, which needs info in initial_dte */ 1283 if (FIELD_GET(DTE_DATA1_SYSMGT_MASK, dte.data[1]) == 0x1) 1284 dte.data[0] |= DTE_FLAG_IW; 1285 1286 memcpy(&d->dte, &dte, sizeof(dte)); 1287 d->segid = iommu->pci_seg->id; 1288 d->devid_first = first; 1289 d->devid_last = last; 1290 list_add_tail(&d->list, &amd_ivhd_dev_flags_list); 1291 } 1292 1293 for (i = first; i <= 
last; i++) { 1294 if (flags) { 1295 struct dev_table_entry *dev_table = get_dev_table(iommu); 1296 1297 memcpy(&dev_table[i], &dte, sizeof(dte)); 1298 } 1299 amd_iommu_set_rlookup_table(iommu, i); 1300 } 1301 } 1302 1303 static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu, 1304 u16 devid, u32 flags, u32 ext_flags) 1305 { 1306 set_dev_entry_from_acpi_range(iommu, devid, devid, flags, ext_flags); 1307 } 1308 1309 int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line) 1310 { 1311 struct devid_map *entry; 1312 struct list_head *list; 1313 1314 if (type == IVHD_SPECIAL_IOAPIC) 1315 list = &ioapic_map; 1316 else if (type == IVHD_SPECIAL_HPET) 1317 list = &hpet_map; 1318 else 1319 return -EINVAL; 1320 1321 list_for_each_entry(entry, list, list) { 1322 if (!(entry->id == id && entry->cmd_line)) 1323 continue; 1324 1325 pr_info("Command-line override present for %s id %d - ignoring\n", 1326 type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id); 1327 1328 *devid = entry->devid; 1329 1330 return 0; 1331 } 1332 1333 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 1334 if (!entry) 1335 return -ENOMEM; 1336 1337 entry->id = id; 1338 entry->devid = *devid; 1339 entry->cmd_line = cmd_line; 1340 1341 list_add_tail(&entry->list, list); 1342 1343 return 0; 1344 } 1345 1346 static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u32 *devid, 1347 bool cmd_line) 1348 { 1349 struct acpihid_map_entry *entry; 1350 struct list_head *list = &acpihid_map; 1351 1352 list_for_each_entry(entry, list, list) { 1353 if (strcmp(entry->hid, hid) || 1354 (*uid && *entry->uid && strcmp(entry->uid, uid)) || 1355 !entry->cmd_line) 1356 continue; 1357 1358 pr_info("Command-line override for hid:%s uid:%s\n", 1359 hid, uid); 1360 *devid = entry->devid; 1361 return 0; 1362 } 1363 1364 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 1365 if (!entry) 1366 return -ENOMEM; 1367 1368 memcpy(entry->uid, uid, strlen(uid)); 1369 memcpy(entry->hid, hid, strlen(hid)); 1370 entry->devid = *devid; 1371 entry->cmd_line = cmd_line; 1372 entry->root_devid = (entry->devid & (~0x7)); 1373 1374 pr_info("%s, add hid:%s, uid:%s, rdevid:%#x\n", 1375 entry->cmd_line ? "cmd" : "ivrs", 1376 entry->hid, entry->uid, entry->root_devid); 1377 1378 list_add_tail(&entry->list, list); 1379 return 0; 1380 } 1381 1382 static int __init add_early_maps(void) 1383 { 1384 int i, ret; 1385 1386 for (i = 0; i < early_ioapic_map_size; ++i) { 1387 ret = add_special_device(IVHD_SPECIAL_IOAPIC, 1388 early_ioapic_map[i].id, 1389 &early_ioapic_map[i].devid, 1390 early_ioapic_map[i].cmd_line); 1391 if (ret) 1392 return ret; 1393 } 1394 1395 for (i = 0; i < early_hpet_map_size; ++i) { 1396 ret = add_special_device(IVHD_SPECIAL_HPET, 1397 early_hpet_map[i].id, 1398 &early_hpet_map[i].devid, 1399 early_hpet_map[i].cmd_line); 1400 if (ret) 1401 return ret; 1402 } 1403 1404 for (i = 0; i < early_acpihid_map_size; ++i) { 1405 ret = add_acpi_hid_device(early_acpihid_map[i].hid, 1406 early_acpihid_map[i].uid, 1407 &early_acpihid_map[i].devid, 1408 early_acpihid_map[i].cmd_line); 1409 if (ret) 1410 return ret; 1411 } 1412 1413 return 0; 1414 } 1415 1416 /* 1417 * Takes a pointer to an AMD IOMMU entry in the ACPI table and 1418 * initializes the hardware and our data structures with it. 
1419 */ 1420 static int __init init_iommu_from_acpi(struct amd_iommu *iommu, 1421 struct ivhd_header *h) 1422 { 1423 u8 *p = (u8 *)h; 1424 u8 *end = p, flags = 0; 1425 u16 devid = 0, devid_start = 0, devid_to = 0, seg_id; 1426 u32 dev_i, ext_flags = 0; 1427 bool alias = false; 1428 struct ivhd_entry *e; 1429 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; 1430 u32 ivhd_size; 1431 int ret; 1432 1433 1434 ret = add_early_maps(); 1435 if (ret) 1436 return ret; 1437 1438 amd_iommu_apply_ivrs_quirks(); 1439 1440 /* 1441 * First save the recommended feature enable bits from ACPI 1442 */ 1443 iommu->acpi_flags = h->flags; 1444 1445 /* 1446 * Done. Now parse the device entries 1447 */ 1448 ivhd_size = get_ivhd_header_size(h); 1449 if (!ivhd_size) { 1450 pr_err("Unsupported IVHD type %#x\n", h->type); 1451 return -EINVAL; 1452 } 1453 1454 p += ivhd_size; 1455 1456 end += h->length; 1457 1458 1459 while (p < end) { 1460 e = (struct ivhd_entry *)p; 1461 seg_id = pci_seg->id; 1462 1463 switch (e->type) { 1464 case IVHD_DEV_ALL: 1465 1466 DUMP_printk(" DEV_ALL\t\t\tsetting: %#02x\n", e->flags); 1467 set_dev_entry_from_acpi_range(iommu, 0, pci_seg->last_bdf, e->flags, 0); 1468 break; 1469 case IVHD_DEV_SELECT: 1470 1471 DUMP_printk(" DEV_SELECT\t\t\tdevid: %04x:%02x:%02x.%x flags: %#02x\n", 1472 seg_id, PCI_BUS_NUM(e->devid), 1473 PCI_SLOT(e->devid), 1474 PCI_FUNC(e->devid), 1475 e->flags); 1476 1477 devid = e->devid; 1478 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); 1479 break; 1480 case IVHD_DEV_SELECT_RANGE_START: 1481 1482 DUMP_printk(" DEV_SELECT_RANGE_START\tdevid: %04x:%02x:%02x.%x flags: %#02x\n", 1483 seg_id, PCI_BUS_NUM(e->devid), 1484 PCI_SLOT(e->devid), 1485 PCI_FUNC(e->devid), 1486 e->flags); 1487 1488 devid_start = e->devid; 1489 flags = e->flags; 1490 ext_flags = 0; 1491 alias = false; 1492 break; 1493 case IVHD_DEV_ALIAS: 1494 1495 DUMP_printk(" DEV_ALIAS\t\t\tdevid: %04x:%02x:%02x.%x flags: %#02x devid_to: %02x:%02x.%x\n", 1496 seg_id, PCI_BUS_NUM(e->devid), 1497 PCI_SLOT(e->devid), 1498 PCI_FUNC(e->devid), 1499 e->flags, 1500 PCI_BUS_NUM(e->ext >> 8), 1501 PCI_SLOT(e->ext >> 8), 1502 PCI_FUNC(e->ext >> 8)); 1503 1504 devid = e->devid; 1505 devid_to = e->ext >> 8; 1506 set_dev_entry_from_acpi(iommu, devid , e->flags, 0); 1507 set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0); 1508 pci_seg->alias_table[devid] = devid_to; 1509 break; 1510 case IVHD_DEV_ALIAS_RANGE: 1511 1512 DUMP_printk(" DEV_ALIAS_RANGE\t\tdevid: %04x:%02x:%02x.%x flags: %#02x devid_to: %04x:%02x:%02x.%x\n", 1513 seg_id, PCI_BUS_NUM(e->devid), 1514 PCI_SLOT(e->devid), 1515 PCI_FUNC(e->devid), 1516 e->flags, 1517 seg_id, PCI_BUS_NUM(e->ext >> 8), 1518 PCI_SLOT(e->ext >> 8), 1519 PCI_FUNC(e->ext >> 8)); 1520 1521 devid_start = e->devid; 1522 flags = e->flags; 1523 devid_to = e->ext >> 8; 1524 ext_flags = 0; 1525 alias = true; 1526 break; 1527 case IVHD_DEV_EXT_SELECT: 1528 1529 DUMP_printk(" DEV_EXT_SELECT\t\tdevid: %04x:%02x:%02x.%x flags: %#02x ext: %08x\n", 1530 seg_id, PCI_BUS_NUM(e->devid), 1531 PCI_SLOT(e->devid), 1532 PCI_FUNC(e->devid), 1533 e->flags, e->ext); 1534 1535 devid = e->devid; 1536 set_dev_entry_from_acpi(iommu, devid, e->flags, 1537 e->ext); 1538 break; 1539 case IVHD_DEV_EXT_SELECT_RANGE: 1540 1541 DUMP_printk(" DEV_EXT_SELECT_RANGE\tdevid: %04x:%02x:%02x.%x flags: %#02x ext: %08x\n", 1542 seg_id, PCI_BUS_NUM(e->devid), 1543 PCI_SLOT(e->devid), 1544 PCI_FUNC(e->devid), 1545 e->flags, e->ext); 1546 1547 devid_start = e->devid; 1548 flags = e->flags; 1549 ext_flags = e->ext; 1550 alias = 
false; 1551 break; 1552 case IVHD_DEV_RANGE_END: 1553 1554 DUMP_printk(" DEV_RANGE_END\t\tdevid: %04x:%02x:%02x.%x\n", 1555 seg_id, PCI_BUS_NUM(e->devid), 1556 PCI_SLOT(e->devid), 1557 PCI_FUNC(e->devid)); 1558 1559 devid = e->devid; 1560 if (alias) { 1561 for (dev_i = devid_start; dev_i <= devid; ++dev_i) 1562 pci_seg->alias_table[dev_i] = devid_to; 1563 set_dev_entry_from_acpi(iommu, devid_to, flags, ext_flags); 1564 } 1565 set_dev_entry_from_acpi_range(iommu, devid_start, devid, flags, ext_flags); 1566 break; 1567 case IVHD_DEV_SPECIAL: { 1568 u8 handle, type; 1569 const char *var; 1570 u32 devid; 1571 int ret; 1572 1573 handle = e->ext & 0xff; 1574 devid = PCI_SEG_DEVID_TO_SBDF(seg_id, (e->ext >> 8)); 1575 type = (e->ext >> 24) & 0xff; 1576 1577 if (type == IVHD_SPECIAL_IOAPIC) 1578 var = "IOAPIC"; 1579 else if (type == IVHD_SPECIAL_HPET) 1580 var = "HPET"; 1581 else 1582 var = "UNKNOWN"; 1583 1584 DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %04x:%02x:%02x.%x, flags: %#02x\n", 1585 var, (int)handle, 1586 seg_id, PCI_BUS_NUM(devid), 1587 PCI_SLOT(devid), 1588 PCI_FUNC(devid), 1589 e->flags); 1590 1591 ret = add_special_device(type, handle, &devid, false); 1592 if (ret) 1593 return ret; 1594 1595 /* 1596 * add_special_device might update the devid in case a 1597 * command-line override is present. So call 1598 * set_dev_entry_from_acpi after add_special_device. 1599 */ 1600 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); 1601 1602 break; 1603 } 1604 case IVHD_DEV_ACPI_HID: { 1605 u32 devid; 1606 u8 hid[ACPIHID_HID_LEN]; 1607 u8 uid[ACPIHID_UID_LEN]; 1608 int ret; 1609 1610 if (h->type != 0x40) { 1611 pr_err(FW_BUG "Invalid IVHD device type %#x\n", 1612 e->type); 1613 break; 1614 } 1615 1616 BUILD_BUG_ON(sizeof(e->ext_hid) != ACPIHID_HID_LEN - 1); 1617 memcpy(hid, &e->ext_hid, ACPIHID_HID_LEN - 1); 1618 hid[ACPIHID_HID_LEN - 1] = '\0'; 1619 1620 if (!(*hid)) { 1621 pr_err(FW_BUG "Invalid HID.\n"); 1622 break; 1623 } 1624 1625 uid[0] = '\0'; 1626 switch (e->uidf) { 1627 case UID_NOT_PRESENT: 1628 1629 if (e->uidl != 0) 1630 pr_warn(FW_BUG "Invalid UID length.\n"); 1631 1632 break; 1633 case UID_IS_INTEGER: 1634 1635 sprintf(uid, "%d", e->uid); 1636 1637 break; 1638 case UID_IS_CHARACTER: 1639 1640 memcpy(uid, &e->uid, e->uidl); 1641 uid[e->uidl] = '\0'; 1642 1643 break; 1644 default: 1645 break; 1646 } 1647 1648 devid = PCI_SEG_DEVID_TO_SBDF(seg_id, e->devid); 1649 DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %04x:%02x:%02x.%x, flags: %#02x\n", 1650 hid, uid, seg_id, 1651 PCI_BUS_NUM(devid), 1652 PCI_SLOT(devid), 1653 PCI_FUNC(devid), 1654 e->flags); 1655 1656 flags = e->flags; 1657 1658 ret = add_acpi_hid_device(hid, uid, &devid, false); 1659 if (ret) 1660 return ret; 1661 1662 /* 1663 * add_special_device might update the devid in case a 1664 * command-line override is present. So call 1665 * set_dev_entry_from_acpi after add_special_device. 1666 */ 1667 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); 1668 1669 break; 1670 } 1671 default: 1672 break; 1673 } 1674 1675 p += ivhd_entry_length(p); 1676 } 1677 1678 return 0; 1679 } 1680 1681 /* Allocate PCI segment data structure */ 1682 static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id, 1683 struct acpi_table_header *ivrs_base) 1684 { 1685 struct amd_iommu_pci_seg *pci_seg; 1686 int last_bdf; 1687 1688 /* 1689 * First parse ACPI tables to find the largest Bus/Dev/Func we need to 1690 * handle in this PCI segment. 
Upon this information the shared data 1691 * structures for the PCI segments in the system will be allocated. 1692 */ 1693 last_bdf = find_last_devid_acpi(ivrs_base, id); 1694 if (last_bdf < 0) 1695 return NULL; 1696 1697 pci_seg = kzalloc(sizeof(struct amd_iommu_pci_seg), GFP_KERNEL); 1698 if (pci_seg == NULL) 1699 return NULL; 1700 1701 pci_seg->last_bdf = last_bdf; 1702 DUMP_printk("PCI segment : 0x%0x, last bdf : 0x%04x\n", id, last_bdf); 1703 pci_seg->dev_table_size = 1704 max(roundup_pow_of_two((last_bdf + 1) * DEV_TABLE_ENTRY_SIZE), 1705 SZ_4K); 1706 1707 pci_seg->id = id; 1708 init_llist_head(&pci_seg->dev_data_list); 1709 INIT_LIST_HEAD(&pci_seg->unity_map); 1710 list_add_tail(&pci_seg->list, &amd_iommu_pci_seg_list); 1711 1712 if (alloc_dev_table(pci_seg)) 1713 return NULL; 1714 if (alloc_alias_table(pci_seg)) 1715 return NULL; 1716 if (alloc_rlookup_table(pci_seg)) 1717 return NULL; 1718 1719 return pci_seg; 1720 } 1721 1722 static struct amd_iommu_pci_seg *__init get_pci_segment(u16 id, 1723 struct acpi_table_header *ivrs_base) 1724 { 1725 struct amd_iommu_pci_seg *pci_seg; 1726 1727 for_each_pci_segment(pci_seg) { 1728 if (pci_seg->id == id) 1729 return pci_seg; 1730 } 1731 1732 return alloc_pci_segment(id, ivrs_base); 1733 } 1734 1735 static void __init free_pci_segments(void) 1736 { 1737 struct amd_iommu_pci_seg *pci_seg, *next; 1738 1739 for_each_pci_segment_safe(pci_seg, next) { 1740 list_del(&pci_seg->list); 1741 free_irq_lookup_table(pci_seg); 1742 free_rlookup_table(pci_seg); 1743 free_alias_table(pci_seg); 1744 free_dev_table(pci_seg); 1745 kfree(pci_seg); 1746 } 1747 } 1748 1749 static void __init free_sysfs(struct amd_iommu *iommu) 1750 { 1751 if (iommu->iommu.dev) { 1752 iommu_device_unregister(&iommu->iommu); 1753 iommu_device_sysfs_remove(&iommu->iommu); 1754 } 1755 } 1756 1757 static void __init free_iommu_one(struct amd_iommu *iommu) 1758 { 1759 free_sysfs(iommu); 1760 free_iommu_buffers(iommu); 1761 amd_iommu_free_ppr_log(iommu); 1762 free_ga_log(iommu); 1763 iommu_unmap_mmio_space(iommu); 1764 amd_iommu_iopf_uninit(iommu); 1765 } 1766 1767 static void __init free_iommu_all(void) 1768 { 1769 struct amd_iommu *iommu, *next; 1770 1771 for_each_iommu_safe(iommu, next) { 1772 list_del(&iommu->list); 1773 free_iommu_one(iommu); 1774 kfree(iommu); 1775 } 1776 } 1777 1778 /* 1779 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations) 1780 * Workaround: 1781 * BIOS should disable L2B micellaneous clock gating by setting 1782 * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b 1783 */ 1784 static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu) 1785 { 1786 u32 value; 1787 1788 if ((boot_cpu_data.x86 != 0x15) || 1789 (boot_cpu_data.x86_model < 0x10) || 1790 (boot_cpu_data.x86_model > 0x1f)) 1791 return; 1792 1793 pci_write_config_dword(iommu->dev, 0xf0, 0x90); 1794 pci_read_config_dword(iommu->dev, 0xf4, &value); 1795 1796 if (value & BIT(2)) 1797 return; 1798 1799 /* Select NB indirect register 0x90 and enable writing */ 1800 pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8)); 1801 1802 pci_write_config_dword(iommu->dev, 0xf4, value | 0x4); 1803 pci_info(iommu->dev, "Applying erratum 746 workaround\n"); 1804 1805 /* Clear the enable writing bit */ 1806 pci_write_config_dword(iommu->dev, 0xf0, 0x90); 1807 } 1808 1809 /* 1810 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission) 1811 * Workaround: 1812 * BIOS should enable ATS write permission check by setting 1813 * 
L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b 1814 */ 1815 static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu) 1816 { 1817 u32 value; 1818 1819 if ((boot_cpu_data.x86 != 0x15) || 1820 (boot_cpu_data.x86_model < 0x30) || 1821 (boot_cpu_data.x86_model > 0x3f)) 1822 return; 1823 1824 /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */ 1825 value = iommu_read_l2(iommu, 0x47); 1826 1827 if (value & BIT(0)) 1828 return; 1829 1830 /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */ 1831 iommu_write_l2(iommu, 0x47, value | BIT(0)); 1832 1833 pci_info(iommu->dev, "Applying ATS write check workaround\n"); 1834 } 1835 1836 /* 1837 * This function glues the initialization function for one IOMMU 1838 * together and also allocates the command buffer and programs the 1839 * hardware. It does NOT enable the IOMMU. This is done afterwards. 1840 */ 1841 static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h, 1842 struct acpi_table_header *ivrs_base) 1843 { 1844 struct amd_iommu_pci_seg *pci_seg; 1845 1846 pci_seg = get_pci_segment(h->pci_seg, ivrs_base); 1847 if (pci_seg == NULL) 1848 return -ENOMEM; 1849 iommu->pci_seg = pci_seg; 1850 1851 raw_spin_lock_init(&iommu->lock); 1852 atomic64_set(&iommu->cmd_sem_val, 0); 1853 1854 /* Add IOMMU to internal data structures */ 1855 list_add_tail(&iommu->list, &amd_iommu_list); 1856 iommu->index = amd_iommus_present++; 1857 1858 if (unlikely(iommu->index >= MAX_IOMMUS)) { 1859 WARN(1, "System has more IOMMUs than supported by this driver\n"); 1860 return -ENOSYS; 1861 } 1862 1863 /* 1864 * Copy data from ACPI table entry to the iommu struct 1865 */ 1866 iommu->devid = h->devid; 1867 iommu->cap_ptr = h->cap_ptr; 1868 iommu->mmio_phys = h->mmio_phys; 1869 1870 switch (h->type) { 1871 case 0x10: 1872 /* Check if IVHD EFR contains proper max banks/counters */ 1873 if ((h->efr_attr != 0) && 1874 ((h->efr_attr & (0xF << 13)) != 0) && 1875 ((h->efr_attr & (0x3F << 17)) != 0)) 1876 iommu->mmio_phys_end = MMIO_REG_END_OFFSET; 1877 else 1878 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; 1879 1880 /* GAM requires GA mode. */ 1881 if ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0) 1882 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; 1883 break; 1884 case 0x11: 1885 case 0x40: 1886 if (h->efr_reg & (1 << 9)) 1887 iommu->mmio_phys_end = MMIO_REG_END_OFFSET; 1888 else 1889 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; 1890 1891 /* XT and GAM require GA mode. 
*/ 1892 if ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0) { 1893 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; 1894 break; 1895 } 1896 1897 if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) 1898 amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE; 1899 1900 if (h->efr_attr & BIT(IOMMU_IVHD_ATTR_HATDIS_SHIFT)) { 1901 pr_warn_once("Host Address Translation is not supported.\n"); 1902 amd_iommu_hatdis = true; 1903 } 1904 1905 early_iommu_features_init(iommu, h); 1906 1907 break; 1908 default: 1909 return -EINVAL; 1910 } 1911 1912 iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys, 1913 iommu->mmio_phys_end); 1914 if (!iommu->mmio_base) 1915 return -ENOMEM; 1916 1917 return init_iommu_from_acpi(iommu, h); 1918 } 1919 1920 static int __init init_iommu_one_late(struct amd_iommu *iommu) 1921 { 1922 int ret; 1923 1924 ret = alloc_iommu_buffers(iommu); 1925 if (ret) 1926 return ret; 1927 1928 iommu->int_enabled = false; 1929 1930 init_translation_status(iommu); 1931 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) { 1932 iommu_disable(iommu); 1933 clear_translation_pre_enabled(iommu); 1934 pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n", 1935 iommu->index); 1936 } 1937 if (amd_iommu_pre_enabled) 1938 amd_iommu_pre_enabled = translation_pre_enabled(iommu); 1939 1940 if (amd_iommu_irq_remap) { 1941 ret = amd_iommu_create_irq_domain(iommu); 1942 if (ret) 1943 return ret; 1944 } 1945 1946 /* 1947 * Make sure IOMMU is not considered to translate itself. The IVRS 1948 * table tells us so, but this is a lie! 1949 */ 1950 iommu->pci_seg->rlookup_table[iommu->devid] = NULL; 1951 1952 return 0; 1953 } 1954 1955 /** 1956 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type 1957 * @ivrs: Pointer to the IVRS header 1958 * 1959 * This function search through all IVDB of the maximum supported IVHD 1960 */ 1961 static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs) 1962 { 1963 u8 *base = (u8 *)ivrs; 1964 struct ivhd_header *ivhd = (struct ivhd_header *) 1965 (base + IVRS_HEADER_LENGTH); 1966 u8 last_type = ivhd->type; 1967 u16 devid = ivhd->devid; 1968 1969 while (((u8 *)ivhd - base < ivrs->length) && 1970 (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) { 1971 u8 *p = (u8 *) ivhd; 1972 1973 if (ivhd->devid == devid) 1974 last_type = ivhd->type; 1975 ivhd = (struct ivhd_header *)(p + ivhd->length); 1976 } 1977 1978 return last_type; 1979 } 1980 1981 /* 1982 * Iterates over all IOMMU entries in the ACPI table, allocates the 1983 * IOMMU structure and initializes it with init_iommu_one() 1984 */ 1985 static int __init init_iommu_all(struct acpi_table_header *table) 1986 { 1987 u8 *p = (u8 *)table, *end = (u8 *)table; 1988 struct ivhd_header *h; 1989 struct amd_iommu *iommu; 1990 int ret; 1991 1992 end += table->length; 1993 p += IVRS_HEADER_LENGTH; 1994 1995 /* Phase 1: Process all IVHD blocks */ 1996 while (p < end) { 1997 h = (struct ivhd_header *)p; 1998 if (*p == amd_iommu_target_ivhd_type) { 1999 2000 DUMP_printk("device: %04x:%02x:%02x.%01x cap: %04x " 2001 "flags: %01x info %04x\n", 2002 h->pci_seg, PCI_BUS_NUM(h->devid), 2003 PCI_SLOT(h->devid), PCI_FUNC(h->devid), 2004 h->cap_ptr, h->flags, h->info); 2005 DUMP_printk(" mmio-addr: %016llx\n", 2006 h->mmio_phys); 2007 2008 iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL); 2009 if (iommu == NULL) 2010 return -ENOMEM; 2011 2012 ret = init_iommu_one(iommu, h, table); 2013 if (ret) 2014 return ret; 2015 } 2016 p += h->length; 2017 2018 } 2019 WARN_ON(p != end); 2020 2021 /* Phase 2 : 
Early feature support check */ 2022 get_global_efr(); 2023 2024 /* Phase 3 : Enabling IOMMU features */ 2025 for_each_iommu(iommu) { 2026 ret = init_iommu_one_late(iommu); 2027 if (ret) 2028 return ret; 2029 } 2030 2031 return 0; 2032 } 2033 2034 static void init_iommu_perf_ctr(struct amd_iommu *iommu) 2035 { 2036 u64 val; 2037 struct pci_dev *pdev = iommu->dev; 2038 2039 if (!check_feature(FEATURE_PC)) 2040 return; 2041 2042 amd_iommu_pc_present = true; 2043 2044 pci_info(pdev, "IOMMU performance counters supported\n"); 2045 2046 val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET); 2047 iommu->max_banks = (u8) ((val >> 12) & 0x3f); 2048 iommu->max_counters = (u8) ((val >> 7) & 0xf); 2049 2050 return; 2051 } 2052 2053 static ssize_t amd_iommu_show_cap(struct device *dev, 2054 struct device_attribute *attr, 2055 char *buf) 2056 { 2057 struct amd_iommu *iommu = dev_to_amd_iommu(dev); 2058 return sysfs_emit(buf, "%x\n", iommu->cap); 2059 } 2060 static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL); 2061 2062 static ssize_t amd_iommu_show_features(struct device *dev, 2063 struct device_attribute *attr, 2064 char *buf) 2065 { 2066 return sysfs_emit(buf, "%llx:%llx\n", amd_iommu_efr, amd_iommu_efr2); 2067 } 2068 static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL); 2069 2070 static struct attribute *amd_iommu_attrs[] = { 2071 &dev_attr_cap.attr, 2072 &dev_attr_features.attr, 2073 NULL, 2074 }; 2075 2076 static struct attribute_group amd_iommu_group = { 2077 .name = "amd-iommu", 2078 .attrs = amd_iommu_attrs, 2079 }; 2080 2081 static const struct attribute_group *amd_iommu_groups[] = { 2082 &amd_iommu_group, 2083 NULL, 2084 }; 2085 2086 /* 2087 * Note: IVHD 0x11 and 0x40 also contains exact copy 2088 * of the IOMMU Extended Feature Register [MMIO Offset 0030h]. 2089 * Default to EFR in IVHD since it is available sooner (i.e. before PCI init). 2090 */ 2091 static void __init late_iommu_features_init(struct amd_iommu *iommu) 2092 { 2093 u64 features, features2; 2094 2095 if (!(iommu->cap & (1 << IOMMU_CAP_EFR))) 2096 return; 2097 2098 /* read extended feature bits */ 2099 features = readq(iommu->mmio_base + MMIO_EXT_FEATURES); 2100 features2 = readq(iommu->mmio_base + MMIO_EXT_FEATURES2); 2101 2102 if (!amd_iommu_efr) { 2103 amd_iommu_efr = features; 2104 amd_iommu_efr2 = features2; 2105 return; 2106 } 2107 2108 /* 2109 * Sanity check and warn if EFR values from 2110 * IVHD and MMIO conflict. 2111 */ 2112 if (features != amd_iommu_efr || 2113 features2 != amd_iommu_efr2) { 2114 pr_warn(FW_WARN 2115 "EFR mismatch. 
Use IVHD EFR (%#llx : %#llx), EFR2 (%#llx : %#llx).\n", 2116 features, amd_iommu_efr, 2117 features2, amd_iommu_efr2); 2118 } 2119 } 2120 2121 static int __init iommu_init_pci(struct amd_iommu *iommu) 2122 { 2123 int cap_ptr = iommu->cap_ptr; 2124 int ret; 2125 2126 iommu->dev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, 2127 PCI_BUS_NUM(iommu->devid), 2128 iommu->devid & 0xff); 2129 if (!iommu->dev) 2130 return -ENODEV; 2131 2132 /* ACPI _PRT won't have an IRQ for IOMMU */ 2133 iommu->dev->irq_managed = 1; 2134 2135 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, 2136 &iommu->cap); 2137 2138 if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB))) 2139 amd_iommu_iotlb_sup = false; 2140 2141 late_iommu_features_init(iommu); 2142 2143 if (check_feature(FEATURE_GT)) { 2144 int glxval; 2145 u64 pasmax; 2146 2147 pasmax = FIELD_GET(FEATURE_PASMAX, amd_iommu_efr); 2148 iommu->iommu.max_pasids = (1 << (pasmax + 1)) - 1; 2149 2150 BUG_ON(iommu->iommu.max_pasids & ~PASID_MASK); 2151 2152 glxval = FIELD_GET(FEATURE_GLX, amd_iommu_efr); 2153 2154 if (amd_iommu_max_glx_val == -1) 2155 amd_iommu_max_glx_val = glxval; 2156 else 2157 amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval); 2158 2159 iommu_enable_gt(iommu); 2160 } 2161 2162 if (check_feature(FEATURE_PPR) && amd_iommu_alloc_ppr_log(iommu)) 2163 return -ENOMEM; 2164 2165 if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) { 2166 pr_info("Using strict mode due to virtualization\n"); 2167 iommu_set_dma_strict(); 2168 amd_iommu_np_cache = true; 2169 } 2170 2171 init_iommu_perf_ctr(iommu); 2172 2173 if (is_rd890_iommu(iommu->dev)) { 2174 int i, j; 2175 2176 iommu->root_pdev = 2177 pci_get_domain_bus_and_slot(iommu->pci_seg->id, 2178 iommu->dev->bus->number, 2179 PCI_DEVFN(0, 0)); 2180 2181 /* 2182 * Some rd890 systems may not be fully reconfigured by the 2183 * BIOS, so it's necessary for us to store this information so 2184 * it can be reprogrammed on resume 2185 */ 2186 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4, 2187 &iommu->stored_addr_lo); 2188 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8, 2189 &iommu->stored_addr_hi); 2190 2191 /* Low bit locks writes to configuration space */ 2192 iommu->stored_addr_lo &= ~1; 2193 2194 for (i = 0; i < 6; i++) 2195 for (j = 0; j < 0x12; j++) 2196 iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j); 2197 2198 for (i = 0; i < 0x83; i++) 2199 iommu->stored_l2[i] = iommu_read_l2(iommu, i); 2200 } 2201 2202 amd_iommu_erratum_746_workaround(iommu); 2203 amd_iommu_ats_write_check_workaround(iommu); 2204 2205 ret = iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev, 2206 amd_iommu_groups, "ivhd%d", iommu->index); 2207 if (ret) 2208 return ret; 2209 2210 /* 2211 * Allocate per IOMMU IOPF queue here so that in attach device path, 2212 * PRI capable device can be added to IOPF queue 2213 */ 2214 if (amd_iommu_gt_ppr_supported()) { 2215 ret = amd_iommu_iopf_init(iommu); 2216 if (ret) 2217 return ret; 2218 } 2219 2220 ret = iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL); 2221 if (ret || amd_iommu_pgtable == PD_MODE_NONE) { 2222 /* 2223 * Remove sysfs if DMA translation is not supported by the 2224 * IOMMU. Do not return an error to enable IRQ remapping 2225 * in state_next(), DTE[V, TV] must eventually be set to 0. 
2226 */ 2227 iommu_device_sysfs_remove(&iommu->iommu); 2228 } 2229 2230 return pci_enable_device(iommu->dev); 2231 } 2232 2233 static void print_iommu_info(void) 2234 { 2235 int i; 2236 static const char * const feat_str[] = { 2237 "PreF", "PPR", "X2APIC", "NX", "GT", "[5]", 2238 "IA", "GA", "HE", "PC" 2239 }; 2240 2241 if (amd_iommu_efr) { 2242 pr_info("Extended features (%#llx, %#llx):", amd_iommu_efr, amd_iommu_efr2); 2243 2244 for (i = 0; i < ARRAY_SIZE(feat_str); ++i) { 2245 if (check_feature(1ULL << i)) 2246 pr_cont(" %s", feat_str[i]); 2247 } 2248 2249 if (check_feature(FEATURE_GAM_VAPIC)) 2250 pr_cont(" GA_vAPIC"); 2251 2252 if (check_feature(FEATURE_SNP)) 2253 pr_cont(" SNP"); 2254 2255 pr_cont("\n"); 2256 } 2257 2258 if (irq_remapping_enabled) { 2259 pr_info("Interrupt remapping enabled\n"); 2260 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE) 2261 pr_info("X2APIC enabled\n"); 2262 } 2263 if (amd_iommu_pgtable == PD_MODE_V2) { 2264 pr_info("V2 page table enabled (Paging mode : %d level)\n", 2265 amd_iommu_gpt_level); 2266 } 2267 } 2268 2269 static int __init amd_iommu_init_pci(void) 2270 { 2271 struct amd_iommu *iommu; 2272 struct amd_iommu_pci_seg *pci_seg; 2273 int ret; 2274 2275 /* Init global identity domain before registering IOMMU */ 2276 amd_iommu_init_identity_domain(); 2277 2278 for_each_iommu(iommu) { 2279 ret = iommu_init_pci(iommu); 2280 if (ret) { 2281 pr_err("IOMMU%d: Failed to initialize IOMMU Hardware (error=%d)!\n", 2282 iommu->index, ret); 2283 goto out; 2284 } 2285 /* Need to setup range after PCI init */ 2286 iommu_set_cwwb_range(iommu); 2287 } 2288 2289 /* 2290 * Order is important here to make sure any unity map requirements are 2291 * fulfilled. The unity mappings are created and written to the device 2292 * table during the iommu_init_pci() call. 2293 * 2294 * After that we call init_device_table_dma() to make sure any 2295 * uninitialized DTE will block DMA, and in the end we flush the caches 2296 * of all IOMMUs to make sure the changes to the device table are 2297 * active. 2298 */ 2299 for_each_pci_segment(pci_seg) 2300 init_device_table_dma(pci_seg); 2301 2302 for_each_iommu(iommu) 2303 amd_iommu_flush_all_caches(iommu); 2304 2305 print_iommu_info(); 2306 2307 out: 2308 return ret; 2309 } 2310 2311 /**************************************************************************** 2312 * 2313 * The following functions initialize the MSI interrupts for all IOMMUs 2314 * in the system. It's a bit challenging because there could be multiple 2315 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per 2316 * pci_dev. 
2317 * 2318 ****************************************************************************/ 2319 2320 static int iommu_setup_msi(struct amd_iommu *iommu) 2321 { 2322 int r; 2323 2324 r = pci_enable_msi(iommu->dev); 2325 if (r) 2326 return r; 2327 2328 r = request_threaded_irq(iommu->dev->irq, 2329 amd_iommu_int_handler, 2330 amd_iommu_int_thread, 2331 0, "AMD-Vi", 2332 iommu); 2333 2334 if (r) { 2335 pci_disable_msi(iommu->dev); 2336 return r; 2337 } 2338 2339 return 0; 2340 } 2341 2342 union intcapxt { 2343 u64 capxt; 2344 struct { 2345 u64 reserved_0 : 2, 2346 dest_mode_logical : 1, 2347 reserved_1 : 5, 2348 destid_0_23 : 24, 2349 vector : 8, 2350 reserved_2 : 16, 2351 destid_24_31 : 8; 2352 }; 2353 } __attribute__ ((packed)); 2354 2355 2356 static struct irq_chip intcapxt_controller; 2357 2358 static int intcapxt_irqdomain_activate(struct irq_domain *domain, 2359 struct irq_data *irqd, bool reserve) 2360 { 2361 return 0; 2362 } 2363 2364 static void intcapxt_irqdomain_deactivate(struct irq_domain *domain, 2365 struct irq_data *irqd) 2366 { 2367 } 2368 2369 2370 static int intcapxt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq, 2371 unsigned int nr_irqs, void *arg) 2372 { 2373 struct irq_alloc_info *info = arg; 2374 int i, ret; 2375 2376 if (!info || info->type != X86_IRQ_ALLOC_TYPE_AMDVI) 2377 return -EINVAL; 2378 2379 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); 2380 if (ret < 0) 2381 return ret; 2382 2383 for (i = virq; i < virq + nr_irqs; i++) { 2384 struct irq_data *irqd = irq_domain_get_irq_data(domain, i); 2385 2386 irqd->chip = &intcapxt_controller; 2387 irqd->hwirq = info->hwirq; 2388 irqd->chip_data = info->data; 2389 __irq_set_handler(i, handle_edge_irq, 0, "edge"); 2390 } 2391 2392 return ret; 2393 } 2394 2395 static void intcapxt_irqdomain_free(struct irq_domain *domain, unsigned int virq, 2396 unsigned int nr_irqs) 2397 { 2398 irq_domain_free_irqs_top(domain, virq, nr_irqs); 2399 } 2400 2401 2402 static void intcapxt_unmask_irq(struct irq_data *irqd) 2403 { 2404 struct amd_iommu *iommu = irqd->chip_data; 2405 struct irq_cfg *cfg = irqd_cfg(irqd); 2406 union intcapxt xt; 2407 2408 xt.capxt = 0ULL; 2409 xt.dest_mode_logical = apic->dest_mode_logical; 2410 xt.vector = cfg->vector; 2411 xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0); 2412 xt.destid_24_31 = cfg->dest_apicid >> 24; 2413 2414 writeq(xt.capxt, iommu->mmio_base + irqd->hwirq); 2415 } 2416 2417 static void intcapxt_mask_irq(struct irq_data *irqd) 2418 { 2419 struct amd_iommu *iommu = irqd->chip_data; 2420 2421 writeq(0, iommu->mmio_base + irqd->hwirq); 2422 } 2423 2424 2425 static int intcapxt_set_affinity(struct irq_data *irqd, 2426 const struct cpumask *mask, bool force) 2427 { 2428 struct irq_data *parent = irqd->parent_data; 2429 int ret; 2430 2431 ret = parent->chip->irq_set_affinity(parent, mask, force); 2432 if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE) 2433 return ret; 2434 return 0; 2435 } 2436 2437 static int intcapxt_set_wake(struct irq_data *irqd, unsigned int on) 2438 { 2439 return on ? 
-EOPNOTSUPP : 0; 2440 } 2441 2442 static struct irq_chip intcapxt_controller = { 2443 .name = "IOMMU-MSI", 2444 .irq_unmask = intcapxt_unmask_irq, 2445 .irq_mask = intcapxt_mask_irq, 2446 .irq_ack = irq_chip_ack_parent, 2447 .irq_retrigger = irq_chip_retrigger_hierarchy, 2448 .irq_set_affinity = intcapxt_set_affinity, 2449 .irq_set_wake = intcapxt_set_wake, 2450 .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_MOVE_DEFERRED, 2451 }; 2452 2453 static const struct irq_domain_ops intcapxt_domain_ops = { 2454 .alloc = intcapxt_irqdomain_alloc, 2455 .free = intcapxt_irqdomain_free, 2456 .activate = intcapxt_irqdomain_activate, 2457 .deactivate = intcapxt_irqdomain_deactivate, 2458 }; 2459 2460 2461 static struct irq_domain *iommu_irqdomain; 2462 2463 static struct irq_domain *iommu_get_irqdomain(void) 2464 { 2465 struct fwnode_handle *fn; 2466 2467 /* No need for locking here (yet) as the init is single-threaded */ 2468 if (iommu_irqdomain) 2469 return iommu_irqdomain; 2470 2471 fn = irq_domain_alloc_named_fwnode("AMD-Vi-MSI"); 2472 if (!fn) 2473 return NULL; 2474 2475 iommu_irqdomain = irq_domain_create_hierarchy(x86_vector_domain, 0, 0, 2476 fn, &intcapxt_domain_ops, 2477 NULL); 2478 if (!iommu_irqdomain) 2479 irq_domain_free_fwnode(fn); 2480 2481 return iommu_irqdomain; 2482 } 2483 2484 static int __iommu_setup_intcapxt(struct amd_iommu *iommu, const char *devname, 2485 int hwirq, irq_handler_t thread_fn) 2486 { 2487 struct irq_domain *domain; 2488 struct irq_alloc_info info; 2489 int irq, ret; 2490 int node = dev_to_node(&iommu->dev->dev); 2491 2492 domain = iommu_get_irqdomain(); 2493 if (!domain) 2494 return -ENXIO; 2495 2496 init_irq_alloc_info(&info, NULL); 2497 info.type = X86_IRQ_ALLOC_TYPE_AMDVI; 2498 info.data = iommu; 2499 info.hwirq = hwirq; 2500 2501 irq = irq_domain_alloc_irqs(domain, 1, node, &info); 2502 if (irq < 0) { 2503 irq_domain_remove(domain); 2504 return irq; 2505 } 2506 2507 ret = request_threaded_irq(irq, amd_iommu_int_handler, 2508 thread_fn, 0, devname, iommu); 2509 if (ret) { 2510 irq_domain_free_irqs(irq, 1); 2511 irq_domain_remove(domain); 2512 return ret; 2513 } 2514 2515 return 0; 2516 } 2517 2518 static int iommu_setup_intcapxt(struct amd_iommu *iommu) 2519 { 2520 int ret; 2521 2522 snprintf(iommu->evt_irq_name, sizeof(iommu->evt_irq_name), 2523 "AMD-Vi%d-Evt", iommu->index); 2524 ret = __iommu_setup_intcapxt(iommu, iommu->evt_irq_name, 2525 MMIO_INTCAPXT_EVT_OFFSET, 2526 amd_iommu_int_thread_evtlog); 2527 if (ret) 2528 return ret; 2529 2530 snprintf(iommu->ppr_irq_name, sizeof(iommu->ppr_irq_name), 2531 "AMD-Vi%d-PPR", iommu->index); 2532 ret = __iommu_setup_intcapxt(iommu, iommu->ppr_irq_name, 2533 MMIO_INTCAPXT_PPR_OFFSET, 2534 amd_iommu_int_thread_pprlog); 2535 if (ret) 2536 return ret; 2537 2538 #ifdef CONFIG_IRQ_REMAP 2539 snprintf(iommu->ga_irq_name, sizeof(iommu->ga_irq_name), 2540 "AMD-Vi%d-GA", iommu->index); 2541 ret = __iommu_setup_intcapxt(iommu, iommu->ga_irq_name, 2542 MMIO_INTCAPXT_GALOG_OFFSET, 2543 amd_iommu_int_thread_galog); 2544 #endif 2545 2546 return ret; 2547 } 2548 2549 static int iommu_init_irq(struct amd_iommu *iommu) 2550 { 2551 int ret; 2552 2553 if (iommu->int_enabled) 2554 goto enable_faults; 2555 2556 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE) 2557 ret = iommu_setup_intcapxt(iommu); 2558 else if (iommu->dev->msi_cap) 2559 ret = iommu_setup_msi(iommu); 2560 else 2561 ret = -ENODEV; 2562 2563 if (ret) 2564 return ret; 2565 2566 iommu->int_enabled = true; 2567 enable_faults: 2568 2569 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE) 
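		/* Deliver the IOMMU's own interrupts through the IntCapXT registers in x2APIC mode */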
		iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);

	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

	return 0;
}

/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/

static void __init free_unity_maps(void)
{
	struct unity_map_entry *entry, *next;
	struct amd_iommu_pci_seg *p, *pci_seg;

	for_each_pci_segment_safe(pci_seg, p) {
		list_for_each_entry_safe(entry, next, &pci_seg->unity_map, list) {
			list_del(&entry->list);
			kfree(entry);
		}
	}
}

/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m,
				       struct acpi_table_header *ivrs_base)
{
	struct unity_map_entry *e = NULL;
	struct amd_iommu_pci_seg *pci_seg;
	char *s;

	pci_seg = get_pci_segment(m->pci_seg, ivrs_base);
	if (pci_seg == NULL)
		return -ENOMEM;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	switch (m->type) {
	default:
		kfree(e);
		return 0;
	case ACPI_IVMD_TYPE:
		s = "IVMD_TYPE\t\t\t";
		e->devid_start = e->devid_end = m->devid;
		break;
	case ACPI_IVMD_TYPE_ALL:
		s = "IVMD_TYPE_ALL\t\t";
		e->devid_start = 0;
		e->devid_end = pci_seg->last_bdf;
		break;
	case ACPI_IVMD_TYPE_RANGE:
		s = "IVMD_TYPE_RANGE\t\t";
		e->devid_start = m->devid;
		e->devid_end = m->aux;
		break;
	}
	e->address_start = PAGE_ALIGN(m->range_start);
	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
	e->prot = m->flags >> 1;

	/*
	 * Treat per-device exclusion ranges as r/w unity-mapped regions,
	 * since some buggy BIOSes overwrite the exclusion range (the
	 * exclusion_start and exclusion_length members) when multiple
	 * exclusion ranges (IVMD entries) are defined in the ACPI table.
2643 */ 2644 if (m->flags & IVMD_FLAG_EXCL_RANGE) 2645 e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1; 2646 2647 DUMP_printk("%s devid_start: %04x:%02x:%02x.%x devid_end: " 2648 "%04x:%02x:%02x.%x range_start: %016llx range_end: %016llx" 2649 " flags: %x\n", s, m->pci_seg, 2650 PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start), 2651 PCI_FUNC(e->devid_start), m->pci_seg, 2652 PCI_BUS_NUM(e->devid_end), 2653 PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end), 2654 e->address_start, e->address_end, m->flags); 2655 2656 list_add_tail(&e->list, &pci_seg->unity_map); 2657 2658 return 0; 2659 } 2660 2661 /* iterates over all memory definitions we find in the ACPI table */ 2662 static int __init init_memory_definitions(struct acpi_table_header *table) 2663 { 2664 u8 *p = (u8 *)table, *end = (u8 *)table; 2665 struct ivmd_header *m; 2666 2667 end += table->length; 2668 p += IVRS_HEADER_LENGTH; 2669 2670 while (p < end) { 2671 m = (struct ivmd_header *)p; 2672 if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE)) 2673 init_unity_map_range(m, table); 2674 2675 p += m->length; 2676 } 2677 2678 return 0; 2679 } 2680 2681 /* 2682 * Init the device table to not allow DMA access for devices 2683 */ 2684 static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg) 2685 { 2686 u32 devid; 2687 struct dev_table_entry *dev_table = pci_seg->dev_table; 2688 2689 if (!dev_table || amd_iommu_pgtable == PD_MODE_NONE) 2690 return; 2691 2692 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) { 2693 set_dte_bit(&dev_table[devid], DEV_ENTRY_VALID); 2694 if (!amd_iommu_snp_en) 2695 set_dte_bit(&dev_table[devid], DEV_ENTRY_TRANSLATION); 2696 } 2697 } 2698 2699 static void __init uninit_device_table_dma(struct amd_iommu_pci_seg *pci_seg) 2700 { 2701 u32 devid; 2702 struct dev_table_entry *dev_table = pci_seg->dev_table; 2703 2704 if (dev_table == NULL) 2705 return; 2706 2707 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) { 2708 dev_table[devid].data[0] = 0ULL; 2709 dev_table[devid].data[1] = 0ULL; 2710 } 2711 } 2712 2713 static void init_device_table(void) 2714 { 2715 struct amd_iommu_pci_seg *pci_seg; 2716 u32 devid; 2717 2718 if (!amd_iommu_irq_remap) 2719 return; 2720 2721 for_each_pci_segment(pci_seg) { 2722 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) 2723 set_dte_bit(&pci_seg->dev_table[devid], DEV_ENTRY_IRQ_TBL_EN); 2724 } 2725 } 2726 2727 static void iommu_init_flags(struct amd_iommu *iommu) 2728 { 2729 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ? 2730 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : 2731 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); 2732 2733 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ? 2734 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : 2735 iommu_feature_disable(iommu, CONTROL_PASSPW_EN); 2736 2737 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ? 2738 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : 2739 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); 2740 2741 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ? 
	    iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
	    iommu_feature_disable(iommu, CONTROL_ISOC_EN);

	/*
	 * make IOMMU memory accesses cache coherent
	 */
	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

	/* Set IOTLB invalidation timeout to 1s */
	iommu_feature_set(iommu, CTRL_INV_TO_1S, CTRL_INV_TO_MASK, CONTROL_INV_TIMEOUT);

	/* Enable Enhanced Peripheral Page Request Handling */
	if (check_feature(FEATURE_EPHSUP))
		iommu_feature_enable(iommu, CONTROL_EPH_EN);
}

static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
	int i, j;
	u32 ioc_feature_control;
	struct pci_dev *pdev = iommu->root_pdev;

	/* RD890 BIOSes may not have completely reconfigured the iommu */
	if (!is_rd890_iommu(iommu->dev) || !pdev)
		return;

	/*
	 * First, we need to ensure that the iommu is enabled. This is
	 * controlled by a register in the northbridge
	 */

	/* Select Northbridge indirect register 0x75 and enable writing */
	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);

	/* Enable the iommu */
	if (!(ioc_feature_control & 0x1))
		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);

	/* Restore the iommu BAR */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo);
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
			       iommu->stored_addr_hi);

	/* Restore the l1 indirect regs for each of the 6 l1s */
	for (i = 0; i < 6; i++)
		for (j = 0; j < 0x12; j++)
			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);

	/* Restore the l2 indirect regs */
	for (i = 0; i < 0x83; i++)
		iommu_write_l2(iommu, i, iommu->stored_l2[i]);

	/* Lock PCI setup registers */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo | 1);
}

static void iommu_enable_ga(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	switch (amd_iommu_guest_ir) {
	case AMD_IOMMU_GUEST_IR_VAPIC:
	case AMD_IOMMU_GUEST_IR_LEGACY_GA:
		iommu_feature_enable(iommu, CONTROL_GA_EN);
		iommu->irte_ops = &irte_128_ops;
		break;
	default:
		iommu->irte_ops = &irte_32_ops;
		break;
	}
#endif
}

static void iommu_disable_irtcachedis(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS);
}

static void iommu_enable_irtcachedis(struct amd_iommu *iommu)
{
	u64 ctrl;

	if (!amd_iommu_irtcachedis)
		return;

	/*
	 * Note:
	 * Support for the IRTCacheDis feature is determined by
	 * checking whether the bit is writable.
	 */
	iommu_feature_enable(iommu, CONTROL_IRTCACHEDIS);
	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= (1ULL << CONTROL_IRTCACHEDIS);
	if (ctrl)
		iommu->irtcachedis_enabled = true;
	pr_info("iommu%d (%#06x) : IRT cache is %s\n",
		iommu->index, iommu->devid,
		iommu->irtcachedis_enabled ?
"disabled" : "enabled"); 2842 } 2843 2844 static void iommu_enable_2k_int(struct amd_iommu *iommu) 2845 { 2846 if (!FEATURE_NUM_INT_REMAP_SUP_2K(amd_iommu_efr2)) 2847 return; 2848 2849 iommu_feature_set(iommu, 2850 CONTROL_NUM_INT_REMAP_MODE_2K, 2851 CONTROL_NUM_INT_REMAP_MODE_MASK, 2852 CONTROL_NUM_INT_REMAP_MODE); 2853 } 2854 2855 static void early_enable_iommu(struct amd_iommu *iommu) 2856 { 2857 iommu_disable(iommu); 2858 iommu_init_flags(iommu); 2859 iommu_set_device_table(iommu); 2860 iommu_enable_command_buffer(iommu); 2861 iommu_enable_event_buffer(iommu); 2862 iommu_set_exclusion_range(iommu); 2863 iommu_enable_gt(iommu); 2864 iommu_enable_ga(iommu); 2865 iommu_enable_xt(iommu); 2866 iommu_enable_irtcachedis(iommu); 2867 iommu_enable_2k_int(iommu); 2868 iommu_enable(iommu); 2869 amd_iommu_flush_all_caches(iommu); 2870 } 2871 2872 /* 2873 * This function finally enables all IOMMUs found in the system after 2874 * they have been initialized. 2875 * 2876 * Or if in kdump kernel and IOMMUs are all pre-enabled, try to reuse 2877 * the old content of device table entries. Not this case or reuse failed, 2878 * just continue as normal kernel does. 2879 */ 2880 static void early_enable_iommus(void) 2881 { 2882 struct amd_iommu *iommu; 2883 struct amd_iommu_pci_seg *pci_seg; 2884 2885 if (!reuse_device_table()) { 2886 /* 2887 * If come here because of failure in reusing device table from old 2888 * kernel with all IOMMUs enabled, print error message and try to 2889 * free allocated old_dev_tbl_cpy. 2890 */ 2891 if (amd_iommu_pre_enabled) { 2892 pr_err("Failed to reuse DEV table from previous kernel.\n"); 2893 /* 2894 * Bail out early if unable to remap/reuse DEV table from 2895 * previous kernel if SNP enabled as IOMMU commands will 2896 * time out without DEV table and cause kdump boot panic. 2897 */ 2898 BUG_ON(check_feature(FEATURE_SNP)); 2899 } 2900 2901 for_each_pci_segment(pci_seg) { 2902 if (pci_seg->old_dev_tbl_cpy != NULL) { 2903 memunmap((void *)pci_seg->old_dev_tbl_cpy); 2904 pci_seg->old_dev_tbl_cpy = NULL; 2905 } 2906 } 2907 2908 for_each_iommu(iommu) { 2909 clear_translation_pre_enabled(iommu); 2910 early_enable_iommu(iommu); 2911 } 2912 } else { 2913 pr_info("Reused DEV table from previous kernel.\n"); 2914 2915 for_each_pci_segment(pci_seg) { 2916 iommu_free_pages(pci_seg->dev_table); 2917 pci_seg->dev_table = pci_seg->old_dev_tbl_cpy; 2918 } 2919 2920 for_each_iommu(iommu) { 2921 iommu_disable_command_buffer(iommu); 2922 iommu_disable_event_buffer(iommu); 2923 iommu_disable_irtcachedis(iommu); 2924 iommu_enable_command_buffer(iommu); 2925 iommu_enable_event_buffer(iommu); 2926 iommu_enable_ga(iommu); 2927 iommu_enable_xt(iommu); 2928 iommu_enable_irtcachedis(iommu); 2929 iommu_enable_2k_int(iommu); 2930 iommu_set_device_table(iommu); 2931 amd_iommu_flush_all_caches(iommu); 2932 } 2933 } 2934 } 2935 2936 static void enable_iommus_ppr(void) 2937 { 2938 struct amd_iommu *iommu; 2939 2940 if (!amd_iommu_gt_ppr_supported()) 2941 return; 2942 2943 for_each_iommu(iommu) 2944 amd_iommu_enable_ppr_log(iommu); 2945 } 2946 2947 static void enable_iommus_vapic(void) 2948 { 2949 #ifdef CONFIG_IRQ_REMAP 2950 u32 status, i; 2951 struct amd_iommu *iommu; 2952 2953 for_each_iommu(iommu) { 2954 /* 2955 * Disable GALog if already running. It could have been enabled 2956 * in the previous boot before kdump. 
2957 */ 2958 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); 2959 if (!(status & MMIO_STATUS_GALOG_RUN_MASK)) 2960 continue; 2961 2962 iommu_feature_disable(iommu, CONTROL_GALOG_EN); 2963 iommu_feature_disable(iommu, CONTROL_GAINT_EN); 2964 2965 /* 2966 * Need to set and poll check the GALOGRun bit to zero before 2967 * we can set/ modify GA Log registers safely. 2968 */ 2969 for (i = 0; i < MMIO_STATUS_TIMEOUT; ++i) { 2970 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); 2971 if (!(status & MMIO_STATUS_GALOG_RUN_MASK)) 2972 break; 2973 udelay(10); 2974 } 2975 2976 if (WARN_ON(i >= MMIO_STATUS_TIMEOUT)) 2977 return; 2978 } 2979 2980 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) && 2981 !check_feature(FEATURE_GAM_VAPIC)) { 2982 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA; 2983 return; 2984 } 2985 2986 if (amd_iommu_snp_en && 2987 !FEATURE_SNPAVICSUP_GAM(amd_iommu_efr2)) { 2988 pr_warn("Force to disable Virtual APIC due to SNP\n"); 2989 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA; 2990 return; 2991 } 2992 2993 /* Enabling GAM and SNPAVIC support */ 2994 for_each_iommu(iommu) { 2995 if (iommu_init_ga_log(iommu) || 2996 iommu_ga_log_enable(iommu)) 2997 return; 2998 2999 iommu_feature_enable(iommu, CONTROL_GAM_EN); 3000 if (amd_iommu_snp_en) 3001 iommu_feature_enable(iommu, CONTROL_SNPAVIC_EN); 3002 } 3003 3004 amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP); 3005 pr_info("Virtual APIC enabled\n"); 3006 #endif 3007 } 3008 3009 static void disable_iommus(void) 3010 { 3011 struct amd_iommu *iommu; 3012 3013 for_each_iommu(iommu) 3014 iommu_disable(iommu); 3015 3016 #ifdef CONFIG_IRQ_REMAP 3017 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) 3018 amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP); 3019 #endif 3020 } 3021 3022 /* 3023 * Suspend/Resume support 3024 * disable suspend until real resume implemented 3025 */ 3026 3027 static void amd_iommu_resume(void *data) 3028 { 3029 struct amd_iommu *iommu; 3030 3031 for_each_iommu(iommu) 3032 iommu_apply_resume_quirks(iommu); 3033 3034 /* re-load the hardware */ 3035 for_each_iommu(iommu) 3036 early_enable_iommu(iommu); 3037 3038 amd_iommu_enable_interrupts(); 3039 } 3040 3041 static int amd_iommu_suspend(void *data) 3042 { 3043 /* disable IOMMUs to go out of the way for BIOS */ 3044 disable_iommus(); 3045 3046 return 0; 3047 } 3048 3049 static const struct syscore_ops amd_iommu_syscore_ops = { 3050 .suspend = amd_iommu_suspend, 3051 .resume = amd_iommu_resume, 3052 }; 3053 3054 static struct syscore amd_iommu_syscore = { 3055 .ops = &amd_iommu_syscore_ops, 3056 }; 3057 3058 static void __init free_iommu_resources(void) 3059 { 3060 free_iommu_all(); 3061 free_pci_segments(); 3062 } 3063 3064 /* SB IOAPIC is always on this device in AMD systems */ 3065 #define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0)) 3066 3067 static bool __init check_ioapic_information(void) 3068 { 3069 const char *fw_bug = FW_BUG; 3070 bool ret, has_sb_ioapic; 3071 int idx; 3072 3073 has_sb_ioapic = false; 3074 ret = false; 3075 3076 /* 3077 * If we have map overrides on the kernel command line the 3078 * messages in this function might not describe firmware bugs 3079 * anymore - so be careful 3080 */ 3081 if (cmdline_maps) 3082 fw_bug = ""; 3083 3084 for (idx = 0; idx < nr_ioapics; idx++) { 3085 int devid, id = mpc_ioapic_id(idx); 3086 3087 devid = get_ioapic_devid(id); 3088 if (devid < 0) { 3089 pr_err("%s: IOAPIC[%d] not in IVRS table\n", 3090 fw_bug, id); 3091 ret = false; 3092 } else if (devid == IOAPIC_SB_DEVID) { 3093 
has_sb_ioapic = true; 3094 ret = true; 3095 } 3096 } 3097 3098 if (!has_sb_ioapic) { 3099 /* 3100 * We expect the SB IOAPIC to be listed in the IVRS 3101 * table. The system timer is connected to the SB IOAPIC 3102 * and if we don't have it in the list the system will 3103 * panic at boot time. This situation usually happens 3104 * when the BIOS is buggy and provides us the wrong 3105 * device id for the IOAPIC in the system. 3106 */ 3107 pr_err("%s: No southbridge IOAPIC found\n", fw_bug); 3108 } 3109 3110 if (!ret) 3111 pr_err("Disabling interrupt remapping\n"); 3112 3113 return ret; 3114 } 3115 3116 static void __init free_dma_resources(void) 3117 { 3118 ida_destroy(&pdom_ids); 3119 3120 free_unity_maps(); 3121 } 3122 3123 static void __init ivinfo_init(void *ivrs) 3124 { 3125 amd_iommu_ivinfo = *((u32 *)(ivrs + IOMMU_IVINFO_OFFSET)); 3126 } 3127 3128 /* 3129 * This is the hardware init function for AMD IOMMU in the system. 3130 * This function is called either from amd_iommu_init or from the interrupt 3131 * remapping setup code. 3132 * 3133 * This function basically parses the ACPI table for AMD IOMMU (IVRS) 3134 * four times: 3135 * 3136 * 1 pass) Discover the most comprehensive IVHD type to use. 3137 * 3138 * 2 pass) Find the highest PCI device id the driver has to handle. 3139 * Upon this information the size of the data structures is 3140 * determined that needs to be allocated. 3141 * 3142 * 3 pass) Initialize the data structures just allocated with the 3143 * information in the ACPI table about available AMD IOMMUs 3144 * in the system. It also maps the PCI devices in the 3145 * system to specific IOMMUs 3146 * 3147 * 4 pass) After the basic data structures are allocated and 3148 * initialized we update them with information about memory 3149 * remapping requirements parsed out of the ACPI table in 3150 * this last pass. 3151 * 3152 * After everything is set up the IOMMUs are enabled and the necessary 3153 * hotplug and suspend notifiers are registered. 3154 */ 3155 static int __init early_amd_iommu_init(void) 3156 { 3157 struct acpi_table_header *ivrs_base; 3158 int ret; 3159 acpi_status status; 3160 u8 efr_hats; 3161 3162 if (!amd_iommu_detected) 3163 return -ENODEV; 3164 3165 status = acpi_get_table("IVRS", 0, &ivrs_base); 3166 if (status == AE_NOT_FOUND) 3167 return -ENODEV; 3168 else if (ACPI_FAILURE(status)) { 3169 const char *err = acpi_format_exception(status); 3170 pr_err("IVRS table error: %s\n", err); 3171 return -EINVAL; 3172 } 3173 3174 if (!boot_cpu_has(X86_FEATURE_CX16)) { 3175 pr_err("Failed to initialize. 
The CMPXCHG16B feature is required.\n"); 3176 ret = -EINVAL; 3177 goto out; 3178 } 3179 3180 /* 3181 * Validate checksum here so we don't need to do it when 3182 * we actually parse the table 3183 */ 3184 ret = check_ivrs_checksum(ivrs_base); 3185 if (ret) 3186 goto out; 3187 3188 ivinfo_init(ivrs_base); 3189 3190 amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base); 3191 DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type); 3192 3193 /* 3194 * now the data structures are allocated and basically initialized 3195 * start the real acpi table scan 3196 */ 3197 ret = init_iommu_all(ivrs_base); 3198 if (ret) 3199 goto out; 3200 3201 /* 5 level guest page table */ 3202 if (cpu_feature_enabled(X86_FEATURE_LA57) && 3203 FIELD_GET(FEATURE_GATS, amd_iommu_efr) == GUEST_PGTABLE_5_LEVEL) 3204 amd_iommu_gpt_level = PAGE_MODE_5_LEVEL; 3205 3206 efr_hats = FIELD_GET(FEATURE_HATS, amd_iommu_efr); 3207 if (efr_hats != 0x3) { 3208 /* 3209 * efr[HATS] bits specify the maximum host translation level 3210 * supported, with LEVEL 4 being initial max level. 3211 */ 3212 amd_iommu_hpt_level = efr_hats + PAGE_MODE_4_LEVEL; 3213 } else { 3214 pr_warn_once(FW_BUG "Disable host address translation due to invalid translation level (%#x).\n", 3215 efr_hats); 3216 amd_iommu_hatdis = true; 3217 } 3218 3219 if (amd_iommu_pgtable == PD_MODE_V2) { 3220 if (!amd_iommu_v2_pgtbl_supported()) { 3221 pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n"); 3222 amd_iommu_pgtable = PD_MODE_V1; 3223 } 3224 } 3225 3226 if (amd_iommu_hatdis) { 3227 /* 3228 * Host (v1) page table is not available. Attempt to use 3229 * Guest (v2) page table. 3230 */ 3231 if (amd_iommu_v2_pgtbl_supported()) 3232 amd_iommu_pgtable = PD_MODE_V2; 3233 else 3234 amd_iommu_pgtable = PD_MODE_NONE; 3235 } 3236 3237 /* Disable any previously enabled IOMMUs */ 3238 if (!is_kdump_kernel() || amd_iommu_disabled) 3239 disable_iommus(); 3240 3241 if (amd_iommu_irq_remap) 3242 amd_iommu_irq_remap = check_ioapic_information(); 3243 3244 if (amd_iommu_irq_remap) { 3245 struct amd_iommu_pci_seg *pci_seg; 3246 ret = -ENOMEM; 3247 for_each_pci_segment(pci_seg) { 3248 if (alloc_irq_lookup_table(pci_seg)) 3249 goto out; 3250 } 3251 } 3252 3253 ret = init_memory_definitions(ivrs_base); 3254 if (ret) 3255 goto out; 3256 3257 /* init the device table */ 3258 init_device_table(); 3259 3260 out: 3261 /* Don't leak any ACPI memory */ 3262 acpi_put_table(ivrs_base); 3263 3264 return ret; 3265 } 3266 3267 static int amd_iommu_enable_interrupts(void) 3268 { 3269 struct amd_iommu *iommu; 3270 int ret = 0; 3271 3272 for_each_iommu(iommu) { 3273 ret = iommu_init_irq(iommu); 3274 if (ret) 3275 goto out; 3276 } 3277 3278 /* 3279 * Interrupt handler is ready to process interrupts. Enable 3280 * PPR and GA log interrupt for all IOMMUs. 
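	 * The event log interrupt itself was already enabled per IOMMU in
	 * iommu_init_irq().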
3281 */ 3282 enable_iommus_vapic(); 3283 enable_iommus_ppr(); 3284 3285 out: 3286 return ret; 3287 } 3288 3289 static bool __init detect_ivrs(void) 3290 { 3291 struct acpi_table_header *ivrs_base; 3292 acpi_status status; 3293 int i; 3294 3295 status = acpi_get_table("IVRS", 0, &ivrs_base); 3296 if (status == AE_NOT_FOUND) 3297 return false; 3298 else if (ACPI_FAILURE(status)) { 3299 const char *err = acpi_format_exception(status); 3300 pr_err("IVRS table error: %s\n", err); 3301 return false; 3302 } 3303 3304 acpi_put_table(ivrs_base); 3305 3306 if (amd_iommu_force_enable) 3307 goto out; 3308 3309 /* Don't use IOMMU if there is Stoney Ridge graphics */ 3310 for (i = 0; i < 32; i++) { 3311 u32 pci_id; 3312 3313 pci_id = read_pci_config(0, i, 0, 0); 3314 if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) { 3315 pr_info("Disable IOMMU on Stoney Ridge\n"); 3316 return false; 3317 } 3318 } 3319 3320 out: 3321 /* Make sure ACS will be enabled during PCI probe */ 3322 pci_request_acs(); 3323 3324 return true; 3325 } 3326 3327 static __init void iommu_snp_enable(void) 3328 { 3329 #ifdef CONFIG_KVM_AMD_SEV 3330 if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP)) 3331 return; 3332 /* 3333 * The SNP support requires that IOMMU must be enabled, and is 3334 * configured with V1 page table (DTE[Mode] = 0 is not supported). 3335 */ 3336 if (no_iommu || iommu_default_passthrough()) { 3337 pr_warn("SNP: IOMMU disabled or configured in passthrough mode, SNP cannot be supported.\n"); 3338 goto disable_snp; 3339 } 3340 3341 if (amd_iommu_pgtable != PD_MODE_V1) { 3342 pr_warn("SNP: IOMMU is configured with V2 page table mode, SNP cannot be supported.\n"); 3343 goto disable_snp; 3344 } 3345 3346 amd_iommu_snp_en = check_feature(FEATURE_SNP); 3347 if (!amd_iommu_snp_en) { 3348 pr_warn("SNP: IOMMU SNP feature not enabled, SNP cannot be supported.\n"); 3349 goto disable_snp; 3350 } 3351 3352 /* 3353 * Enable host SNP support once SNP support is checked on IOMMU. 3354 */ 3355 if (snp_rmptable_init()) { 3356 pr_warn("SNP: RMP initialization failed, SNP cannot be supported.\n"); 3357 goto disable_snp; 3358 } 3359 3360 pr_info("IOMMU SNP support enabled.\n"); 3361 return; 3362 3363 disable_snp: 3364 cc_platform_clear(CC_ATTR_HOST_SEV_SNP); 3365 #endif 3366 } 3367 3368 /**************************************************************************** 3369 * 3370 * AMD IOMMU Initialization State Machine 3371 * 3372 ****************************************************************************/ 3373 3374 static int __init state_next(void) 3375 { 3376 int ret = 0; 3377 3378 switch (init_state) { 3379 case IOMMU_START_STATE: 3380 if (!detect_ivrs()) { 3381 init_state = IOMMU_NOT_FOUND; 3382 ret = -ENODEV; 3383 } else { 3384 init_state = IOMMU_IVRS_DETECTED; 3385 } 3386 break; 3387 case IOMMU_IVRS_DETECTED: 3388 if (amd_iommu_disabled) { 3389 init_state = IOMMU_CMDLINE_DISABLED; 3390 ret = -EINVAL; 3391 } else { 3392 ret = early_amd_iommu_init(); 3393 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED; 3394 } 3395 break; 3396 case IOMMU_ACPI_FINISHED: 3397 early_enable_iommus(); 3398 x86_platform.iommu_shutdown = disable_iommus; 3399 init_state = IOMMU_ENABLED; 3400 break; 3401 case IOMMU_ENABLED: 3402 register_syscore(&amd_iommu_syscore); 3403 iommu_snp_enable(); 3404 ret = amd_iommu_init_pci(); 3405 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT; 3406 break; 3407 case IOMMU_PCI_INIT: 3408 ret = amd_iommu_enable_interrupts(); 3409 init_state = ret ? 
IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN; 3410 break; 3411 case IOMMU_INTERRUPTS_EN: 3412 init_state = IOMMU_INITIALIZED; 3413 break; 3414 case IOMMU_INITIALIZED: 3415 /* Nothing to do */ 3416 break; 3417 case IOMMU_NOT_FOUND: 3418 case IOMMU_INIT_ERROR: 3419 case IOMMU_CMDLINE_DISABLED: 3420 /* Error states => do nothing */ 3421 ret = -EINVAL; 3422 break; 3423 default: 3424 /* Unknown state */ 3425 BUG(); 3426 } 3427 3428 if (ret) { 3429 free_dma_resources(); 3430 if (!irq_remapping_enabled) { 3431 disable_iommus(); 3432 free_iommu_resources(); 3433 } else { 3434 struct amd_iommu *iommu; 3435 struct amd_iommu_pci_seg *pci_seg; 3436 3437 for_each_pci_segment(pci_seg) 3438 uninit_device_table_dma(pci_seg); 3439 3440 for_each_iommu(iommu) 3441 amd_iommu_flush_all_caches(iommu); 3442 } 3443 } 3444 return ret; 3445 } 3446 3447 static int __init iommu_go_to_state(enum iommu_init_state state) 3448 { 3449 int ret = -EINVAL; 3450 3451 while (init_state != state) { 3452 if (init_state == IOMMU_NOT_FOUND || 3453 init_state == IOMMU_INIT_ERROR || 3454 init_state == IOMMU_CMDLINE_DISABLED) 3455 break; 3456 ret = state_next(); 3457 } 3458 3459 /* 3460 * SNP platform initilazation requires IOMMUs to be fully configured. 3461 * If the SNP support on IOMMUs has NOT been checked, simply mark SNP 3462 * as unsupported. If the SNP support on IOMMUs has been checked and 3463 * host SNP support enabled but RMP enforcement has not been enabled 3464 * in IOMMUs, then the system is in a half-baked state, but can limp 3465 * along as all memory should be Hypervisor-Owned in the RMP. WARN, 3466 * but leave SNP as "supported" to avoid confusing the kernel. 3467 */ 3468 if (ret && cc_platform_has(CC_ATTR_HOST_SEV_SNP) && 3469 !WARN_ON_ONCE(amd_iommu_snp_en)) 3470 cc_platform_clear(CC_ATTR_HOST_SEV_SNP); 3471 3472 return ret; 3473 } 3474 3475 #ifdef CONFIG_IRQ_REMAP 3476 int __init amd_iommu_prepare(void) 3477 { 3478 int ret; 3479 3480 amd_iommu_irq_remap = true; 3481 3482 ret = iommu_go_to_state(IOMMU_ACPI_FINISHED); 3483 if (ret) { 3484 amd_iommu_irq_remap = false; 3485 return ret; 3486 } 3487 3488 return amd_iommu_irq_remap ? 0 : -ENODEV; 3489 } 3490 3491 int __init amd_iommu_enable(void) 3492 { 3493 int ret; 3494 3495 ret = iommu_go_to_state(IOMMU_ENABLED); 3496 if (ret) 3497 return ret; 3498 3499 irq_remapping_enabled = 1; 3500 return amd_iommu_xt_mode; 3501 } 3502 3503 void amd_iommu_disable(void) 3504 { 3505 amd_iommu_suspend(NULL); 3506 } 3507 3508 int amd_iommu_reenable(int mode) 3509 { 3510 amd_iommu_resume(NULL); 3511 3512 return 0; 3513 } 3514 3515 int amd_iommu_enable_faulting(unsigned int cpu) 3516 { 3517 /* We enable MSI later when PCI is initialized */ 3518 return 0; 3519 } 3520 #endif 3521 3522 /* 3523 * This is the core init function for AMD IOMMU hardware in the system. 3524 * This function is called from the generic x86 DMA layer initialization 3525 * code. 3526 */ 3527 static int __init amd_iommu_init(void) 3528 { 3529 int ret; 3530 3531 ret = iommu_go_to_state(IOMMU_INITIALIZED); 3532 #ifdef CONFIG_GART_IOMMU 3533 if (ret && list_empty(&amd_iommu_list)) { 3534 /* 3535 * We failed to initialize the AMD IOMMU - try fallback 3536 * to GART if possible. 
3537 */ 3538 gart_iommu_init(); 3539 } 3540 #endif 3541 3542 if (!ret) 3543 amd_iommu_debugfs_setup(); 3544 3545 return ret; 3546 } 3547 3548 static bool amd_iommu_sme_check(void) 3549 { 3550 if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) || 3551 (boot_cpu_data.x86 != 0x17)) 3552 return true; 3553 3554 /* For Fam17h, a specific level of support is required */ 3555 if (boot_cpu_data.microcode >= 0x08001205) 3556 return true; 3557 3558 if ((boot_cpu_data.microcode >= 0x08001126) && 3559 (boot_cpu_data.microcode <= 0x080011ff)) 3560 return true; 3561 3562 pr_notice("IOMMU not currently supported when SME is active\n"); 3563 3564 return false; 3565 } 3566 3567 /**************************************************************************** 3568 * 3569 * Early detect code. This code runs at IOMMU detection time in the DMA 3570 * layer. It just looks if there is an IVRS ACPI table to detect AMD 3571 * IOMMUs 3572 * 3573 ****************************************************************************/ 3574 void __init amd_iommu_detect(void) 3575 { 3576 int ret; 3577 3578 if (no_iommu || (iommu_detected && !gart_iommu_aperture)) 3579 goto disable_snp; 3580 3581 if (!amd_iommu_sme_check()) 3582 goto disable_snp; 3583 3584 ret = iommu_go_to_state(IOMMU_IVRS_DETECTED); 3585 if (ret) 3586 goto disable_snp; 3587 3588 amd_iommu_detected = true; 3589 iommu_detected = 1; 3590 x86_init.iommu.iommu_init = amd_iommu_init; 3591 return; 3592 3593 disable_snp: 3594 if (cc_platform_has(CC_ATTR_HOST_SEV_SNP)) 3595 cc_platform_clear(CC_ATTR_HOST_SEV_SNP); 3596 } 3597 3598 /**************************************************************************** 3599 * 3600 * Parsing functions for the AMD IOMMU specific kernel command line 3601 * options. 3602 * 3603 ****************************************************************************/ 3604 3605 static int __init parse_amd_iommu_dump(char *str) 3606 { 3607 amd_iommu_dump = true; 3608 3609 return 1; 3610 } 3611 3612 static int __init parse_amd_iommu_intr(char *str) 3613 { 3614 for (; *str; ++str) { 3615 if (strncmp(str, "legacy", 6) == 0) { 3616 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA; 3617 break; 3618 } 3619 if (strncmp(str, "vapic", 5) == 0) { 3620 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC; 3621 break; 3622 } 3623 } 3624 return 1; 3625 } 3626 3627 static int __init parse_amd_iommu_options(char *str) 3628 { 3629 if (!str) 3630 return -EINVAL; 3631 3632 while (*str) { 3633 if (strncmp(str, "fullflush", 9) == 0) { 3634 pr_warn("amd_iommu=fullflush deprecated; use iommu.strict=1 instead\n"); 3635 iommu_set_dma_strict(); 3636 } else if (strncmp(str, "force_enable", 12) == 0) { 3637 amd_iommu_force_enable = true; 3638 } else if (strncmp(str, "off", 3) == 0) { 3639 amd_iommu_disabled = true; 3640 } else if (strncmp(str, "force_isolation", 15) == 0) { 3641 amd_iommu_force_isolation = true; 3642 } else if (strncmp(str, "pgtbl_v1", 8) == 0) { 3643 amd_iommu_pgtable = PD_MODE_V1; 3644 } else if (strncmp(str, "pgtbl_v2", 8) == 0) { 3645 amd_iommu_pgtable = PD_MODE_V2; 3646 } else if (strncmp(str, "irtcachedis", 11) == 0) { 3647 amd_iommu_irtcachedis = true; 3648 } else if (strncmp(str, "nohugepages", 11) == 0) { 3649 pr_info("Restricting V1 page-sizes to 4KiB"); 3650 amd_iommu_pgsize_bitmap = AMD_IOMMU_PGSIZES_4K; 3651 } else if (strncmp(str, "v2_pgsizes_only", 15) == 0) { 3652 pr_info("Restricting V1 page-sizes to 4KiB/2MiB/1GiB"); 3653 amd_iommu_pgsize_bitmap = AMD_IOMMU_PGSIZES_V2; 3654 } else { 3655 pr_notice("Unknown option - '%s'\n", str); 3656 } 3657 3658 str += 
strcspn(str, ","); 3659 while (*str == ',') 3660 str++; 3661 } 3662 3663 return 1; 3664 } 3665 3666 static int __init parse_ivrs_ioapic(char *str) 3667 { 3668 u32 seg = 0, bus, dev, fn; 3669 int id, i; 3670 u32 devid; 3671 3672 if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 || 3673 sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) 3674 goto found; 3675 3676 if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 || 3677 sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) { 3678 pr_warn("ivrs_ioapic%s option format deprecated; use ivrs_ioapic=%d@%04x:%02x:%02x.%d instead\n", 3679 str, id, seg, bus, dev, fn); 3680 goto found; 3681 } 3682 3683 pr_err("Invalid command line: ivrs_ioapic%s\n", str); 3684 return 1; 3685 3686 found: 3687 if (early_ioapic_map_size == EARLY_MAP_SIZE) { 3688 pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n", 3689 str); 3690 return 1; 3691 } 3692 3693 devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn); 3694 3695 cmdline_maps = true; 3696 i = early_ioapic_map_size++; 3697 early_ioapic_map[i].id = id; 3698 early_ioapic_map[i].devid = devid; 3699 early_ioapic_map[i].cmd_line = true; 3700 3701 return 1; 3702 } 3703 3704 static int __init parse_ivrs_hpet(char *str) 3705 { 3706 u32 seg = 0, bus, dev, fn; 3707 int id, i; 3708 u32 devid; 3709 3710 if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 || 3711 sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) 3712 goto found; 3713 3714 if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 || 3715 sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) { 3716 pr_warn("ivrs_hpet%s option format deprecated; use ivrs_hpet=%d@%04x:%02x:%02x.%d instead\n", 3717 str, id, seg, bus, dev, fn); 3718 goto found; 3719 } 3720 3721 pr_err("Invalid command line: ivrs_hpet%s\n", str); 3722 return 1; 3723 3724 found: 3725 if (early_hpet_map_size == EARLY_MAP_SIZE) { 3726 pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n", 3727 str); 3728 return 1; 3729 } 3730 3731 devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn); 3732 3733 cmdline_maps = true; 3734 i = early_hpet_map_size++; 3735 early_hpet_map[i].id = id; 3736 early_hpet_map[i].devid = devid; 3737 early_hpet_map[i].cmd_line = true; 3738 3739 return 1; 3740 } 3741 3742 #define ACPIID_LEN (ACPIHID_UID_LEN + ACPIHID_HID_LEN) 3743 3744 static int __init parse_ivrs_acpihid(char *str) 3745 { 3746 u32 seg = 0, bus, dev, fn; 3747 char *hid, *uid, *p, *addr; 3748 char acpiid[ACPIID_LEN + 1] = { }; /* size with NULL terminator */ 3749 int i; 3750 3751 addr = strchr(str, '@'); 3752 if (!addr) { 3753 addr = strchr(str, '='); 3754 if (!addr) 3755 goto not_found; 3756 3757 ++addr; 3758 3759 if (strlen(addr) > ACPIID_LEN) 3760 goto not_found; 3761 3762 if (sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid) == 4 || 3763 sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid) == 5) { 3764 pr_warn("ivrs_acpihid%s option format deprecated; use ivrs_acpihid=%s@%04x:%02x:%02x.%d instead\n", 3765 str, acpiid, seg, bus, dev, fn); 3766 goto found; 3767 } 3768 goto not_found; 3769 } 3770 3771 /* We have the '@', make it the terminator to get just the acpiid */ 3772 *addr++ = 0; 3773 3774 if (strlen(str) > ACPIID_LEN) 3775 goto not_found; 3776 3777 if (sscanf(str, "=%s", acpiid) != 1) 3778 goto not_found; 3779 3780 if (sscanf(addr, "%x:%x.%x", &bus, &dev, &fn) == 3 || 3781 sscanf(addr, "%x:%x:%x.%x", &seg, &bus, &dev, &fn) == 4) 3782 goto found; 3783 3784 not_found: 3785 pr_err("Invalid command line: ivrs_acpihid%s\n", 
str); 3786 return 1; 3787 3788 found: 3789 p = acpiid; 3790 hid = strsep(&p, ":"); 3791 uid = p; 3792 3793 if (!hid || !(*hid) || !uid) { 3794 pr_err("Invalid command line: hid or uid\n"); 3795 return 1; 3796 } 3797 3798 /* 3799 * Ignore leading zeroes after ':', so e.g., AMDI0095:00 3800 * will match AMDI0095:0 in the second strcmp in acpi_dev_hid_uid_match 3801 */ 3802 while (*uid == '0' && *(uid + 1)) 3803 uid++; 3804 3805 if (strlen(hid) >= ACPIHID_HID_LEN) { 3806 pr_err("Invalid command line: hid is too long\n"); 3807 return 1; 3808 } else if (strlen(uid) >= ACPIHID_UID_LEN) { 3809 pr_err("Invalid command line: uid is too long\n"); 3810 return 1; 3811 } 3812 3813 i = early_acpihid_map_size++; 3814 memcpy(early_acpihid_map[i].hid, hid, strlen(hid)); 3815 memcpy(early_acpihid_map[i].uid, uid, strlen(uid)); 3816 early_acpihid_map[i].devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn); 3817 early_acpihid_map[i].cmd_line = true; 3818 3819 return 1; 3820 } 3821 3822 __setup("amd_iommu_dump", parse_amd_iommu_dump); 3823 __setup("amd_iommu=", parse_amd_iommu_options); 3824 __setup("amd_iommu_intr=", parse_amd_iommu_intr); 3825 __setup("ivrs_ioapic", parse_ivrs_ioapic); 3826 __setup("ivrs_hpet", parse_ivrs_hpet); 3827 __setup("ivrs_acpihid", parse_ivrs_acpihid); 3828 3829 bool amd_iommu_pasid_supported(void) 3830 { 3831 /* CPU page table size should match IOMMU guest page table size */ 3832 if (cpu_feature_enabled(X86_FEATURE_LA57) && 3833 amd_iommu_gpt_level != PAGE_MODE_5_LEVEL) 3834 return false; 3835 3836 /* 3837 * Since DTE[Mode]=0 is prohibited on SNP-enabled system 3838 * (i.e. EFR[SNPSup]=1), IOMMUv2 page table cannot be used without 3839 * setting up IOMMUv1 page table. 3840 */ 3841 return amd_iommu_gt_ppr_supported() && !amd_iommu_snp_en; 3842 } 3843 3844 struct amd_iommu *get_amd_iommu(unsigned int idx) 3845 { 3846 unsigned int i = 0; 3847 struct amd_iommu *iommu; 3848 3849 for_each_iommu(iommu) 3850 if (i++ == idx) 3851 return iommu; 3852 return NULL; 3853 } 3854 3855 /**************************************************************************** 3856 * 3857 * IOMMU EFR Performance Counter support functionality. This code allows 3858 * access to the IOMMU PC functionality. 
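 * Each IOMMU exposes max_banks x max_counters 48-bit counters that are
 * read and written through the MMIO region starting at MMIO_CNTR_REG_OFFSET.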
3859 * 3860 ****************************************************************************/ 3861 3862 u8 amd_iommu_pc_get_max_banks(unsigned int idx) 3863 { 3864 struct amd_iommu *iommu = get_amd_iommu(idx); 3865 3866 if (iommu) 3867 return iommu->max_banks; 3868 3869 return 0; 3870 } 3871 3872 bool amd_iommu_pc_supported(void) 3873 { 3874 return amd_iommu_pc_present; 3875 } 3876 3877 u8 amd_iommu_pc_get_max_counters(unsigned int idx) 3878 { 3879 struct amd_iommu *iommu = get_amd_iommu(idx); 3880 3881 if (iommu) 3882 return iommu->max_counters; 3883 3884 return 0; 3885 } 3886 3887 static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, 3888 u8 fxn, u64 *value, bool is_write) 3889 { 3890 u32 offset; 3891 u32 max_offset_lim; 3892 3893 /* Make sure the IOMMU PC resource is available */ 3894 if (!amd_iommu_pc_present) 3895 return -ENODEV; 3896 3897 /* Check for valid iommu and pc register indexing */ 3898 if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7))) 3899 return -ENODEV; 3900 3901 offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn); 3902 3903 /* Limit the offset to the hw defined mmio region aperture */ 3904 max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) | 3905 (iommu->max_counters << 8) | 0x28); 3906 if ((offset < MMIO_CNTR_REG_OFFSET) || 3907 (offset > max_offset_lim)) 3908 return -EINVAL; 3909 3910 if (is_write) { 3911 u64 val = *value & GENMASK_ULL(47, 0); 3912 3913 writel((u32)val, iommu->mmio_base + offset); 3914 writel((val >> 32), iommu->mmio_base + offset + 4); 3915 } else { 3916 *value = readl(iommu->mmio_base + offset + 4); 3917 *value <<= 32; 3918 *value |= readl(iommu->mmio_base + offset); 3919 *value &= GENMASK_ULL(47, 0); 3920 } 3921 3922 return 0; 3923 } 3924 3925 int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value) 3926 { 3927 if (!iommu) 3928 return -EINVAL; 3929 3930 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false); 3931 } 3932 3933 int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value) 3934 { 3935 if (!iommu) 3936 return -EINVAL; 3937 3938 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true); 3939 } 3940 3941 #ifdef CONFIG_KVM_AMD_SEV 3942 static int iommu_page_make_shared(void *page) 3943 { 3944 unsigned long paddr, pfn; 3945 3946 paddr = iommu_virt_to_phys(page); 3947 /* Cbit maybe set in the paddr */ 3948 pfn = __sme_clr(paddr) >> PAGE_SHIFT; 3949 3950 if (!(pfn % PTRS_PER_PMD)) { 3951 int ret, level; 3952 bool assigned; 3953 3954 ret = snp_lookup_rmpentry(pfn, &assigned, &level); 3955 if (ret) { 3956 pr_warn("IOMMU PFN %lx RMP lookup failed, ret %d\n", pfn, ret); 3957 return ret; 3958 } 3959 3960 if (!assigned) { 3961 pr_warn("IOMMU PFN %lx not assigned in RMP table\n", pfn); 3962 return -EINVAL; 3963 } 3964 3965 if (level > PG_LEVEL_4K) { 3966 ret = psmash(pfn); 3967 if (!ret) 3968 goto done; 3969 3970 pr_warn("PSMASH failed for IOMMU PFN %lx huge RMP entry, ret: %d, level: %d\n", 3971 pfn, ret, level); 3972 return ret; 3973 } 3974 } 3975 3976 done: 3977 return rmp_make_shared(pfn, PG_LEVEL_4K); 3978 } 3979 3980 static int iommu_make_shared(void *va, size_t size) 3981 { 3982 void *page; 3983 int ret; 3984 3985 if (!va) 3986 return 0; 3987 3988 for (page = va; page < (va + size); page += PAGE_SIZE) { 3989 ret = iommu_page_make_shared(page); 3990 if (ret) 3991 return ret; 3992 } 3993 3994 return 0; 3995 } 3996 3997 int amd_iommu_snp_disable(void) 3998 { 3999 struct amd_iommu *iommu; 4000 int ret; 4001 4002 if (!amd_iommu_snp_en) 4003 
return 0; 4004 4005 for_each_iommu(iommu) { 4006 ret = iommu_make_shared(iommu->evt_buf, EVT_BUFFER_SIZE); 4007 if (ret) 4008 return ret; 4009 4010 ret = iommu_make_shared(iommu->ppr_log, PPR_LOG_SIZE); 4011 if (ret) 4012 return ret; 4013 4014 ret = iommu_make_shared((void *)iommu->cmd_sem, PAGE_SIZE); 4015 if (ret) 4016 return ret; 4017 } 4018 4019 return 0; 4020 } 4021 EXPORT_SYMBOL_GPL(amd_iommu_snp_disable); 4022 #endif 4023