Lines Matching +full:segment +full:- +full:1 +full:a
1 // SPDX-License-Identifier: GPL-2.0-only
3 * AMD SVM-SEV Host Support.
20 #include <linux/amd-iommu.h>
40 u8 assigned :1,
42 u8 pagesize :1,
43 hpage_region_status :1,
45 u8 immutable :1,
61 u64 assigned : 1,
62 pagesize : 1,
63 immutable : 1,
67 vmsa : 1,
68 validated : 1,
69 rsvd2 : 1;
83 * For a non-segmented RMP table, use the maximum physical addressing as the
84 * segment size in order to always arrive at index 0 in the table.
96 * - The segment size is used for two purposes:
97 * - Identify the amount of memory covered by an RMP segment
98 * - Quickly locate an RMP segment table entry for a physical address
100 * - The RMP segment table contains pointers to an RMP table that covers
101 * a specific portion of memory. There can be up to 512 8-byte entries,
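As a worked example of the lookup this layout enables (values here are illustrative, not taken from the file): with a 64 GB segment size the segment shift is 36, so physical address 0x1234567000 selects RMP segment table entry 0x1234567000 >> 36 = 1, and the remaining offset 0x234567000 picks out the 16-byte RMP entry within that segment's RMP table. In the non-segmented case described above, the shift is set to the maximum physical address width, so every address lands on index 0 and the single contiguous RMP table covers everything.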
120 /* Mask to apply to a PFN to get the first PFN of a 2MB page */
121 #define PFN_PMD_MASK GENMASK_ULL(63, PMD_SHIFT - PAGE_SHIFT)
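A minimal standalone sketch of what this mask does, assuming 4K base pages and 2MB PMD-sized pages (so PMD_SHIFT - PAGE_SHIFT = 9 and GENMASK_ULL(63, 9) simply clears the low 9 bits of a PFN); the DEMO_* macros are stand-ins, not the kernel's:

#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_SHIFT   12
#define DEMO_PMD_SHIFT    21
/* Userspace equivalent of GENMASK_ULL(63, 9): bits 9..63 set */
#define DEMO_PFN_PMD_MASK (~0ULL << (DEMO_PMD_SHIFT - DEMO_PAGE_SHIFT))

int main(void)
{
        uint64_t pfn = 0x12345;  /* arbitrary PFN somewhere inside a 2MB page */

        /* 0x12345 & ~0x1ff = 0x12200, the first PFN of that 2MB page */
        printf("pfn 0x%llx -> first PFN of its 2MB page: 0x%llx\n",
               (unsigned long long)pfn,
               (unsigned long long)(pfn & DEMO_PFN_PMD_MASK));
        return 0;
}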
131 #define pr_fmt(fmt) "SEV-SNP: " fmt
184 * from within that chunk which then causes a fatal RMP fault. in __snp_fixup_e820_tables()
195 * the kexec-ed kernel. in __snp_fixup_e820_tables()
199 pr_info("Reserving start/end of RMP table on a 2MB boundary [0x%016llx]\n", pa); in __snp_fixup_e820_tables()
232 * the segment coverage size, but gets reduced to the in fixup_e820_tables_for_segmented_rmp()
233 * segment coverage size. in fixup_e820_tables_for_segmented_rmp()
239 /* Calculate the RMP segment size (16 bytes/page mapped) */ in fixup_e820_tables_for_segmented_rmp()
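To make the "16 bytes/page mapped" sizing concrete (numbers are illustrative): an RMP segment covering 64 GB of memory describes 64 GB / 4 KB = 16,777,216 pages, and at 16 bytes of RMP entry per page that segment's RMP table comes to 256 MB.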
289 /* Validate the RMP segment size */ in alloc_rmp_segment_desc()
291 pr_err("Invalid RMP size 0x%llx for configured segment size 0x%llx\n", in alloc_rmp_segment_desc()
296 /* Validate the RMP segment table index */ in alloc_rmp_segment_desc()
299 pr_err("Invalid RMP segment base address 0x%llx for configured segment size 0x%llx\n", in alloc_rmp_segment_desc()
305 pr_err("RMP segment descriptor already exists at index %llu\n", rst_index); in alloc_rmp_segment_desc()
311 pr_err("Failed to map RMP segment addr 0x%llx size 0x%llx\n", in alloc_rmp_segment_desc()
322 desc->rmp_entry = rmp_segment; in alloc_rmp_segment_desc()
323 desc->max_index = segment_size / sizeof(*desc->rmp_entry); in alloc_rmp_segment_desc()
324 desc->size = segment_size; in alloc_rmp_segment_desc()
342 memunmap(desc->rmp_entry); in free_rmp_segment_table()
373 rmp_end = probed_rmp_base + probed_rmp_size - 1; in setup_contiguous_rmptable()
396 rmptable_size = probed_rmp_size - RMPTABLE_CPU_BOOKKEEPING_SZ; in setup_contiguous_rmptable()
420 pr_err("Failed to map RMP segment table addr 0x%llx\n", rst_pa); in setup_segmented_rmptable()
441 * segment coverage size, but gets reduced to the segment in setup_segmented_rmptable()
446 pr_info("RMP segment %u mapped size (0x%llx) reduced to 0x%llx\n", in setup_segmented_rmptable()
453 /* Calculate the RMP segment size (16 bytes/page mapped) */ in setup_segmented_rmptable()
468 pr_info("RMP segment %u physical address [0x%llx - 0x%llx] covering [0x%llx - 0x%llx]\n", in setup_segmented_rmptable()
469 i, rmp_segment, rmp_segment + rmp_size - 1, pa, pa + mapped_size - 1); in setup_segmented_rmptable()
479 rst_max_index = max_index + 1; in setup_segmented_rmptable()
514 return -ENOSYS; in snp_rmptable_init()
517 return -ENOSYS; in snp_rmptable_init()
520 return -ENOSYS; in snp_rmptable_init()
523 * Check if SEV-SNP is already enabled; this can happen in case of in snp_rmptable_init()
533 return -ENOSYS; in snp_rmptable_init()
544 memset(desc->rmp_entry, 0, desc->size); in snp_rmptable_init()
551 on_each_cpu(mfd_enable, NULL, 1); in snp_rmptable_init()
553 on_each_cpu(snp_enable, NULL, 1); in snp_rmptable_init()
570 rmp_segment_size = 1ULL << rmp_segment_shift; in set_rmp_segment_info()
571 rmp_segment_mask = rmp_segment_size - 1; in set_rmp_segment_info()
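A small standalone sketch of how the shift and mask computed here are used to locate an entry (illustrative only; it hard-codes a 64 GB segment and 4K pages rather than reading the real configuration):

#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_SHIFT     12
#define DEMO_SEGMENT_SHIFT  36                            /* 64 GB per RMP segment */
#define DEMO_SEGMENT_SIZE   (1ULL << DEMO_SEGMENT_SHIFT)
#define DEMO_SEGMENT_MASK   (DEMO_SEGMENT_SIZE - 1)

int main(void)
{
        uint64_t paddr = 0x1234567000ULL;

        /* Which RMP segment table (RST) entry covers this address... */
        uint64_t rst_index = paddr >> DEMO_SEGMENT_SHIFT;

        /* ...and which 16-byte RMP entry within that segment's RMP table. */
        uint64_t entry_index = (paddr & DEMO_SEGMENT_MASK) >> DEMO_PAGE_SHIFT;

        printf("paddr 0x%llx -> RST index %llu, RMP entry index 0x%llx\n",
               (unsigned long long)paddr,
               (unsigned long long)rst_index,
               (unsigned long long)entry_index);
        return 0;
}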
593 rmp_sz = rmp_end - rmp_base + 1; in probe_contiguous_rmptable_info()
595 /* Treat the contiguous RMP table as a single segment */ in probe_contiguous_rmptable_info()
596 rst_max_index = 1; in probe_contiguous_rmptable_info()
603 pr_info("RMP table physical range [0x%016llx - 0x%016llx]\n", in probe_contiguous_rmptable_info()
622 "Segmented RMP enabled but RMP_END MSR is non-zero\n"); in probe_segmented_rmptable_info()
624 /* Obtain the min and max supported RMP segment size */ in probe_segmented_rmptable_info()
629 /* Verify the segment size is within the supported limits */ in probe_segmented_rmptable_info()
632 pr_err("RMP segment size (%u) is not within advertised bounds (min=%u, max=%u)\n", in probe_segmented_rmptable_info()
637 /* Override the max supported RST index if a hardware limit exists */ in probe_segmented_rmptable_info()
647 pr_info("Segmented RMP base table physical range [0x%016llx - 0x%016llx]\n", in probe_segmented_rmptable_info()
670 * speculation should be stopped as a protective measure.
678 return ERR_PTR(-ENODEV); in get_raw_rmpentry()
684 return ERR_PTR(-EFAULT); in get_raw_rmpentry()
690 return ERR_PTR(-EFAULT); in get_raw_rmpentry()
693 if (unlikely(segment_index >= desc->max_index)) in get_raw_rmpentry()
694 return ERR_PTR(-EFAULT); in get_raw_rmpentry()
696 segment_index = array_index_nospec(segment_index, desc->max_index); in get_raw_rmpentry()
698 return desc->rmp_entry + segment_index; in get_raw_rmpentry()
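The bounds check plus array_index_nospec() above follow the usual speculation-hardening pattern for untrusted indices; a minimal generic sketch of that pattern (not code from this file):

#include <linux/types.h>
#include <linux/nospec.h>

/* Validate an untrusted index, then clamp it so that a mispredicted bounds
 * check cannot be used to speculatively read outside the table. */
static u64 demo_read_entry(const u64 *table, unsigned long idx, unsigned long nr_entries)
{
        if (idx >= nr_entries)
                return 0;

        idx = array_index_nospec(idx, nr_entries);
        return table[idx];
}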
710 : "=a" (ret) in get_rmpentry()
711 : "a" (pfn << PAGE_SHIFT), "c" (e) in get_rmpentry()
728 e->gpa = e_raw->gpa << PAGE_SHIFT; in get_rmpentry()
729 e->asid = e_raw->asid; in get_rmpentry()
730 e->assigned = e_raw->assigned; in get_rmpentry()
731 e->pagesize = e_raw->pagesize; in get_rmpentry()
732 e->immutable = e_raw->immutable; in get_rmpentry()
743 return -ENODEV; in __snp_lookup_rmpentry()
750 * Find the authoritative RMP entry for a PFN. This can be either a 4K in __snp_lookup_rmpentry()
751 * RMP entry or a special large RMP entry that is authoritative for a in __snp_lookup_rmpentry()
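A hedged caller-side sketch of the exported wrapper around this lookup, snp_lookup_rmpentry(); the prototype is the one declared in <asm/sev.h>, but the surrounding flow is illustrative:

#include <linux/printk.h>
#include <linux/types.h>
#include <asm/sev.h>

/* Illustrative only: ask whether a PFN is assigned (guest-owned) in the RMP
 * table and at what granularity the authoritative entry is. */
static int demo_check_pfn(u64 pfn)
{
        bool assigned;
        int level, ret;

        ret = snp_lookup_rmpentry(pfn, &assigned, &level);
        if (ret)
                return ret;

        if (assigned && level == PG_LEVEL_2M)
                pr_info("PFN 0x%llx is covered by a 2MB private RMP entry\n", pfn);

        return 0;
}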
778 * Dump the raw RMP entry for a particular PFN. These bits are documented in the
779 * PPR for a particular CPU model and provide useful information about how a
805 pr_info("PFN 0x%llx, RMP entry: [0x%016llx - 0x%016llx]\n", in dump_rmpentry()
806 pfn, e_raw->lo, e_raw->hi); in dump_rmpentry()
811 * If the RMP entry for a particular PFN is not in an assigned state, in dump_rmpentry()
814 * those too can affect the ability to access a particular PFN in in dump_rmpentry()
815 * certain situations, such as when the PFN is being accessed via a 2MB in dump_rmpentry()
821 pr_info("PFN 0x%llx unassigned, dumping non-zero entries in 2M PFN region: [0x%llx - 0x%llx]\n", in dump_rmpentry()
833 if (e_raw->lo || e_raw->hi) in dump_rmpentry()
834 pr_info("PFN: 0x%llx, [0x%016llx - 0x%016llx]\n", pfn_i, e_raw->lo, e_raw->hi); in dump_rmpentry()
860 * PSMASH a 2MB aligned page into 4K pages in the RMP table while preserving the
869 return -ENODEV; in psmash()
872 return -EINVAL; in psmash()
876 : "=a" (ret) in psmash()
877 : "a" (paddr) in psmash()
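A hedged usage sketch of psmash(): callers such as KVM's SEV-SNP code split a 2MB RMP entry this way before transitioning a single 4K sub-page, but the helper below is illustrative, not taken from a real caller:

#include <linux/types.h>
#include <asm/sev.h>

/* Illustrative only: split the 2MB RMP entry covering @pfn into 512 4K
 * entries (the Validated bit is carried over to each sub-page). psmash()
 * expects the first PFN of the 2MB region, hence the mask
 * (511 == PTRS_PER_PMD - 1 with 4K pages). */
static int demo_split_rmp_entry(u64 pfn)
{
        return psmash(pfn & ~511ULL);
}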
885 * If the kernel uses a 2MB or larger directmap mapping to write to an address,
887 * table, an RMP #PF will trigger and cause a host crash. Hypervisor code that
888 * owns the PFNs being transitioned will never attempt such a write, but other
890 * inadvertently due to a large directmap mapping that happens to overlap such a
893 * Prevent this by splitting any 2MB+ mappings that might end up containing a
894 * mix of private/shared PFNs as a result of a subsequent RMPUPDATE for the
898 * physical range, since it would only be worthwhile in determining if a
899 * subsequent RMPUPDATE for a 4KB PFN would result in all the entries being of
901 * But that would mean the entries are currently in a mixed state, and so the
902 * mapping would have already been split as a result of prior transitions.
904 * currently a mechanism in place to restore 2MB+ mappings, such a check would
918 * pfn_to_kaddr() will return a vaddr only within the direct in adjust_direct_map()
925 return -EINVAL; in adjust_direct_map()
928 return -EINVAL; in adjust_direct_map()
931 (!IS_ALIGNED(pfn, PTRS_PER_PMD) || !pfn_valid(pfn + PTRS_PER_PMD - 1))) in adjust_direct_map()
932 return -EINVAL; in adjust_direct_map()
937 * since even accesses from 1GB mappings will be treated as 2MB accesses in adjust_direct_map()
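A hedged sketch of the split itself, assuming the x86 set_memory_4k() helper; the real function performs the validity and alignment checks shown above before getting this far, and skips the split entirely for whole-2MB transitions:

#include <linux/mm.h>
#include <asm/set_memory.h>

/* Illustrative only: force the direct-map translation of one 4K page down to
 * a 4K mapping so a later RMPUPDATE cannot leave a 2MB+ kernel mapping
 * spanning a mix of private and shared PFNs. */
static int demo_split_direct_map(unsigned long pfn)
{
        return set_memory_4k((unsigned long)pfn_to_kaddr(pfn), 1);
}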
965 * If not, one could consider introducing a mutex or so here to sync concurrent
978 return -ENODEV; in rmpupdate()
980 level = RMP_TO_PG_LEVEL(state->pagesize); in rmpupdate()
983 return -EFAULT; in rmpupdate()
988 : "=a" (ret) in rmpupdate()
989 : "a" (paddr), "c" ((unsigned long)state) in rmpupdate()
998 return -EFAULT; in rmpupdate()
1004 /* Transition a page to guest-owned/private state in the RMP table. */
1010 state.assigned = 1; in rmp_make_private()
1020 /* Transition a page to hypervisor-owned/shared state in the RMP table. */
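A hedged caller-side sketch of the two transitions whose comments appear above; rmp_make_private() and rmp_make_shared() are the exported helpers (declared in <asm/sev.h>), while the round-trip flow itself is only illustrative:

#include <linux/types.h>
#include <asm/sev.h>

/* Illustrative only: assign a 4K page to a guest (recording the ASID and GPA
 * in its RMP entry), then return it to hypervisor-owned/shared state. */
static int demo_private_shared_roundtrip(u64 pfn, u64 gpa, u32 asid)
{
        int ret;

        ret = rmp_make_private(pfn, gpa, PG_LEVEL_4K, asid, false);
        if (ret)
                return ret;

        return rmp_make_shared(pfn, PG_LEVEL_4K);
}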
1036 pr_warn("Leaking PFN range 0x%llx-0x%llx\n", pfn, pfn + npages); in __snp_leak_pages()
1039 while (npages--) { in __snp_leak_pages()
1043 * pages list. This page should not be on a free list currently in __snp_leak_pages()
1044 * and is also unsafe to be added to a free list. in __snp_leak_pages()
1050 * page->buddy_list of tail pages is not usable. in __snp_leak_pages()
1053 list_add_tail(&page->buddy_list, &snp_leaked_pages_list); in __snp_leak_pages()
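A typical reason a page ends up on this list is a failed transition back to shared state; a minimal sketch of that pattern, using the snp_leak_pages() wrapper (assumed here to be the exported front end for the function above):

#include <linux/types.h>
#include <asm/sev.h>

/* Illustrative only: if a page cannot be returned to shared state, leak it so
 * it never goes back to the page allocator, where its stale private RMP entry
 * could later trigger a fatal RMP #PF. */
static void demo_unshare_or_leak(u64 pfn)
{
        if (rmp_make_shared(pfn, PG_LEVEL_4K))
                snp_leak_pages(pfn, 1);
}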