// SPDX-License-Identifier: GPL-2.0-only
/*
 * Helpers used for nested SVM testing
 * Largely inspired by the KVM-unit-tests svm.c
 *
 * Copyright (C) 2020, Red Hat, Inc.
 */

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "svm_util.h"

#define SEV_DEV_PATH "/dev/sev"

struct gpr64_regs guest_regs;
u64 rflags;

/*
 * Allocate memory regions for nested SVM tests.
 *
 * Input Args:
 *   vm - The VM to allocate guest-virtual addresses in.
 *
 * Output Args:
 *   p_svm_gva - The guest virtual address for the struct svm_test_data.
 *
 * Return:
 *   Pointer to structure with the addresses of the SVM areas.
 */
struct svm_test_data *
vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva)
{
	vm_vaddr_t svm_gva = vm_vaddr_alloc_page(vm);
	struct svm_test_data *svm = addr_gva2hva(vm, svm_gva);

	svm->vmcb = (void *)vm_vaddr_alloc_page(vm);
	svm->vmcb_hva = addr_gva2hva(vm, (uintptr_t)svm->vmcb);
	svm->vmcb_gpa = addr_gva2gpa(vm, (uintptr_t)svm->vmcb);

	svm->save_area = (void *)vm_vaddr_alloc_page(vm);
	svm->save_area_hva = addr_gva2hva(vm, (uintptr_t)svm->save_area);
	svm->save_area_gpa = addr_gva2gpa(vm, (uintptr_t)svm->save_area);

	svm->msr = (void *)vm_vaddr_alloc_page(vm);
	svm->msr_hva = addr_gva2hva(vm, (uintptr_t)svm->msr);
	svm->msr_gpa = addr_gva2gpa(vm, (uintptr_t)svm->msr);
	memset(svm->msr_hva, 0, getpagesize());

	if (vm->stage2_mmu.pgd_created)
		svm->ncr3_gpa = vm->stage2_mmu.pgd;

	*p_svm_gva = svm_gva;
	return svm;
}

static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
			 u64 base, u32 limit, u32 attr)
{
	seg->selector = selector;
	seg->attrib = attr;
	seg->limit = limit;
	seg->base = base;
}

void vm_enable_npt(struct kvm_vm *vm)
{
	struct pte_masks pte_masks;

	TEST_ASSERT(kvm_cpu_has_npt(), "KVM doesn't support NPT");

	/*
	 * NPTs use the same PTE format, but deliberately drop the C-bit as the
	 * per-VM shared vs. private information is only meant for stage-1.
	 */
	pte_masks = vm->mmu.arch.pte_masks;
	pte_masks.c = 0;

	/* NPT walks are treated as user accesses, so set the 'user' bit. */
	pte_masks.always_set = pte_masks.user;

	tdp_mmu_init(vm, vm->mmu.pgtable_levels, &pte_masks);
}
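
/*
 * Typical call order for an NPT-enabled test (an illustrative sketch, not a
 * helper in this file; vm_create_with_one_vcpu() and vcpu_args_set() are the
 * usual selftest APIs and are assumed here):
 *
 *	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
 *	vm_enable_npt(vm);
 *	vcpu_alloc_svm(vm, &svm_gva);
 *	vcpu_args_set(vcpu, 1, svm_gva);
 *
 * vm_enable_npt() must run before vcpu_alloc_svm() so that stage2_mmu.pgd is
 * created and picked up as svm->ncr3_gpa; generic_svm_setup() then enables
 * nested paging (SVM_NESTED_CTL_NP_ENABLE + nested_cr3) automatically.
 */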
void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp)
{
	struct vmcb *vmcb = svm->vmcb;
	uint64_t vmcb_gpa = svm->vmcb_gpa;
	struct vmcb_save_area *save = &vmcb->save;
	struct vmcb_control_area *ctrl = &vmcb->control;
	u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
		| SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
	u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
		| SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
	uint64_t efer;

	efer = rdmsr(MSR_EFER);
	wrmsr(MSR_EFER, efer | EFER_SVME);
	wrmsr(MSR_VM_HSAVE_PA, svm->save_area_gpa);

	memset(vmcb, 0, sizeof(*vmcb));
	asm volatile ("vmsave %0\n\t" : : "a" (vmcb_gpa) : "memory");
	vmcb_set_seg(&save->es, get_es(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->cs, get_cs(), 0, -1U, code_seg_attr);
	vmcb_set_seg(&save->ss, get_ss(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->ds, get_ds(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->gdtr, 0, get_gdt().address, get_gdt().size, 0);
	vmcb_set_seg(&save->idtr, 0, get_idt().address, get_idt().size, 0);

	ctrl->asid = 1;
	save->cpl = 0;
	save->efer = rdmsr(MSR_EFER);
	asm volatile ("mov %%cr4, %0" : "=r"(save->cr4) : : "memory");
	asm volatile ("mov %%cr3, %0" : "=r"(save->cr3) : : "memory");
	asm volatile ("mov %%cr0, %0" : "=r"(save->cr0) : : "memory");
	asm volatile ("mov %%dr7, %0" : "=r"(save->dr7) : : "memory");
	asm volatile ("mov %%dr6, %0" : "=r"(save->dr6) : : "memory");
	asm volatile ("mov %%cr2, %0" : "=r"(save->cr2) : : "memory");
	save->g_pat = rdmsr(MSR_IA32_CR_PAT);
	save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
	ctrl->intercept = (1ULL << INTERCEPT_VMRUN) |
			  (1ULL << INTERCEPT_VMMCALL);
	ctrl->msrpm_base_pa = svm->msr_gpa;

	vmcb->save.rip = (u64)guest_rip;
	vmcb->save.rsp = (u64)guest_rsp;
	guest_regs.rdi = (u64)svm;

	if (svm->ncr3_gpa) {
		ctrl->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
		ctrl->nested_cr3 = svm->ncr3_gpa;
	}
}

/*
 * Save/restore the 64-bit general purpose registers except rax, rip and rsp,
 * which are handled directly via the VMCB guest processor state.
 */
#define SAVE_GPR_C				\
	"xchg %%rbx, guest_regs+0x20\n\t"	\
	"xchg %%rcx, guest_regs+0x10\n\t"	\
	"xchg %%rdx, guest_regs+0x18\n\t"	\
	"xchg %%rbp, guest_regs+0x30\n\t"	\
	"xchg %%rsi, guest_regs+0x38\n\t"	\
	"xchg %%rdi, guest_regs+0x40\n\t"	\
	"xchg %%r8, guest_regs+0x48\n\t"	\
	"xchg %%r9, guest_regs+0x50\n\t"	\
	"xchg %%r10, guest_regs+0x58\n\t"	\
	"xchg %%r11, guest_regs+0x60\n\t"	\
	"xchg %%r12, guest_regs+0x68\n\t"	\
	"xchg %%r13, guest_regs+0x70\n\t"	\
	"xchg %%r14, guest_regs+0x78\n\t"	\
	"xchg %%r15, guest_regs+0x80\n\t"

#define LOAD_GPR_C SAVE_GPR_C
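
/*
 * Each xchg above swaps a host register with its slot in the global
 * guest_regs; the hex constants are the byte offsets of the corresponding
 * fields in struct gpr64_regs (rax, at offset 0, is transferred separately in
 * run_guest() below). If the struct layout ever changes, a compile-time check
 * along these lines would catch it (a sketch, assuming <stddef.h> is
 * included):
 *
 *	_Static_assert(offsetof(struct gpr64_regs, rbx) == 0x20, "rbx slot");
 *	_Static_assert(offsetof(struct gpr64_regs, r15) == 0x80, "r15 slot");
 */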
/*
 * Selftests do not use interrupts, so clgi/sti/cli/stgi have been dropped for
 * now. The registers touched by LOAD_GPR_C/SAVE_GPR_C hold their original
 * values again once the second set of xchgs completes, so they do not need to
 * be in the clobber list.
 */
void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa)
{
	asm volatile (
		"vmload %[vmcb_gpa]\n\t"
		"mov rflags, %%r15\n\t"	// rflags
		"mov %%r15, 0x170(%[vmcb])\n\t"
		"mov guest_regs, %%r15\n\t"	// rax
		"mov %%r15, 0x1f8(%[vmcb])\n\t"
		LOAD_GPR_C
		"vmrun %[vmcb_gpa]\n\t"
		SAVE_GPR_C
		"mov 0x170(%[vmcb]), %%r15\n\t"	// rflags
		"mov %%r15, rflags\n\t"
		"mov 0x1f8(%[vmcb]), %%r15\n\t"	// rax
		"mov %%r15, guest_regs\n\t"
		"vmsave %[vmcb_gpa]\n\t"
		: : [vmcb] "r" (vmcb), [vmcb_gpa] "a" (vmcb_gpa)
		: "r15", "memory");
}

/*
 * Open SEV_DEV_PATH if available, otherwise exit the entire program.
 *
 * Return:
 *   The opened file descriptor of /dev/sev.
 */
int open_sev_dev_path_or_exit(void)
{
	return open_path_or_exit(SEV_DEV_PATH, 0);
}
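
/*
 * Example of how these helpers fit together, modeled on existing nested SVM
 * selftests such as svm_vmcall_test (an illustrative sketch; the stack size
 * and assertion style are assumptions, not part of this library):
 *
 *	static void l2_guest_code(struct svm_test_data *svm)
 *	{
 *		vmmcall();
 *	}
 *
 *	static void l1_guest_code(struct svm_test_data *svm)
 *	{
 *		unsigned long l2_guest_stack[64];
 *		struct vmcb *vmcb = svm->vmcb;
 *
 *		generic_svm_setup(svm, l2_guest_code, &l2_guest_stack[64]);
 *		run_guest(vmcb, svm->vmcb_gpa);
 *		GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
 *		GUEST_DONE();
 *	}
 */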