// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */

#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/insn-def.h>

static int gstage_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
			     struct kvm_cpu_trap *trap)
{
	struct kvm_memory_slot *memslot;
	unsigned long hva, fault_addr;
	bool writable;
	gfn_t gfn;
	int ret;

	fault_addr = (trap->htval << 2) | (trap->stval & 0x3);
	gfn = fault_addr >> PAGE_SHIFT;
	memslot = gfn_to_memslot(vcpu->kvm, gfn);
	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);

	if (kvm_is_error_hva(hva) ||
	    (trap->scause == EXC_STORE_GUEST_PAGE_FAULT && !writable)) {
		switch (trap->scause) {
		case EXC_LOAD_GUEST_PAGE_FAULT:
			return kvm_riscv_vcpu_mmio_load(vcpu, run,
							fault_addr,
							trap->htinst);
		case EXC_STORE_GUEST_PAGE_FAULT:
			return kvm_riscv_vcpu_mmio_store(vcpu, run,
							 fault_addr,
							 trap->htinst);
		default:
			return -EOPNOTSUPP;
		}
	}

	ret = kvm_riscv_gstage_map(vcpu, memslot, fault_addr, hva,
				   trap->scause == EXC_STORE_GUEST_PAGE_FAULT);
	if (ret < 0)
		return ret;

	return 1;
}

/**
 * kvm_riscv_vcpu_unpriv_read -- Read machine word from Guest memory
 *
 * @vcpu: The VCPU pointer
 * @read_insn: Flag indicating whether we are reading an instruction
 * @guest_addr: Guest address to read
 * @trap: Output pointer to trap details
 */
unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
					 bool read_insn,
					 unsigned long guest_addr,
					 struct kvm_cpu_trap *trap)
{
	register unsigned long taddr asm("a0") = (unsigned long)trap;
	register unsigned long ttmp asm("a1");
	unsigned long flags, val, tmp, old_stvec, old_hstatus;

	local_irq_save(flags);

	old_hstatus = csr_swap(CSR_HSTATUS, vcpu->arch.guest_context.hstatus);
	old_stvec = csr_swap(CSR_STVEC, (ulong)&__kvm_riscv_unpriv_trap);

	if (read_insn) {
		/*
		 * HLVX.HU instruction
		 * 0110010 00011 rs1 100 rd 1110011
		 */
		asm volatile ("\n"
			".option push\n"
			".option norvc\n"
			"add %[ttmp], %[taddr], 0\n"
			HLVX_HU(%[val], %[addr])
			"andi %[tmp], %[val], 3\n"
			"addi %[tmp], %[tmp], -3\n"
			"bne %[tmp], zero, 2f\n"
			"addi %[addr], %[addr], 2\n"
			HLVX_HU(%[tmp], %[addr])
			"sll %[tmp], %[tmp], 16\n"
			"add %[val], %[val], %[tmp]\n"
			"2:\n"
			".option pop"
		: [val] "=&r" (val), [tmp] "=&r" (tmp),
		  [taddr] "+&r" (taddr), [ttmp] "+&r" (ttmp),
		  [addr] "+&r" (guest_addr) : : "memory");

		if (trap->scause == EXC_LOAD_PAGE_FAULT)
			trap->scause = EXC_INST_PAGE_FAULT;
	} else {
		/*
		 * HLV.D instruction
		 * 0110110 00000 rs1 100 rd 1110011
		 *
		 * HLV.W instruction
		 * 0110100 00000 rs1 100 rd 1110011
		 */
		asm volatile ("\n"
			".option push\n"
			".option norvc\n"
			"add %[ttmp], %[taddr], 0\n"
#ifdef CONFIG_64BIT
			HLV_D(%[val], %[addr])
#else
			HLV_W(%[val], %[addr])
#endif
			".option pop"
		: [val] "=&r" (val),
		  [taddr] "+&r" (taddr), [ttmp] "+&r" (ttmp)
		: [addr] "r" (guest_addr) : "memory");
	}

	csr_write(CSR_STVEC, old_stvec);
	csr_write(CSR_HSTATUS, old_hstatus);

	local_irq_restore(flags);

	return val;
}
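/*
 * Illustrative sketch, not part of this file upstream: a typical consumer of
 * kvm_riscv_vcpu_unpriv_read() fetches the instruction the guest trapped on
 * (read_insn == true) and, if that read itself faults, forwards the fault to
 * the guest with kvm_riscv_vcpu_trap_redirect() below. The helper name
 * fetch_guest_insn() is hypothetical; the pattern mirrors how the
 * instruction-emulation path uses these two functions.
 */
static int __maybe_unused fetch_guest_insn(struct kvm_vcpu *vcpu,
					   unsigned long *insn_out)
{
	struct kvm_cpu_context *ct = &vcpu->arch.guest_context;
	struct kvm_cpu_trap utrap = { 0 };
	unsigned long insn;

	/* Read the instruction word at the guest PC as the guest would. */
	insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc, &utrap);
	if (utrap.scause) {
		/* The unprivileged read faulted; redirect the trap. */
		utrap.sepc = ct->sepc;
		kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
		return 1;
	}

	*insn_out = insn;
	return 0;
}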
/**
 * kvm_riscv_vcpu_trap_redirect -- Redirect trap to Guest
 *
 * @vcpu: The VCPU pointer
 * @trap: Trap details
 */
void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
				  struct kvm_cpu_trap *trap)
{
	unsigned long vsstatus = csr_read(CSR_VSSTATUS);

	/* Change Guest SSTATUS.SPP bit */
	vsstatus &= ~SR_SPP;
	if (vcpu->arch.guest_context.sstatus & SR_SPP)
		vsstatus |= SR_SPP;

	/* Change Guest SSTATUS.SPIE bit */
	vsstatus &= ~SR_SPIE;
	if (vsstatus & SR_SIE)
		vsstatus |= SR_SPIE;

	/* Clear Guest SSTATUS.SIE bit */
	vsstatus &= ~SR_SIE;

	/* Update Guest SSTATUS */
	csr_write(CSR_VSSTATUS, vsstatus);

	/* Update Guest SCAUSE, STVAL, and SEPC */
	csr_write(CSR_VSCAUSE, trap->scause);
	csr_write(CSR_VSTVAL, trap->stval);
	csr_write(CSR_VSEPC, trap->sepc);

	/* Set Guest PC to Guest exception vector */
	vcpu->arch.guest_context.sepc = csr_read(CSR_VSTVEC);
}

/*
 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
 * proper exit to userspace.
 */
int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
			struct kvm_cpu_trap *trap)
{
	int ret;

	/* If we got a host interrupt then do nothing */
	if (trap->scause & CAUSE_IRQ_FLAG)
		return 1;

	/* Handle guest traps */
	ret = -EFAULT;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	switch (trap->scause) {
	case EXC_VIRTUAL_INST_FAULT:
		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
			ret = kvm_riscv_vcpu_virtual_insn(vcpu, run, trap);
		break;
	case EXC_INST_GUEST_PAGE_FAULT:
	case EXC_LOAD_GUEST_PAGE_FAULT:
	case EXC_STORE_GUEST_PAGE_FAULT:
		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
			ret = gstage_page_fault(vcpu, run, trap);
		break;
	case EXC_SUPERVISOR_SYSCALL:
		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
			ret = kvm_riscv_vcpu_sbi_ecall(vcpu, run);
		break;
	default:
		break;
	}

	/* Print details in case of error */
	if (ret < 0) {
		kvm_err("VCPU exit error %d\n", ret);
		kvm_err("SEPC=0x%lx SSTATUS=0x%lx HSTATUS=0x%lx\n",
			vcpu->arch.guest_context.sepc,
			vcpu->arch.guest_context.sstatus,
			vcpu->arch.guest_context.hstatus);
		kvm_err("SCAUSE=0x%lx STVAL=0x%lx HTVAL=0x%lx HTINST=0x%lx\n",
			trap->scause, trap->stval, trap->htval, trap->htinst);
	}

	return ret;
}
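/*
 * Illustrative sketch, not part of this file upstream: how a caller consumes
 * the return convention of kvm_riscv_vcpu_exit() documented above. The name
 * handle_exit_sketch() is hypothetical; the real loop lives in
 * kvm_arch_vcpu_ioctl_run(), which keeps re-entering the guest while the
 * result stays positive.
 */
static int __maybe_unused handle_exit_sketch(struct kvm_vcpu *vcpu,
					     struct kvm_run *run,
					     struct kvm_cpu_trap *trap)
{
	int ret = kvm_riscv_vcpu_exit(vcpu, run, trap);

	if (ret > 0)
		return 1;	/* Handled in kernel; re-enter the guest. */
	if (!ret)
		return 0;	/* run->exit_reason is set; go to userspace. */

	/* ret < 0: error; details were already logged via kvm_err(). */
	return ret;
}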