// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <anup.patel@wdc.com>
 */

#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/insn-def.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nacl.h>

static int gstage_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
			     struct kvm_cpu_trap *trap)
{
	struct kvm_gstage_mapping host_map;
	struct kvm_memory_slot *memslot;
	unsigned long hva, fault_addr;
	bool writable;
	gfn_t gfn;
	int ret;

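	/*
	 * Per the RISC-V privileged spec, htval holds the faulting guest
	 * physical address shifted right by 2 and stval supplies the two
	 * low bits. For example, htval == 0x20000 with stval low bits 0b01
	 * reconstructs fault_addr == 0x80001.
	 */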
	fault_addr = (trap->htval << 2) | (trap->stval & 0x3);
	gfn = fault_addr >> PAGE_SHIFT;
	memslot = gfn_to_memslot(vcpu->kvm, gfn);
	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);

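	/*
	 * A GPA with no backing memslot, or a store to a read-only slot,
	 * cannot be mapped into the g-stage page table; hand the access
	 * to the MMIO emulation path instead.
	 */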
	if (kvm_is_error_hva(hva) ||
	    (trap->scause == EXC_STORE_GUEST_PAGE_FAULT && !writable)) {
		switch (trap->scause) {
		case EXC_LOAD_GUEST_PAGE_FAULT:
			return kvm_riscv_vcpu_mmio_load(vcpu, run,
							fault_addr,
							trap->htinst);
		case EXC_STORE_GUEST_PAGE_FAULT:
			return kvm_riscv_vcpu_mmio_store(vcpu, run,
							 fault_addr,
							 trap->htinst);
		default:
			return -EOPNOTSUPP;
		}
	}

	ret = kvm_riscv_mmu_map(vcpu, memslot, fault_addr, hva,
				trap->scause == EXC_STORE_GUEST_PAGE_FAULT,
				&host_map);
	if (ret < 0)
		return ret;

	/* Fault handled; return to the guest. */
	return 1;
}

/**
 * kvm_riscv_vcpu_unpriv_read -- Read machine word from Guest memory
 *
 * @vcpu: The VCPU pointer
 * @read_insn: Flag representing whether we are reading an instruction
 *	       or a data word
 * @guest_addr: Guest address to read
 * @trap: Output pointer to trap details
 *
 * Return: The value read from guest memory; if the access faults, @trap
 *	   is filled in and the return value is not meaningful.
 */
unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
					 bool read_insn,
					 unsigned long guest_addr,
					 struct kvm_cpu_trap *trap)
{
	register unsigned long taddr asm("a0") = (unsigned long)trap;
	register unsigned long ttmp asm("a1");
	unsigned long flags, val, tmp, old_stvec, old_hstatus;

	local_irq_save(flags);

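	/*
	 * Run briefly with the guest's hstatus so that the HLV/HLVX
	 * instructions below perform the access with guest translation
	 * and privilege (hstatus.SPVP), and point stvec at a local
	 * handler so that a fault during the access lands in
	 * __kvm_riscv_unpriv_trap, which records the trap details via
	 * the pointer pinned in a0.
	 */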
	old_hstatus = csr_swap(CSR_HSTATUS, vcpu->arch.guest_context.hstatus);
	old_stvec = csr_swap(CSR_STVEC, (ulong)&__kvm_riscv_unpriv_trap);

	if (read_insn) {
		/*
		 * HLVX.HU instruction
		 * 0110010 00011 rs1 100 rd 1110011
		 */
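		/*
		 * Fetch the first halfword; if its two low bits are 0b11
		 * the instruction is 32 bits wide, so fetch the second
		 * halfword and merge it into the upper 16 bits of val.
		 */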
		asm volatile ("\n"
			".option push\n"
			".option norvc\n"
			"add %[ttmp], %[taddr], 0\n"
			HLVX_HU(%[val], %[addr])
			"andi %[tmp], %[val], 3\n"
			"addi %[tmp], %[tmp], -3\n"
			"bne %[tmp], zero, 2f\n"
			"addi %[addr], %[addr], 2\n"
			HLVX_HU(%[tmp], %[addr])
			"sll %[tmp], %[tmp], 16\n"
			"add %[val], %[val], %[tmp]\n"
			"2:\n"
			".option pop"
			: [val] "=&r" (val), [tmp] "=&r" (tmp),
			  [taddr] "+&r" (taddr), [ttmp] "+&r" (ttmp),
			  [addr] "+&r" (guest_addr) : : "memory");

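		/*
		 * HLVX is encoded as a load, so a faulting instruction
		 * fetch is reported as a load page fault; convert it to
		 * the instruction page fault the caller expects.
		 */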
		if (trap->scause == EXC_LOAD_PAGE_FAULT)
			trap->scause = EXC_INST_PAGE_FAULT;
	} else {
		/*
		 * HLV.D instruction
		 * 0110110 00000 rs1 100 rd 1110011
		 *
		 * HLV.W instruction
		 * 0110100 00000 rs1 100 rd 1110011
		 */
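		/* Read one machine word: HLV.D on RV64, HLV.W on RV32. */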
		asm volatile ("\n"
			".option push\n"
			".option norvc\n"
			"add %[ttmp], %[taddr], 0\n"
#ifdef CONFIG_64BIT
			HLV_D(%[val], %[addr])
#else
			HLV_W(%[val], %[addr])
#endif
			".option pop"
			: [val] "=&r" (val),
			  [taddr] "+&r" (taddr), [ttmp] "+&r" (ttmp)
			: [addr] "r" (guest_addr) : "memory");
	}

	csr_write(CSR_STVEC, old_stvec);
	csr_write(CSR_HSTATUS, old_hstatus);

	local_irq_restore(flags);

	return val;
}

/**
 * kvm_riscv_vcpu_trap_redirect -- Redirect trap to Guest
 *
 * @vcpu: The VCPU pointer
 * @trap: Trap details
 */
void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
				  struct kvm_cpu_trap *trap)
{
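	/*
	 * Emulate what the hardware does when a trap is taken into
	 * VS-mode: save the current interrupt-enable and privilege state
	 * into vsstatus.SPIE/SPP, disable interrupts, latch the cause,
	 * tval and epc, and resume the guest at its exception vector
	 * (vstvec).
	 */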
	unsigned long vsstatus = ncsr_read(CSR_VSSTATUS);

	/* Change Guest SSTATUS.SPP bit */
	vsstatus &= ~SR_SPP;
	if (vcpu->arch.guest_context.sstatus & SR_SPP)
		vsstatus |= SR_SPP;

	/* Change Guest SSTATUS.SPIE bit */
	vsstatus &= ~SR_SPIE;
	if (vsstatus & SR_SIE)
		vsstatus |= SR_SPIE;

	/* Clear Guest SSTATUS.SIE bit */
	vsstatus &= ~SR_SIE;

	/* Update Guest SSTATUS */
	ncsr_write(CSR_VSSTATUS, vsstatus);

	/* Update Guest SCAUSE, STVAL, and SEPC */
	ncsr_write(CSR_VSCAUSE, trap->scause);
	ncsr_write(CSR_VSTVAL, trap->stval);
	ncsr_write(CSR_VSEPC, trap->sepc);

	/* Set Guest PC to Guest exception vector */
	vcpu->arch.guest_context.sepc = ncsr_read(CSR_VSTVEC);

	/* Set Guest privilege mode to supervisor */
	vcpu->arch.guest_context.sstatus |= SR_SPP;
}

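/*
 * Reflect a trap back into the guest when it occurred in virtualization
 * mode (hstatus.SPV set); a trap taken outside virtualization mode
 * indicates a KVM bug and is reported as -EFAULT.
 */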
static inline int vcpu_redirect(struct kvm_vcpu *vcpu, struct kvm_cpu_trap *trap)
{
	int ret = -EFAULT;

	if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV) {
		kvm_riscv_vcpu_trap_redirect(vcpu, trap);
		ret = 1;
	}
	return ret;
}

/*
 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
 * proper exit to userspace.
 */
int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
			struct kvm_cpu_trap *trap)
{
	int ret;

	/* If the exit was caused by a host interrupt, there is nothing to do */
	if (trap->scause & CAUSE_IRQ_FLAG)
		return 1;

	/* Handle guest traps */
	ret = -EFAULT;
	run->exit_reason = KVM_EXIT_UNKNOWN;
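	/*
	 * Synchronous exceptions that the guest can handle itself (illegal
	 * instruction, misaligned and access faults) are redirected back
	 * to it; guest page faults, virtual instruction faults and SBI
	 * calls are handled by KVM, but only when the trap really came
	 * from virtualization mode (hstatus.SPV).
	 */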
	switch (trap->scause) {
	case EXC_INST_ILLEGAL:
		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_ILLEGAL_INSN);
		vcpu->stat.instr_illegal_exits++;
		ret = vcpu_redirect(vcpu, trap);
		break;
	case EXC_LOAD_MISALIGNED:
		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_MISALIGNED_LOAD);
		vcpu->stat.load_misaligned_exits++;
		ret = vcpu_redirect(vcpu, trap);
		break;
	case EXC_STORE_MISALIGNED:
		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_MISALIGNED_STORE);
		vcpu->stat.store_misaligned_exits++;
		ret = vcpu_redirect(vcpu, trap);
		break;
	case EXC_LOAD_ACCESS:
		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_ACCESS_LOAD);
		vcpu->stat.load_access_exits++;
		ret = vcpu_redirect(vcpu, trap);
		break;
	case EXC_STORE_ACCESS:
		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_ACCESS_STORE);
		vcpu->stat.store_access_exits++;
		ret = vcpu_redirect(vcpu, trap);
		break;
	case EXC_INST_ACCESS:
		ret = vcpu_redirect(vcpu, trap);
		break;
	case EXC_VIRTUAL_INST_FAULT:
		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
			ret = kvm_riscv_vcpu_virtual_insn(vcpu, run, trap);
		break;
	case EXC_INST_GUEST_PAGE_FAULT:
	case EXC_LOAD_GUEST_PAGE_FAULT:
	case EXC_STORE_GUEST_PAGE_FAULT:
		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
			ret = gstage_page_fault(vcpu, run, trap);
		break;
	case EXC_SUPERVISOR_SYSCALL:
		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
			ret = kvm_riscv_vcpu_sbi_ecall(vcpu, run);
		break;
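	/*
	 * Guest breakpoints (e.g. software breakpoints inserted via
	 * KVM_GUESTDBG_USE_SW_BP) are not redirected; exit to userspace
	 * so the debugger can handle them.
	 */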
	case EXC_BREAKPOINT:
		run->exit_reason = KVM_EXIT_DEBUG;
		ret = 0;
		break;
	default:
		break;
	}

	/* Print details in case of error */
	if (ret < 0) {
		kvm_err("VCPU exit error %d\n", ret);
		kvm_err("SEPC=0x%lx SSTATUS=0x%lx HSTATUS=0x%lx\n",
			vcpu->arch.guest_context.sepc,
			vcpu->arch.guest_context.sstatus,
			vcpu->arch.guest_context.hstatus);
		kvm_err("SCAUSE=0x%lx STVAL=0x%lx HTVAL=0x%lx HTINST=0x%lx\n",
			trap->scause, trap->stval, trap->htval, trap->htinst);
	}

	return ret;
}