// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_timer.h>
#include <asm/kvm_vcpu_sbi.h>

static int kvm_sbi_ext_v01_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				   struct kvm_vcpu_sbi_return *retdata)
{
	ulong hmask;
	int i, ret = 0;
	u64 next_cycle;
	struct kvm_vcpu *rvcpu;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	struct kvm_cpu_trap *utrap = retdata->utrap;
	unsigned long vmid;

	switch (cp->a7) {
	case SBI_EXT_0_1_CONSOLE_GETCHAR:
	case SBI_EXT_0_1_CONSOLE_PUTCHAR:
		/*
		 * The CONSOLE_GETCHAR/CONSOLE_PUTCHAR SBI calls cannot be
		 * handled in the kernel, so we forward them to user-space.
		 */
		kvm_riscv_vcpu_sbi_forward(vcpu, run);
		retdata->uexit = true;
		break;
	case SBI_EXT_0_1_SET_TIMER:
#if __riscv_xlen == 32
		/* On RV32 the 64-bit deadline is split across a1 (high) and a0 (low). */
		next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
#else
		next_cycle = (u64)cp->a0;
#endif
		ret = kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);
		break;
	case SBI_EXT_0_1_CLEAR_IPI:
		ret = kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_SOFT);
		break;
	case SBI_EXT_0_1_SEND_IPI:
		/* a0 points to a hart mask in guest memory; zero means all harts. */
		if (cp->a0)
			hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0, utrap);
		else
			hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1;
		if (utrap->scause)
			break;

		for_each_set_bit(i, &hmask, BITS_PER_LONG) {
			rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i);
			ret = kvm_riscv_vcpu_set_interrupt(rvcpu, IRQ_VS_SOFT);
			if (ret < 0)
				break;
		}
		break;
	case SBI_EXT_0_1_SHUTDOWN:
		kvm_riscv_vcpu_sbi_system_reset(vcpu, run,
						KVM_SYSTEM_EVENT_SHUTDOWN, 0);
		retdata->uexit = true;
		break;
	case SBI_EXT_0_1_REMOTE_FENCE_I:
	case SBI_EXT_0_1_REMOTE_SFENCE_VMA:
	case SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID:
		if (cp->a0)
			hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0, utrap);
		else
			hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1;
		if (utrap->scause)
			break;

		if (cp->a7 == SBI_EXT_0_1_REMOTE_FENCE_I)
			kvm_riscv_fence_i(vcpu->kvm, 0, hmask);
		else if (cp->a7 == SBI_EXT_0_1_REMOTE_SFENCE_VMA) {
			vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
			if (cp->a1 == 0 && cp->a2 == 0)
				kvm_riscv_hfence_vvma_all(vcpu->kvm, 0, hmask, vmid);
			else
				kvm_riscv_hfence_vvma_gva(vcpu->kvm, 0, hmask, cp->a1,
							  cp->a2, PAGE_SHIFT, vmid);
		} else {
			vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
			if (cp->a1 == 0 && cp->a2 == 0)
				kvm_riscv_hfence_vvma_asid_all(vcpu->kvm, 0, hmask,
							       cp->a3, vmid);
			else
				kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm, 0, hmask,
							       cp->a1, cp->a2, PAGE_SHIFT,
							       cp->a3, vmid);
		}
		break;
	default:
		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
		break;
	}

	return ret;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
	.extid_start = SBI_EXT_0_1_SET_TIMER,
	.extid_end = SBI_EXT_0_1_SHUTDOWN,
	.handler = kvm_sbi_ext_v01_handler,
};
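
/*
 * Illustrative sketch (not part of this file): how a guest kernel might
 * issue the legacy v0.1 SET_TIMER call that the handler above services.
 * Legacy extensions encode the call in the extension ID (a7) alone, with
 * no function ID in a6 and arguments in a0/a1; on RV32 the 64-bit
 * deadline is split across a0 (low) and a1 (high). The helper name and
 * the RV64-only assumption below are ours, not upstream's, so the code
 * is fenced off from compilation.
 */
#if 0	/* guest-side example, RV64 only */
static inline void guest_sbi_v01_set_timer(u64 next_cycle)
{
	register unsigned long a0 asm("a0") = (unsigned long)next_cycle;
	register unsigned long a7 asm("a7") = SBI_EXT_0_1_SET_TIMER;

	/* Trap to the SBI implementation (here, the KVM handler above). */
	asm volatile("ecall" : "+r"(a0) : "r"(a7) : "memory");
}
#endif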