// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2024 Ventana Micro Systems Inc.
 */

#include <linux/kvm_host.h>

#include <asm/kvm_vcpu_sbi.h>
#include <asm/sbi.h>

static int kvm_sbi_ext_susp_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				    struct kvm_vcpu_sbi_return *retdata)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	struct kvm_cpu_context *reset_cntx;
	unsigned long funcid = cp->a6;
	unsigned long hva, i;
	struct kvm_vcpu *tmp;

	switch (funcid) {
	case SBI_EXT_SUSP_SYSTEM_SUSPEND:
		/* Only the suspend-to-RAM sleep type is supported. */
		if (cp->a0 != SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM) {
			retdata->err_val = SBI_ERR_INVALID_PARAM;
			return 0;
		}

		/* System suspend may only be requested from the guest's S-mode. */
		if (!(cp->sstatus & SR_SPP)) {
			retdata->err_val = SBI_ERR_FAILURE;
			return 0;
		}

		/* Validate the resume address (a1) against the guest memory map. */
		hva = kvm_vcpu_gfn_to_hva_prot(vcpu, cp->a1 >> PAGE_SHIFT, NULL);
		if (kvm_is_error_hva(hva)) {
			retdata->err_val = SBI_ERR_INVALID_ADDRESS;
			return 0;
		}

		/* All other vCPUs must already be stopped before suspending. */
		kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
			if (tmp == vcpu)
				continue;
			if (!kvm_riscv_vcpu_stopped(tmp)) {
				retdata->err_val = SBI_ERR_DENIED;
				return 0;
			}
		}

		/*
		 * On resume, this vCPU restarts at the resume address with
		 * a0 = hartid and a1 = the caller-supplied opaque value.
		 */
		spin_lock(&vcpu->arch.reset_cntx_lock);
		reset_cntx = &vcpu->arch.guest_reset_context;
		reset_cntx->sepc = cp->a1;
		reset_cntx->a0 = vcpu->vcpu_id;
		reset_cntx->a1 = cp->a2;
		spin_unlock(&vcpu->arch.reset_cntx_lock);

		kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);

		/* userspace provides the suspend implementation */
		kvm_riscv_vcpu_sbi_forward(vcpu, run);
		retdata->uexit = true;
		break;
	default:
		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
		break;
	}

	return 0;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_susp = {
	.extid_start = SBI_EXT_SUSP,
	.extid_end = SBI_EXT_SUSP,
	.default_disabled = true,
	.handler = kvm_sbi_ext_susp_handler,
};