// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2026 Qualcomm Technologies, Inc.
 */

#include <linux/kvm_host.h>
#include <asm/kvm_nacl.h>

/*
 * Default set of synchronous exceptions delegated to the guest (VS-mode)
 * via the hedeleg CSR.  Breakpoint delegation is toggled at runtime based
 * on vcpu->guest_debug (see kvm_riscv_vcpu_config_guest_debug()).
 */
#define KVM_HEDELEG_DEFAULT		(BIT(EXC_INST_MISALIGNED) | \
					 BIT(EXC_INST_ILLEGAL) | \
					 BIT(EXC_BREAKPOINT) | \
					 BIT(EXC_SYSCALL) | \
					 BIT(EXC_INST_PAGE_FAULT) | \
					 BIT(EXC_LOAD_PAGE_FAULT) | \
					 BIT(EXC_STORE_PAGE_FAULT))

/*
 * Default set of interrupts delegated to the guest via the hideleg CSR:
 * the VS-level software, timer, and external interrupts.
 */
#define KVM_HIDELEG_DEFAULT		(BIT(IRQ_VS_SOFT) | \
					 BIT(IRQ_VS_TIMER) | \
					 BIT(IRQ_VS_EXT))

/*
 * Initialize the per-vCPU CSR configuration with the default
 * exception/interrupt delegation masks.
 */
void kvm_riscv_vcpu_config_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.cfg.hedeleg = KVM_HEDELEG_DEFAULT;
	vcpu->arch.cfg.hideleg = KVM_HIDELEG_DEFAULT;
}

/*
 * Update breakpoint-exception delegation to match the current guest
 * debug state: when debugging is enabled, EXC_BREAKPOINT is removed from
 * hedeleg (NOTE(review): presumably so breakpoints trap to the host
 * debugger rather than the guest — confirm against the debug ioctl path),
 * otherwise it is restored.
 */
void kvm_riscv_vcpu_config_guest_debug(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;

	if (vcpu->guest_debug)
		cfg->hedeleg &= ~BIT(EXC_BREAKPOINT);
	else
		cfg->hedeleg |= BIT(EXC_BREAKPOINT);

	/*
	 * Flag the cached CSR config as stale; presumably consumed by the
	 * load path to rewrite the CSRs — TODO confirm the consumer.
	 */
	vcpu->arch.csr_dirty = true;
}

/*
 * Derive the henvcfg/hstateen0 configuration from the guest's ISA
 * extension bitmap once the vCPU has run (i.e. the ISA is finalized).
 */
void kvm_riscv_vcpu_config_ran_once(struct kvm_vcpu *vcpu)
{
	const unsigned long *isa = vcpu->arch.isa;
	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;

	/* Svpbmt: allow guest page-based memory types. */
	if (riscv_isa_extension_available(isa, SVPBMT))
		cfg->henvcfg |= ENVCFG_PBMTE;

	/* Sstc: enable the VS-mode stimecmp timer. */
	if (riscv_isa_extension_available(isa, SSTC))
		cfg->henvcfg |= ENVCFG_STCE;

	/* Zicbom: permit cache-block invalidate/clean/flush in the guest. */
	if (riscv_isa_extension_available(isa, ZICBOM))
		cfg->henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE);

	/* Zicboz: permit cache-block zero in the guest. */
	if (riscv_isa_extension_available(isa, ZICBOZ))
		cfg->henvcfg |= ENVCFG_CBZE;

	/*
	 * Svadu without Svade: hardware A/D bit updating is enabled only
	 * when the guest does not require software-managed A/D faults.
	 */
	if (riscv_isa_extension_available(isa, SVADU) &&
	    !riscv_isa_extension_available(isa, SVADE))
		cfg->henvcfg |= ENVCFG_ADUE;

	/*
	 * NOTE(review): the outer check looks like a *host* capability test
	 * while the inner ones test the *guest* ISA bitmap — confirm that
	 * riscv_has_extension_unlikely() queries host Smstateen support.
	 */
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
		cfg->hstateen0 |= SMSTATEEN0_HSENVCFG;
		if (riscv_isa_extension_available(isa, SSAIA))
			cfg->hstateen0 |= SMSTATEEN0_AIA_IMSIC |
					  SMSTATEEN0_AIA |
					  SMSTATEEN0_AIA_ISEL;
		if (riscv_isa_extension_available(isa, SMSTATEEN))
			cfg->hstateen0 |= SMSTATEEN0_SSTATEEN0;
	}

	/*
	 * Keep breakpoint delegation consistent with the debug state, same
	 * as kvm_riscv_vcpu_config_guest_debug() — guards against the debug
	 * flag being set before the ISA was finalized.
	 */
	if (vcpu->guest_debug)
		cfg->hedeleg &= ~BIT(EXC_BREAKPOINT);
}

/*
 * Write the cached vCPU configuration into the hypervisor CSRs.  When the
 * SBI NACL (nested acceleration) synchronous-CSR facility is available,
 * writes go through the NACL shared-memory area; otherwise plain CSR
 * writes are used.  On 32-bit, the upper halves of the 64-bit henvcfg and
 * hstateen0 values go into the corresponding *H CSRs.
 */
void kvm_riscv_vcpu_config_load(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
	void *nsh;

	if (kvm_riscv_nacl_sync_csr_available()) {
		nsh = nacl_shmem();
		nacl_csr_write(nsh, CSR_HEDELEG, cfg->hedeleg);
		nacl_csr_write(nsh, CSR_HIDELEG, cfg->hideleg);
		nacl_csr_write(nsh, CSR_HENVCFG, cfg->henvcfg);
		if (IS_ENABLED(CONFIG_32BIT))
			nacl_csr_write(nsh, CSR_HENVCFGH, cfg->henvcfg >> 32);
		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
			nacl_csr_write(nsh, CSR_HSTATEEN0, cfg->hstateen0);
			if (IS_ENABLED(CONFIG_32BIT))
				nacl_csr_write(nsh, CSR_HSTATEEN0H, cfg->hstateen0 >> 32);
		}
	} else {
		csr_write(CSR_HEDELEG, cfg->hedeleg);
		csr_write(CSR_HIDELEG, cfg->hideleg);
		csr_write(CSR_HENVCFG, cfg->henvcfg);
		if (IS_ENABLED(CONFIG_32BIT))
			csr_write(CSR_HENVCFGH, cfg->henvcfg >> 32);
		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
			csr_write(CSR_HSTATEEN0, cfg->hstateen0);
			if (IS_ENABLED(CONFIG_32BIT))
				csr_write(CSR_HSTATEEN0H, cfg->hstateen0 >> 32);
		}
	}
}