// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2026 Qualcomm Technologies, Inc.
 */

#include <linux/kvm_host.h>
#include <asm/kvm_nacl.h>

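/*
 * Synchronous exceptions delegated to VS-mode by default. These can be
 * handled entirely inside the guest, so there is no need to trap them
 * to HS-mode.
 */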
#define KVM_HEDELEG_DEFAULT	(BIT(EXC_INST_MISALIGNED) | \
				 BIT(EXC_INST_ILLEGAL)    | \
				 BIT(EXC_BREAKPOINT)      | \
				 BIT(EXC_SYSCALL)         | \
				 BIT(EXC_INST_PAGE_FAULT) | \
				 BIT(EXC_LOAD_PAGE_FAULT) | \
				 BIT(EXC_STORE_PAGE_FAULT))

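/*
 * VS-level interrupts (software, timer, and external) delegated to the
 * guest by default; everything else stays with the host.
 */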
#define KVM_HIDELEG_DEFAULT	(BIT(IRQ_VS_SOFT)  | \
				 BIT(IRQ_VS_TIMER) | \
				 BIT(IRQ_VS_EXT))

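/*
 * Set the baseline exception and interrupt delegation for a newly
 * created vCPU. Extension-dependent bits are filled in later, once the
 * guest ISA is known (see kvm_riscv_vcpu_config_ran_once() below).
 */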
void kvm_riscv_vcpu_config_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.cfg.hedeleg = KVM_HEDELEG_DEFAULT;
	vcpu->arch.cfg.hideleg = KVM_HIDELEG_DEFAULT;
}

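/*
 * Toggle breakpoint delegation whenever userspace changes the guest
 * debug state: with debugging enabled, EBREAK must trap to HS-mode so
 * KVM can forward it to userspace; otherwise the guest handles its own
 * breakpoints. Setting csr_dirty presumably requests that the updated
 * delegation be reprogrammed on the next vCPU load.
 */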
void kvm_riscv_vcpu_config_guest_debug(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;

	if (vcpu->guest_debug)
		cfg->hedeleg &= ~BIT(EXC_BREAKPOINT);
	else
		cfg->hedeleg |= BIT(EXC_BREAKPOINT);

	vcpu->arch.csr_dirty = true;
}

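/*
 * Finalize the extension-dependent parts of the vCPU configuration.
 * This presumably runs once the guest ISA is frozen (i.e. after the
 * vCPU has run for the first time), so the henvcfg and hstateen0
 * values derived here remain stable afterwards.
 */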
void kvm_riscv_vcpu_config_ran_once(struct kvm_vcpu *vcpu)
{
	const unsigned long *isa = vcpu->arch.isa;
	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;

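	/*
	 * Enable henvcfg features for each extension present in the
	 * guest ISA: Svpbmt page-based memory types, Sstc's stimecmp,
	 * and the Zicbom/Zicboz cache-block management instructions.
	 */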
	if (riscv_isa_extension_available(isa, SVPBMT))
		cfg->henvcfg |= ENVCFG_PBMTE;

	if (riscv_isa_extension_available(isa, SSTC))
		cfg->henvcfg |= ENVCFG_STCE;

	if (riscv_isa_extension_available(isa, ZICBOM))
		cfg->henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE);

	if (riscv_isa_extension_available(isa, ZICBOZ))
		cfg->henvcfg |= ENVCFG_CBZE;

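	/*
	 * Hardware A/D bit updates for guest page tables are only
	 * advertised when Svadu is present and Svade has not been
	 * requested, hence the ADUE bit depends on both extensions.
	 */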
	if (riscv_isa_extension_available(isa, SVADU) &&
	    !riscv_isa_extension_available(isa, SVADE))
		cfg->henvcfg |= ENVCFG_ADUE;

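	/*
	 * With Smstateen, guest access to envcfg and AIA state is gated
	 * by hstateen0, so whitelist only the state the guest is
	 * actually allowed to touch.
	 */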
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
		cfg->hstateen0 |= SMSTATEEN0_HSENVCFG;
		if (riscv_isa_extension_available(isa, SSAIA))
			cfg->hstateen0 |= SMSTATEEN0_AIA_IMSIC |
					  SMSTATEEN0_AIA |
					  SMSTATEEN0_AIA_ISEL;
		if (riscv_isa_extension_available(isa, SMSTATEEN))
			cfg->hstateen0 |= SMSTATEEN0_SSTATEEN0;
	}

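	/*
	 * Re-apply the guest debug override in case debugging was
	 * enabled before the vCPU first ran.
	 */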
	if (vcpu->guest_debug)
		cfg->hedeleg &= ~BIT(EXC_BREAKPOINT);
}

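/*
 * Program the per-vCPU configuration into the hardware CSRs. This is
 * expected to run from vCPU load, i.e. on the CPU that will execute
 * the guest.
 */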
void kvm_riscv_vcpu_config_load(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
	void *nsh;

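	/*
	 * When the SBI nested acceleration (NACL) sync-CSR feature is
	 * available, stage the values in the NACL shared memory so the
	 * host SBI implementation can batch the CSR updates; otherwise
	 * fall back to direct CSR writes.
	 */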
	if (kvm_riscv_nacl_sync_csr_available()) {
		nsh = nacl_shmem();
		nacl_csr_write(nsh, CSR_HEDELEG, cfg->hedeleg);
		nacl_csr_write(nsh, CSR_HIDELEG, cfg->hideleg);
		nacl_csr_write(nsh, CSR_HENVCFG, cfg->henvcfg);
		if (IS_ENABLED(CONFIG_32BIT))
			nacl_csr_write(nsh, CSR_HENVCFGH, cfg->henvcfg >> 32);
		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
			nacl_csr_write(nsh, CSR_HSTATEEN0, cfg->hstateen0);
			if (IS_ENABLED(CONFIG_32BIT))
				nacl_csr_write(nsh, CSR_HSTATEEN0H, cfg->hstateen0 >> 32);
		}
	} else {
		csr_write(CSR_HEDELEG, cfg->hedeleg);
		csr_write(CSR_HIDELEG, cfg->hideleg);
		csr_write(CSR_HENVCFG, cfg->henvcfg);
		if (IS_ENABLED(CONFIG_32BIT))
			csr_write(CSR_HENVCFGH, cfg->henvcfg >> 32);
		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
			csr_write(CSR_HSTATEEN0, cfg->hstateen0);
			if (IS_ENABLED(CONFIG_32BIT))
				csr_write(CSR_HSTATEEN0H, cfg->hstateen0 >> 32);
		}
	}
}