xref: /linux/arch/riscv/kvm/vcpu_vector.c (revision 5ea5880764cbb164afb17a62e76ca75dc371409d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2022 SiFive
4  *
5  * Authors:
6  *     Vincent Chen <vincent.chen@sifive.com>
7  *     Greentime Hu <greentime.hu@sifive.com>
8  */
9 
10 #include <linux/errno.h>
11 #include <linux/err.h>
12 #include <linux/kvm_host.h>
13 #include <linux/uaccess.h>
14 #include <asm/cpufeature.h>
15 #include <asm/kvm_isa.h>
16 #include <asm/kvm_vcpu_vector.h>
17 #include <asm/vector.h>
18 
19 #ifdef CONFIG_RISCV_ISA_V
20 void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu)
21 {
22 	unsigned long *isa = vcpu->arch.isa;
23 	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
24 
25 	cntx->sstatus &= ~SR_VS;
26 
27 	cntx->vector.vlenb = riscv_v_vsize / 32;
28 
29 	if (riscv_isa_extension_available(isa, v)) {
30 		cntx->sstatus |= SR_VS_INITIAL;
31 		WARN_ON(!cntx->vector.datap);
32 		memset(cntx->vector.datap, 0, riscv_v_vsize);
33 	} else {
34 		cntx->sstatus |= SR_VS_OFF;
35 	}
36 }
37 
38 static void kvm_riscv_vcpu_vector_clean(struct kvm_cpu_context *cntx)
39 {
40 	cntx->sstatus &= ~SR_VS;
41 	cntx->sstatus |= SR_VS_CLEAN;
42 }
43 
44 void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx,
45 				      unsigned long *isa)
46 {
47 	if ((cntx->sstatus & SR_VS) == SR_VS_DIRTY) {
48 		if (riscv_isa_extension_available(isa, v))
49 			__kvm_riscv_vector_save(cntx);
50 		kvm_riscv_vcpu_vector_clean(cntx);
51 	}
52 }
53 
54 void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,
55 					 unsigned long *isa)
56 {
57 	if ((cntx->sstatus & SR_VS) != SR_VS_OFF) {
58 		if (riscv_isa_extension_available(isa, v))
59 			__kvm_riscv_vector_restore(cntx);
60 		kvm_riscv_vcpu_vector_clean(cntx);
61 	}
62 }
63 
64 void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx)
65 {
66 	/* No need to check host sstatus as it can be modified outside */
67 	if (!kvm_riscv_isa_check_host(V))
68 		__kvm_riscv_vector_save(cntx);
69 }
70 
71 void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx)
72 {
73 	if (!kvm_riscv_isa_check_host(V))
74 		__kvm_riscv_vector_restore(cntx);
75 }
76 
77 int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu)
78 {
79 	vcpu->arch.guest_context.vector.datap = kzalloc(riscv_v_vsize, GFP_KERNEL);
80 	if (!vcpu->arch.guest_context.vector.datap)
81 		return -ENOMEM;
82 
83 	vcpu->arch.host_context.vector.datap = kzalloc(riscv_v_vsize, GFP_KERNEL);
84 	if (!vcpu->arch.host_context.vector.datap) {
85 		kfree(vcpu->arch.guest_context.vector.datap);
86 		vcpu->arch.guest_context.vector.datap = NULL;
87 		return -ENOMEM;
88 	}
89 
90 	return 0;
91 }
92 
93 void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu)
94 {
95 	kfree(vcpu->arch.guest_context.vector.datap);
96 	kfree(vcpu->arch.host_context.vector.datap);
97 }
98 #endif
99 
100 static int kvm_riscv_vcpu_vreg_addr(struct kvm_vcpu *vcpu,
101 				    unsigned long reg_num,
102 				    size_t reg_size,
103 				    void **reg_addr)
104 {
105 	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
106 	size_t vlenb = riscv_v_vsize / 32;
107 
108 	if (reg_num < KVM_REG_RISCV_VECTOR_REG(0)) {
109 		if (reg_size != sizeof(unsigned long))
110 			return -EINVAL;
111 		switch (reg_num) {
112 		case KVM_REG_RISCV_VECTOR_CSR_REG(vstart):
113 			*reg_addr = &cntx->vector.vstart;
114 			break;
115 		case KVM_REG_RISCV_VECTOR_CSR_REG(vl):
116 			*reg_addr = &cntx->vector.vl;
117 			break;
118 		case KVM_REG_RISCV_VECTOR_CSR_REG(vtype):
119 			*reg_addr = &cntx->vector.vtype;
120 			break;
121 		case KVM_REG_RISCV_VECTOR_CSR_REG(vcsr):
122 			*reg_addr = &cntx->vector.vcsr;
123 			break;
124 		case KVM_REG_RISCV_VECTOR_CSR_REG(vlenb):
125 			*reg_addr = &cntx->vector.vlenb;
126 			break;
127 		case KVM_REG_RISCV_VECTOR_CSR_REG(datap):
128 		default:
129 			return -ENOENT;
130 		}
131 	} else if (reg_num <= KVM_REG_RISCV_VECTOR_REG(31)) {
132 		if (reg_size != vlenb)
133 			return -EINVAL;
134 		WARN_ON(!cntx->vector.datap);
135 		*reg_addr = cntx->vector.datap +
136 			    (reg_num - KVM_REG_RISCV_VECTOR_REG(0)) * vlenb;
137 	} else {
138 		return -ENOENT;
139 	}
140 
141 	return 0;
142 }
143 
144 int kvm_riscv_vcpu_get_reg_vector(struct kvm_vcpu *vcpu,
145 				  const struct kvm_one_reg *reg)
146 {
147 	unsigned long *isa = vcpu->arch.isa;
148 	unsigned long __user *uaddr =
149 			(unsigned long __user *)(unsigned long)reg->addr;
150 	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
151 					    KVM_REG_SIZE_MASK |
152 					    KVM_REG_RISCV_VECTOR);
153 	size_t reg_size = KVM_REG_SIZE(reg->id);
154 	void *reg_addr;
155 	int rc;
156 
157 	if (!riscv_isa_extension_available(isa, v))
158 		return -ENOENT;
159 
160 	rc = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size, &reg_addr);
161 	if (rc)
162 		return rc;
163 
164 	if (copy_to_user(uaddr, reg_addr, reg_size))
165 		return -EFAULT;
166 
167 	return 0;
168 }
169 
170 int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu,
171 				  const struct kvm_one_reg *reg)
172 {
173 	unsigned long *isa = vcpu->arch.isa;
174 	unsigned long __user *uaddr =
175 			(unsigned long __user *)(unsigned long)reg->addr;
176 	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
177 					    KVM_REG_SIZE_MASK |
178 					    KVM_REG_RISCV_VECTOR);
179 	size_t reg_size = KVM_REG_SIZE(reg->id);
180 	void *reg_addr;
181 	int rc;
182 
183 	if (!riscv_isa_extension_available(isa, v))
184 		return -ENOENT;
185 
186 	if (reg_num == KVM_REG_RISCV_VECTOR_CSR_REG(vlenb)) {
187 		struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
188 		unsigned long reg_val;
189 
190 		if (reg_size != sizeof(reg_val))
191 			return -EINVAL;
192 		if (copy_from_user(&reg_val, uaddr, reg_size))
193 			return -EFAULT;
194 		if (reg_val != cntx->vector.vlenb)
195 			return -EINVAL;
196 
197 		return 0;
198 	}
199 
200 	rc = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size, &reg_addr);
201 	if (rc)
202 		return rc;
203 
204 	if (copy_from_user(reg_addr, uaddr, reg_size))
205 		return -EFAULT;
206 
207 	return 0;
208 }
209