// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 *     Anup Patel <anup.patel@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/nospec.h>
#include <linux/uaccess.h>
#include <asm/cpufeature.h>

#ifdef CONFIG_FPU
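/*
 * Reset the FS field of the guest's sstatus: Initial when the vCPU ISA
 * includes F or D, Off otherwise.
 */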
void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	cntx->sstatus &= ~SR_FS;
	if (riscv_isa_extension_available(vcpu->arch.isa, f) ||
	    riscv_isa_extension_available(vcpu->arch.isa, d))
		cntx->sstatus |= SR_FS_INITIAL;
	else
		cntx->sstatus |= SR_FS_OFF;
}

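/* Mark the guest FP state Clean: present in the registers but unmodified. */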
static void kvm_riscv_vcpu_fp_clean(struct kvm_cpu_context *cntx)
{
	cntx->sstatus &= ~SR_FS;
	cntx->sstatus |= SR_FS_CLEAN;
}

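/*
 * Save the guest FP registers only when the guest has dirtied them
 * (sstatus.FS == Dirty); the D-extension save path is preferred when the
 * vCPU ISA has it, and the state is marked Clean afterwards.
 */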
void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
				  const unsigned long *isa)
{
	if ((cntx->sstatus & SR_FS) == SR_FS_DIRTY) {
		if (riscv_isa_extension_available(isa, d))
			__kvm_riscv_fp_d_save(cntx);
		else if (riscv_isa_extension_available(isa, f))
			__kvm_riscv_fp_f_save(cntx);
		kvm_riscv_vcpu_fp_clean(cntx);
	}
}

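/*
 * Restore the guest FP registers unless the guest's FP unit is Off, then
 * mark the state Clean again.
 */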
void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,
				     const unsigned long *isa)
{
	if ((cntx->sstatus & SR_FS) != SR_FS_OFF) {
		if (riscv_isa_extension_available(isa, d))
			__kvm_riscv_fp_d_restore(cntx);
		else if (riscv_isa_extension_available(isa, f))
			__kvm_riscv_fp_f_restore(cntx);
		kvm_riscv_vcpu_fp_clean(cntx);
	}
}

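/*
 * Save the host's FP registers; the host ISA (NULL isa bitmap) selects
 * between the D and F save paths.
 */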
void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx)
{
	/* No need to check host sstatus as it can be modified outside */
	if (riscv_isa_extension_available(NULL, d))
		__kvm_riscv_fp_d_save(cntx);
	else if (riscv_isa_extension_available(NULL, f))
		__kvm_riscv_fp_f_save(cntx);
}

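/* Restore the host's FP registers, the counterpart of kvm_riscv_vcpu_host_fp_save(). */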
void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx)
{
	if (riscv_isa_extension_available(NULL, d))
		__kvm_riscv_fp_d_restore(cntx);
	else if (riscv_isa_extension_available(NULL, f))
		__kvm_riscv_fp_f_restore(cntx);
}
#endif

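/*
 * KVM_GET_ONE_REG handler for the F and D register groups: validate the
 * register size for the requested group, map the register number onto the
 * saved guest context, and copy the value out to userspace.
 */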
int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      unsigned long rtype)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    rtype);
	void *reg_val;

	if ((rtype == KVM_REG_RISCV_FP_F) &&
	    riscv_isa_extension_available(vcpu->arch.isa, f)) {
		if (KVM_REG_SIZE(reg->id) != sizeof(u32))
			return -EINVAL;
		if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
			reg_val = &cntx->fp.f.fcsr;
		else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
			  reg_num <= KVM_REG_RISCV_FP_F_REG(f[31])) {
			reg_num = array_index_nospec(reg_num,
					ARRAY_SIZE(cntx->fp.f.f));
			reg_val = &cntx->fp.f.f[reg_num];
		} else
			return -ENOENT;
	} else if ((rtype == KVM_REG_RISCV_FP_D) &&
		   riscv_isa_extension_available(vcpu->arch.isa, d)) {
		if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u32))
				return -EINVAL;
			reg_val = &cntx->fp.d.fcsr;
		} else if ((KVM_REG_RISCV_FP_D_REG(f[0]) <= reg_num) &&
			   reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u64))
				return -EINVAL;
			reg_num = array_index_nospec(reg_num,
					ARRAY_SIZE(cntx->fp.d.f));
			reg_val = &cntx->fp.d.f[reg_num];
		} else
			return -ENOENT;
	} else
		return -ENOENT;

	if (copy_to_user(uaddr, reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

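/*
 * KVM_SET_ONE_REG counterpart of kvm_riscv_vcpu_get_reg_fp(): same lookup
 * and validation, but the new value is copied in from userspace.
 */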
int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      unsigned long rtype)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    rtype);
	void *reg_val;

	if ((rtype == KVM_REG_RISCV_FP_F) &&
	    riscv_isa_extension_available(vcpu->arch.isa, f)) {
		if (KVM_REG_SIZE(reg->id) != sizeof(u32))
			return -EINVAL;
		if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
			reg_val = &cntx->fp.f.fcsr;
		else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
			  reg_num <= KVM_REG_RISCV_FP_F_REG(f[31])) {
			reg_num = array_index_nospec(reg_num,
					ARRAY_SIZE(cntx->fp.f.f));
			reg_val = &cntx->fp.f.f[reg_num];
		} else
			return -ENOENT;
	} else if ((rtype == KVM_REG_RISCV_FP_D) &&
		   riscv_isa_extension_available(vcpu->arch.isa, d)) {
		if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u32))
				return -EINVAL;
			reg_val = &cntx->fp.d.fcsr;
		} else if ((KVM_REG_RISCV_FP_D_REG(f[0]) <= reg_num) &&
			   reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u64))
				return -EINVAL;
			reg_num = array_index_nospec(reg_num,
					ARRAY_SIZE(cntx->fp.d.f));
			reg_val = &cntx->fp.d.f[reg_num];
		} else
			return -ENOENT;
	} else
		return -ENOENT;

	if (copy_from_user(reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
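
/*
 * Illustrative userspace sketch (assumes a valid vcpu_fd and the uapi
 * definitions from <linux/kvm.h> and <asm/kvm.h>): reading guest f0 through
 * the F-extension register group handled above.
 *
 *	__u32 val;
 *	struct kvm_one_reg one_reg = {
 *		.id   = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
 *			KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[0]),
 *		.addr = (unsigned long)&val,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg))
 *		perror("KVM_GET_ONE_REG");
 */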