xref: /linux/arch/riscv/kvm/vcpu_fp.c (revision 3f276cece4dd9e8bf199d9bf3901eef8ca904c2d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2021 Western Digital Corporation or its affiliates.
4  *
5  * Authors:
6  *     Atish Patra <atish.patra@wdc.com>
7  *     Anup Patel <anup.patel@wdc.com>
8  */
9 
10 #include <linux/errno.h>
11 #include <linux/err.h>
12 #include <linux/kvm_host.h>
13 #include <linux/nospec.h>
14 #include <linux/uaccess.h>
15 #include <asm/cpufeature.h>
16 
17 #ifdef CONFIG_FPU
18 void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
19 {
20 	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
21 
22 	cntx->sstatus &= ~SR_FS;
23 	if (riscv_isa_extension_available(vcpu->arch.isa, f) ||
24 	    riscv_isa_extension_available(vcpu->arch.isa, d))
25 		cntx->sstatus |= SR_FS_INITIAL;
26 	else
27 		cntx->sstatus |= SR_FS_OFF;
28 }
29 
30 static void kvm_riscv_vcpu_fp_clean(struct kvm_cpu_context *cntx)
31 {
32 	cntx->sstatus &= ~SR_FS;
33 	cntx->sstatus |= SR_FS_CLEAN;
34 }
35 
36 void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
37 				  const unsigned long *isa)
38 {
39 	if ((cntx->sstatus & SR_FS) == SR_FS_DIRTY) {
40 		if (riscv_isa_extension_available(isa, d))
41 			__kvm_riscv_fp_d_save(cntx);
42 		else if (riscv_isa_extension_available(isa, f))
43 			__kvm_riscv_fp_f_save(cntx);
44 		kvm_riscv_vcpu_fp_clean(cntx);
45 	}
46 }
47 
48 void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,
49 				     const unsigned long *isa)
50 {
51 	if ((cntx->sstatus & SR_FS) != SR_FS_OFF) {
52 		if (riscv_isa_extension_available(isa, d))
53 			__kvm_riscv_fp_d_restore(cntx);
54 		else if (riscv_isa_extension_available(isa, f))
55 			__kvm_riscv_fp_f_restore(cntx);
56 		kvm_riscv_vcpu_fp_clean(cntx);
57 	}
58 }
59 
/*
 * Save the host FP register file into @cntx, using the D-extension
 * save path when the host supports D, otherwise the F path.
 */
void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx)
{
	/* No need to check host sstatus as it can be modified outside */
	if (riscv_isa_extension_available(NULL, d))
		__kvm_riscv_fp_d_save(cntx);
	else if (riscv_isa_extension_available(NULL, f))
		__kvm_riscv_fp_f_save(cntx);
}
68 
/*
 * Restore the host FP register file from @cntx; counterpart of
 * kvm_riscv_vcpu_host_fp_save(), preferring the D path over F.
 */
void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx)
{
	if (riscv_isa_extension_available(NULL, d))
		__kvm_riscv_fp_d_restore(cntx);
	else if (riscv_isa_extension_available(NULL, f))
		__kvm_riscv_fp_f_restore(cntx);
}
76 #endif
77 
78 int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
79 			      const struct kvm_one_reg *reg,
80 			      unsigned long rtype)
81 {
82 	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
83 	unsigned long __user *uaddr =
84 			(unsigned long __user *)(unsigned long)reg->addr;
85 	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
86 					    KVM_REG_SIZE_MASK |
87 					    rtype);
88 	void *reg_val;
89 
90 	if ((rtype == KVM_REG_RISCV_FP_F) &&
91 	    riscv_isa_extension_available(vcpu->arch.isa, f)) {
92 		if (KVM_REG_SIZE(reg->id) != sizeof(u32))
93 			return -EINVAL;
94 		if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
95 			reg_val = &cntx->fp.f.fcsr;
96 		else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
97 			  reg_num <= KVM_REG_RISCV_FP_F_REG(f[31])) {
98 			reg_num = array_index_nospec(reg_num,
99 					ARRAY_SIZE(cntx->fp.f.f));
100 			reg_val = &cntx->fp.f.f[reg_num];
101 		} else
102 			return -ENOENT;
103 	} else if ((rtype == KVM_REG_RISCV_FP_D) &&
104 		   riscv_isa_extension_available(vcpu->arch.isa, d)) {
105 		if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
106 			if (KVM_REG_SIZE(reg->id) != sizeof(u32))
107 				return -EINVAL;
108 			reg_val = &cntx->fp.d.fcsr;
109 		} else if ((KVM_REG_RISCV_FP_D_REG(f[0]) <= reg_num) &&
110 			   reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
111 			if (KVM_REG_SIZE(reg->id) != sizeof(u64))
112 				return -EINVAL;
113 			reg_num = array_index_nospec(reg_num,
114 					ARRAY_SIZE(cntx->fp.d.f));
115 			reg_val = &cntx->fp.d.f[reg_num];
116 		} else
117 			return -ENOENT;
118 	} else
119 		return -ENOENT;
120 
121 	if (copy_to_user(uaddr, reg_val, KVM_REG_SIZE(reg->id)))
122 		return -EFAULT;
123 
124 	return 0;
125 }
126 
127 int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
128 			      const struct kvm_one_reg *reg,
129 			      unsigned long rtype)
130 {
131 	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
132 	unsigned long __user *uaddr =
133 			(unsigned long __user *)(unsigned long)reg->addr;
134 	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
135 					    KVM_REG_SIZE_MASK |
136 					    rtype);
137 	void *reg_val;
138 
139 	if ((rtype == KVM_REG_RISCV_FP_F) &&
140 	    riscv_isa_extension_available(vcpu->arch.isa, f)) {
141 		if (KVM_REG_SIZE(reg->id) != sizeof(u32))
142 			return -EINVAL;
143 		if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
144 			reg_val = &cntx->fp.f.fcsr;
145 		else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
146 			  reg_num <= KVM_REG_RISCV_FP_F_REG(f[31])) {
147 			reg_num = array_index_nospec(reg_num,
148 					ARRAY_SIZE(cntx->fp.f.f));
149 			reg_val = &cntx->fp.f.f[reg_num];
150 		} else
151 			return -ENOENT;
152 	} else if ((rtype == KVM_REG_RISCV_FP_D) &&
153 		   riscv_isa_extension_available(vcpu->arch.isa, d)) {
154 		if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
155 			if (KVM_REG_SIZE(reg->id) != sizeof(u32))
156 				return -EINVAL;
157 			reg_val = &cntx->fp.d.fcsr;
158 		} else if ((KVM_REG_RISCV_FP_D_REG(f[0]) <= reg_num) &&
159 			   reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
160 			if (KVM_REG_SIZE(reg->id) != sizeof(u64))
161 				return -EINVAL;
162 			reg_num = array_index_nospec(reg_num,
163 					ARRAY_SIZE(cntx->fp.d.f));
164 			reg_val = &cntx->fp.d.f[reg_num];
165 		} else
166 			return -ENOENT;
167 	} else
168 		return -ENOENT;
169 
170 	if (copy_from_user(reg_val, uaddr, KVM_REG_SIZE(reg->id)))
171 		return -EFAULT;
172 
173 	return 0;
174 }
175