xref: /linux/arch/riscv/kvm/vcpu_fp.c (revision 24bce201d79807b668bf9d9e0aca801c5c0d5f78)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 *     Anup Patel <anup.patel@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/hwcap.h>

#ifdef CONFIG_FPU
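/*
 * Reset the guest FP state in sstatus: start as "Initial" when the vCPU
 * ISA includes the F or D extension, otherwise leave the FP unit "Off".
 */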
void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
{
	unsigned long isa = vcpu->arch.isa;
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	cntx->sstatus &= ~SR_FS;
	if (riscv_isa_extension_available(&isa, f) ||
	    riscv_isa_extension_available(&isa, d))
		cntx->sstatus |= SR_FS_INITIAL;
	else
		cntx->sstatus |= SR_FS_OFF;
}

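/* Mark the guest FP state "Clean": register contents match the saved copy. */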
static void kvm_riscv_vcpu_fp_clean(struct kvm_cpu_context *cntx)
{
	cntx->sstatus &= ~SR_FS;
	cntx->sstatus |= SR_FS_CLEAN;
}

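/*
 * Save the guest FP registers, but only when the guest actually touched
 * the FP unit (state is "Dirty"). Prefer the D-extension save path when
 * the vCPU ISA has it, fall back to F, then mark the state "Clean".
 */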
void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
				  unsigned long isa)
{
	if ((cntx->sstatus & SR_FS) == SR_FS_DIRTY) {
		if (riscv_isa_extension_available(&isa, d))
			__kvm_riscv_fp_d_save(cntx);
		else if (riscv_isa_extension_available(&isa, f))
			__kvm_riscv_fp_f_save(cntx);
		kvm_riscv_vcpu_fp_clean(cntx);
	}
}

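/*
 * Restore the guest FP registers unless FP is "Off" for this vCPU,
 * then mark the state "Clean".
 */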
void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,
				     unsigned long isa)
{
	if ((cntx->sstatus & SR_FS) != SR_FS_OFF) {
		if (riscv_isa_extension_available(&isa, d))
			__kvm_riscv_fp_d_restore(cntx);
		else if (riscv_isa_extension_available(&isa, f))
			__kvm_riscv_fp_f_restore(cntx);
		kvm_riscv_vcpu_fp_clean(cntx);
	}
}

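/* Save the host FP registers using whatever the host ISA supports. */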
void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx)
{
	/* No need to check host sstatus as it can be modified outside */
	if (riscv_isa_extension_available(NULL, d))
		__kvm_riscv_fp_d_save(cntx);
	else if (riscv_isa_extension_available(NULL, f))
		__kvm_riscv_fp_f_save(cntx);
}

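/* Restore the host FP registers using whatever the host ISA supports. */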
void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx)
{
	if (riscv_isa_extension_available(NULL, d))
		__kvm_riscv_fp_d_restore(cntx);
	else if (riscv_isa_extension_available(NULL, f))
		__kvm_riscv_fp_f_restore(cntx);
}
#endif

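/*
 * KVM_GET_ONE_REG handler for the FP register spaces: validate the
 * requested register id and size against the vCPU ISA (F or D view),
 * then copy the value out to userspace.
 */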
int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      unsigned long rtype)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long isa = vcpu->arch.isa;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    rtype);
	void *reg_val;

	if ((rtype == KVM_REG_RISCV_FP_F) &&
	    riscv_isa_extension_available(&isa, f)) {
		if (KVM_REG_SIZE(reg->id) != sizeof(u32))
			return -EINVAL;
		if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
			reg_val = &cntx->fp.f.fcsr;
		else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
			  reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
			reg_val = &cntx->fp.f.f[reg_num];
		else
			return -EINVAL;
	} else if ((rtype == KVM_REG_RISCV_FP_D) &&
		   riscv_isa_extension_available(&isa, d)) {
		if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u32))
				return -EINVAL;
			reg_val = &cntx->fp.d.fcsr;
		} else if ((KVM_REG_RISCV_FP_D_REG(f[0]) <= reg_num) &&
			   reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u64))
				return -EINVAL;
			reg_val = &cntx->fp.d.f[reg_num];
		} else
			return -EINVAL;
	} else
		return -EINVAL;

	if (copy_to_user(uaddr, reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

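/*
 * KVM_SET_ONE_REG handler for the FP register spaces: same id and size
 * checks as the getter, but copy the new value in from userspace.
 */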
int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      unsigned long rtype)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long isa = vcpu->arch.isa;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    rtype);
	void *reg_val;

	if ((rtype == KVM_REG_RISCV_FP_F) &&
	    riscv_isa_extension_available(&isa, f)) {
		if (KVM_REG_SIZE(reg->id) != sizeof(u32))
			return -EINVAL;
		if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
			reg_val = &cntx->fp.f.fcsr;
		else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
			  reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
			reg_val = &cntx->fp.f.f[reg_num];
		else
			return -EINVAL;
	} else if ((rtype == KVM_REG_RISCV_FP_D) &&
		   riscv_isa_extension_available(&isa, d)) {
		if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u32))
				return -EINVAL;
			reg_val = &cntx->fp.d.fcsr;
		} else if ((KVM_REG_RISCV_FP_D_REG(f[0]) <= reg_num) &&
			   reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u64))
				return -EINVAL;
			reg_val = &cntx->fp.d.f[reg_num];
		} else
			return -EINVAL;
	} else
		return -EINVAL;

	if (copy_from_user(reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
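
For context, here is a minimal userspace sketch (not part of this file) of how
kvm_riscv_vcpu_get_reg_fp() is reached: reading the guest's fcsr through the
KVM_GET_ONE_REG ioctl on a vCPU fd. The vcpu_fd is assumed to come from the
usual KVM_CREATE_VM/KVM_CREATE_VCPU sequence, and the register id macros are
the ones defined in the RISC-V KVM UAPI header.

/*
 * Hypothetical userspace example: fetch the guest's fcsr via KVM_GET_ONE_REG.
 * Assumes vcpu_fd was obtained from KVM_CREATE_VCPU on a RISC-V host.
 */
#include <stddef.h>		/* offsetof, expanded by KVM_REG_RISCV_FP_F_REG() */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/kvm.h>		/* struct kvm_one_reg, KVM_GET_ONE_REG */

typedef __u32 u32;		/* the UAPI _REG() macros expand sizeof(u32) */

#include <asm/kvm.h>		/* KVM_REG_RISCV_FP_F, KVM_REG_RISCV_FP_F_REG() */

static int read_guest_fcsr(int vcpu_fd, u32 *fcsr)
{
	struct kvm_one_reg one_reg = {
		/* A 32-bit register in the F-extension space: fcsr */
		.id = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
		      KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(fcsr),
		.addr = (uint64_t)(unsigned long)fcsr,
	};

	/* Ends up in kvm_riscv_vcpu_get_reg_fp() with rtype == KVM_REG_RISCV_FP_F */
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg);
}

On success the value is copied out by the copy_to_user() in the getter; a
mismatched id or size comes back as -EINVAL, mirroring the checks above.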