// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023 Ventana Micro Systems Inc.
 */

#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/sizes.h>

#include <asm/bug.h>
#include <asm/current.h>
#include <asm/kvm_vcpu_sbi.h>
#include <asm/page.h>
#include <asm/sbi.h>
#include <asm/uaccess.h>
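/*
 * Forget any registered shared memory area and clear the steal-time
 * baseline; used as the extension's reset callback.
 */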
static void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sta.shmem = INVALID_GPA;
	vcpu->arch.sta.last_steal = 0;
}

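/*
 * Publish the vCPU's accumulated steal time (run_delay) to the guest's
 * registered sbi_sta_struct. The update is bracketed by two increments
 * of the sequence field, so the guest observes an odd value while the
 * record is being written and can retry, e.g. (guest-side sketch):
 *
 *	do {
 *		seq = le32_to_cpu(READ_ONCE(sta->sequence));
 *		steal = le64_to_cpu(READ_ONCE(sta->steal));
 *	} while (seq & 1 || le32_to_cpu(READ_ONCE(sta->sequence)) != seq);
 */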
void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
{
	gpa_t shmem = vcpu->arch.sta.shmem;
	u64 last_steal = vcpu->arch.sta.last_steal;
	__le32 __user *sequence_ptr;
	__le64 __user *steal_ptr;
	__le32 sequence_le;
	__le64 steal_le;
	u32 sequence;
	u64 steal;
	unsigned long hva;
	gfn_t gfn;

	if (shmem == INVALID_GPA)
		return;

	/*
	 * shmem is 64-byte aligned (see the enforcement in
	 * kvm_sbi_sta_steal_time_set_shmem()) and the size of sbi_sta_struct
	 * is 64 bytes, so we know all its offsets are in the same page.
	 */
	gfn = shmem >> PAGE_SHIFT;
	hva = kvm_vcpu_gfn_to_hva(vcpu, gfn);

	if (WARN_ON(kvm_is_error_hva(hva))) {
		vcpu->arch.sta.shmem = INVALID_GPA;
		return;
	}

	sequence_ptr = (__le32 __user *)(hva + offset_in_page(shmem) +
			       offsetof(struct sbi_sta_struct, sequence));
	steal_ptr = (__le64 __user *)(hva + offset_in_page(shmem) +
			    offsetof(struct sbi_sta_struct, steal));

	if (WARN_ON(get_user(sequence_le, sequence_ptr)))
		return;

	sequence = le32_to_cpu(sequence_le);
	sequence += 1;

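	/* Odd sequence value: tell the guest an update is in progress. */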
	if (WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr)))
		return;

	if (!WARN_ON(get_user(steal_le, steal_ptr))) {
		steal = le64_to_cpu(steal_le);
		vcpu->arch.sta.last_steal = READ_ONCE(current->sched_info.run_delay);
		steal += vcpu->arch.sta.last_steal - last_steal;
		WARN_ON(put_user(cpu_to_le64(steal), steal_ptr));
	}

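	/* Second increment makes the sequence even again: update complete. */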
	sequence += 1;
	WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr));

	kvm_vcpu_mark_page_dirty(vcpu, gfn);
}

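/*
 * Handle SBI_EXT_STA_STEAL_TIME_SET_SHMEM: validate the flags and the
 * 64-byte alignment required by the SBI STA extension, then register
 * (or, for SBI_SHMEM_DISABLE, unregister) the guest's shared memory
 * area, zero-initializing it on success.
 */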
static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long shmem_phys_lo = cp->a0;
	unsigned long shmem_phys_hi = cp->a1;
	u32 flags = cp->a2;
	struct sbi_sta_struct zero_sta = {0};
	gpa_t shmem;
	int ret;

	if (flags != 0)
		return SBI_ERR_INVALID_PARAM;

	if (shmem_phys_lo == SBI_SHMEM_DISABLE &&
	    shmem_phys_hi == SBI_SHMEM_DISABLE) {
		vcpu->arch.sta.shmem = INVALID_GPA;
		return 0;
	}

	if (shmem_phys_lo & (SZ_64 - 1))
		return SBI_ERR_INVALID_PARAM;

	shmem = shmem_phys_lo;

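	/*
	 * The high half of the address is only meaningful on 32-bit hosts,
	 * where a guest physical address can exceed XLEN; on 64-bit hosts
	 * a non-zero high word cannot form a valid address.
	 */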
	if (shmem_phys_hi != 0) {
		if (IS_ENABLED(CONFIG_32BIT))
			shmem |= ((gpa_t)shmem_phys_hi << 32);
		else
			return SBI_ERR_INVALID_ADDRESS;
	}

	/*
	 * No need to check for a writable memslot explicitly:
	 * kvm_vcpu_write_guest() does that internally.
	 */
	ret = kvm_vcpu_write_guest(vcpu, shmem, &zero_sta, sizeof(zero_sta));
	if (ret)
		return SBI_ERR_INVALID_ADDRESS;

	vcpu->arch.sta.shmem = shmem;
	vcpu->arch.sta.last_steal = current->sched_info.run_delay;

	return 0;
}

static int kvm_sbi_ext_sta_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				   struct kvm_vcpu_sbi_return *retdata)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long funcid = cp->a6;
	int ret;

	switch (funcid) {
	case SBI_EXT_STA_STEAL_TIME_SET_SHMEM:
		ret = kvm_sbi_sta_steal_time_set_shmem(vcpu);
		break;
	default:
		ret = SBI_ERR_NOT_SUPPORTED;
		break;
	}

	retdata->err_val = ret;

	return 0;
}

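/*
 * Steal-time accounting is only possible when the host tracks
 * run_delay, so only advertise the extension when sched_info is on.
 */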
static unsigned long kvm_sbi_ext_sta_probe(struct kvm_vcpu *vcpu)
{
	return !!sched_info_on();
}

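/*
 * The STA state exposed through the ONE_REG interface is the shared
 * memory address, split into the shmem_lo/shmem_hi register pair so
 * that user space can save and restore it across migration.
 */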
static unsigned long kvm_sbi_ext_sta_get_state_reg_count(struct kvm_vcpu *vcpu)
{
	return sizeof(struct kvm_riscv_sbi_sta) / sizeof(unsigned long);
}

static int kvm_sbi_ext_sta_get_reg(struct kvm_vcpu *vcpu, unsigned long reg_num,
				   unsigned long reg_size, void *reg_val)
{
	unsigned long *value;

	if (reg_size != sizeof(unsigned long))
		return -EINVAL;
	value = reg_val;

	switch (reg_num) {
	case KVM_REG_RISCV_SBI_STA_REG(shmem_lo):
		*value = (unsigned long)vcpu->arch.sta.shmem;
		break;
	case KVM_REG_RISCV_SBI_STA_REG(shmem_hi):
		if (IS_ENABLED(CONFIG_32BIT))
			*value = upper_32_bits(vcpu->arch.sta.shmem);
		else
			*value = 0;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

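/*
 * On 32-bit hosts the 64-bit shmem address spans both registers, so
 * writing one half must preserve the other; on 64-bit hosts shmem_lo
 * holds the whole address and shmem_hi must remain zero.
 */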
static int kvm_sbi_ext_sta_set_reg(struct kvm_vcpu *vcpu, unsigned long reg_num,
				   unsigned long reg_size, const void *reg_val)
{
	unsigned long value;

	if (reg_size != sizeof(unsigned long))
		return -EINVAL;
	value = *(const unsigned long *)reg_val;

	switch (reg_num) {
	case KVM_REG_RISCV_SBI_STA_REG(shmem_lo):
		if (IS_ENABLED(CONFIG_32BIT)) {
			gpa_t hi = upper_32_bits(vcpu->arch.sta.shmem);

			vcpu->arch.sta.shmem = value;
			vcpu->arch.sta.shmem |= hi << 32;
		} else {
			vcpu->arch.sta.shmem = value;
		}
		break;
	case KVM_REG_RISCV_SBI_STA_REG(shmem_hi):
		if (IS_ENABLED(CONFIG_32BIT)) {
			gpa_t lo = lower_32_bits(vcpu->arch.sta.shmem);

			vcpu->arch.sta.shmem = ((gpa_t)value << 32);
			vcpu->arch.sta.shmem |= lo;
		} else if (value != 0) {
			return -EINVAL;
		}
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

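/* Registration of the STA extension with the generic SBI dispatch code. */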
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta = {
	.extid_start = SBI_EXT_STA,
	.extid_end = SBI_EXT_STA,
	.handler = kvm_sbi_ext_sta_handler,
	.probe = kvm_sbi_ext_sta_probe,
	.reset = kvm_riscv_vcpu_sbi_sta_reset,
	.state_reg_subtype = KVM_REG_RISCV_SBI_STA,
	.get_state_reg_count = kvm_sbi_ext_sta_get_state_reg_count,
	.get_state_reg = kvm_sbi_ext_sta_get_reg,
	.set_state_reg = kvm_sbi_ext_sta_set_reg,
};