// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023 Ventana Micro Systems Inc.
 */

#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/sizes.h>

#include <asm/bug.h>
#include <asm/current.h>
#include <asm/kvm_vcpu_sbi.h>
#include <asm/page.h>
#include <asm/sbi.h>
#include <asm/uaccess.h>

void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sta.shmem = INVALID_GPA;
	vcpu->arch.sta.last_steal = 0;
}

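/*
 * Update the guest's steal-time accounting in the registered shared
 * memory. Per the SBI STA extension, the sequence counter is bumped to
 * an odd value before the steal field is written and back to an even
 * value afterwards, so the guest can detect and retry torn reads.
 */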
void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
{
	gpa_t shmem = vcpu->arch.sta.shmem;
	u64 last_steal = vcpu->arch.sta.last_steal;
	__le32 __user *sequence_ptr;
	__le64 __user *steal_ptr;
	__le32 sequence_le;
	__le64 steal_le;
	u32 sequence;
	u64 steal;
	unsigned long hva;
	gfn_t gfn;

	if (shmem == INVALID_GPA)
		return;

	/*
	 * shmem is 64-byte aligned (see the enforcement in
	 * kvm_sbi_sta_steal_time_set_shmem()) and the size of sbi_sta_struct
	 * is 64 bytes, so we know all its offsets are in the same page.
	 */
	gfn = shmem >> PAGE_SHIFT;
	hva = kvm_vcpu_gfn_to_hva(vcpu, gfn);

	if (WARN_ON(kvm_is_error_hva(hva))) {
		vcpu->arch.sta.shmem = INVALID_GPA;
		return;
	}

	sequence_ptr = (__le32 __user *)(hva + offset_in_page(shmem) +
					 offsetof(struct sbi_sta_struct, sequence));
	steal_ptr = (__le64 __user *)(hva + offset_in_page(shmem) +
				      offsetof(struct sbi_sta_struct, steal));

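	/*
	 * Read the sequence and bump it to an odd value: an odd count
	 * tells the guest an update is in flight and the steal value
	 * may be inconsistent.
	 */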
	if (WARN_ON(get_user(sequence_le, sequence_ptr)))
		return;

	sequence = le32_to_cpu(sequence_le);
	sequence += 1;

	if (WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr)))
		return;

	if (!WARN_ON(get_user(steal_le, steal_ptr))) {
		steal = le64_to_cpu(steal_le);
		vcpu->arch.sta.last_steal = READ_ONCE(current->sched_info.run_delay);
		steal += vcpu->arch.sta.last_steal - last_steal;
		WARN_ON(put_user(cpu_to_le64(steal), steal_ptr));
	}

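	/*
	 * Second increment restores an even sequence, publishing the
	 * update; do this even if the steal write above failed.
	 */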
	sequence += 1;
	WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr));

	kvm_vcpu_mark_page_dirty(vcpu, gfn);
}

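/*
 * Handle SBI_EXT_STA_STEAL_TIME_SET_SHMEM: a0/a1 carry the low/high
 * halves of the shared-memory physical address and a2 carries flags
 * (none are currently defined). Passing SBI_SHMEM_DISABLE in both
 * address halves disables steal-time reporting for this vCPU.
 */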
static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long shmem_phys_lo = cp->a0;
	unsigned long shmem_phys_hi = cp->a1;
	u32 flags = cp->a2;
	struct sbi_sta_struct zero_sta = {0};
	unsigned long hva;
	bool writable;
	gpa_t shmem;
	int ret;

	if (flags != 0)
		return SBI_ERR_INVALID_PARAM;

	if (shmem_phys_lo == SBI_SHMEM_DISABLE &&
	    shmem_phys_hi == SBI_SHMEM_DISABLE) {
		vcpu->arch.sta.shmem = INVALID_GPA;
		return 0;
	}

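	/* The STA extension requires the shared memory to be 64-byte aligned. */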
	if (shmem_phys_lo & (SZ_64 - 1))
		return SBI_ERR_INVALID_PARAM;

	shmem = shmem_phys_lo;

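	/*
	 * Only a 32-bit host takes upper address bits from a1; on a
	 * 64-bit host the full address must fit in a0.
	 */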
	if (shmem_phys_hi != 0) {
		if (IS_ENABLED(CONFIG_32BIT))
			shmem |= ((gpa_t)shmem_phys_hi << 32);
		else
			return SBI_ERR_INVALID_ADDRESS;
	}

	hva = kvm_vcpu_gfn_to_hva_prot(vcpu, shmem >> PAGE_SHIFT, &writable);
	if (kvm_is_error_hva(hva) || !writable)
		return SBI_ERR_INVALID_ADDRESS;

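	/*
	 * Zero-fill the structure before exposing it, so the guest
	 * never sees stale data; this also exercises writability of
	 * the whole range, not just the first page.
	 */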
	ret = kvm_vcpu_write_guest(vcpu, shmem, &zero_sta, sizeof(zero_sta));
	if (ret)
		return SBI_ERR_FAILURE;

	vcpu->arch.sta.shmem = shmem;
	vcpu->arch.sta.last_steal = current->sched_info.run_delay;

	return 0;
}

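/*
 * Dispatch STA extension calls by SBI function ID (a6). Failures are
 * reported to the guest through retdata->err_val rather than by
 * failing the handler itself.
 */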
static int kvm_sbi_ext_sta_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				   struct kvm_vcpu_sbi_return *retdata)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long funcid = cp->a6;
	int ret;

	switch (funcid) {
	case SBI_EXT_STA_STEAL_TIME_SET_SHMEM:
		ret = kvm_sbi_sta_steal_time_set_shmem(vcpu);
		break;
	default:
		ret = SBI_ERR_NOT_SUPPORTED;
		break;
	}

	retdata->err_val = ret;

	return 0;
}

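/*
 * Steal time is derived from sched_info.run_delay, which is only
 * maintained when schedstats or delay accounting is enabled, so only
 * advertise the extension in that case.
 */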
static unsigned long kvm_sbi_ext_sta_probe(struct kvm_vcpu *vcpu)
{
	return !!sched_info_on();
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta = {
	.extid_start = SBI_EXT_STA,
	.extid_end = SBI_EXT_STA,
	.handler = kvm_sbi_ext_sta_handler,
	.probe = kvm_sbi_ext_sta_probe,
};

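/*
 * ONE_REG accessors for the STA shared-memory address, letting
 * userspace save and restore it (e.g. across migration). On 32-bit
 * hosts gpa_t is wider than unsigned long, so the address is split
 * into shmem_lo and shmem_hi halves.
 */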
int kvm_riscv_vcpu_get_reg_sbi_sta(struct kvm_vcpu *vcpu,
				   unsigned long reg_num,
				   unsigned long *reg_val)
{
	switch (reg_num) {
	case KVM_REG_RISCV_SBI_STA_REG(shmem_lo):
		*reg_val = (unsigned long)vcpu->arch.sta.shmem;
		break;
	case KVM_REG_RISCV_SBI_STA_REG(shmem_hi):
		if (IS_ENABLED(CONFIG_32BIT))
			*reg_val = upper_32_bits(vcpu->arch.sta.shmem);
		else
			*reg_val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

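/*
 * Each write updates only the addressed 32-bit half on 32-bit hosts;
 * a 64-bit host stores the whole address through shmem_lo and rejects
 * a nonzero shmem_hi.
 */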
int kvm_riscv_vcpu_set_reg_sbi_sta(struct kvm_vcpu *vcpu,
				   unsigned long reg_num,
				   unsigned long reg_val)
{
	switch (reg_num) {
	case KVM_REG_RISCV_SBI_STA_REG(shmem_lo):
		if (IS_ENABLED(CONFIG_32BIT)) {
			gpa_t hi = upper_32_bits(vcpu->arch.sta.shmem);

			vcpu->arch.sta.shmem = reg_val;
			vcpu->arch.sta.shmem |= hi << 32;
		} else {
			vcpu->arch.sta.shmem = reg_val;
		}
		break;
	case KVM_REG_RISCV_SBI_STA_REG(shmem_hi):
		if (IS_ENABLED(CONFIG_32BIT)) {
			gpa_t lo = lower_32_bits(vcpu->arch.sta.shmem);

			vcpu->arch.sta.shmem = ((gpa_t)reg_val << 32);
			vcpu->arch.sta.shmem |= lo;
		} else if (reg_val != 0) {
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}