1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2024 Loongson Technology Corporation Limited
4 */
5
6 #include <linux/kvm_host.h>
7 #include <asm/kvm_ipi.h>
8 #include <asm/kvm_vcpu.h>
9
/*
 * Deliver an IPI described by an IOCSR_IPI_SEND word to the target vcpu:
 * set the requested action bit and inject LARCH_INT_IPI on the first one.
 */
static void ipi_send(struct kvm *kvm, uint64_t data)
{
	int cpu, action;
	uint32_t old;
	struct kvm_vcpu *vcpu;
	struct kvm_interrupt irq;

	/* Bits 16-25 of the low word select the destination cpu. */
	cpu = ((data & 0xffffffff) >> 16) & 0x3ff;
	vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu);
	if (unlikely(!vcpu)) {
		kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
		return;
	}

	/* Bits 0-4 select which action bit to raise in the status word. */
	action = BIT(data & 0x1f);

	spin_lock(&vcpu->arch.ipi_state.lock);
	old = vcpu->arch.ipi_state.status;
	vcpu->arch.ipi_state.status |= action;
	spin_unlock(&vcpu->arch.ipi_state.lock);

	/* Inject only on the zero -> non-zero transition of the status word. */
	if (old == 0) {
		irq.irq = LARCH_INT_IPI;
		kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}
}
34
/*
 * Clear the IPI action bits given in @data on the current vcpu and
 * dequeue the IPI interrupt once nothing remains pending.
 */
static void ipi_clear(struct kvm_vcpu *vcpu, uint64_t data)
{
	uint32_t residual;
	struct kvm_interrupt irq;

	spin_lock(&vcpu->arch.ipi_state.lock);
	vcpu->arch.ipi_state.status &= ~data;
	residual = vcpu->arch.ipi_state.status;
	spin_unlock(&vcpu->arch.ipi_state.lock);

	if (!residual) {
		irq.irq = -LARCH_INT_IPI;
		kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}
}
49
read_mailbox(struct kvm_vcpu * vcpu,int offset,int len)50 static uint64_t read_mailbox(struct kvm_vcpu *vcpu, int offset, int len)
51 {
52 uint64_t data = 0;
53
54 spin_lock(&vcpu->arch.ipi_state.lock);
55 data = *(ulong *)((void *)vcpu->arch.ipi_state.buf + (offset - 0x20));
56 spin_unlock(&vcpu->arch.ipi_state.lock);
57
58 switch (len) {
59 case 1:
60 return data & 0xff;
61 case 2:
62 return data & 0xffff;
63 case 4:
64 return data & 0xffffffff;
65 case 8:
66 return data;
67 default:
68 kvm_err("%s: unknown data len: %d\n", __func__, len);
69 return 0;
70 }
71 }
72
/*
 * Store the low @len bytes of @data into the IPI mailbox buffer at
 * IOCSR register offset @offset (0x20 maps to the buffer start).
 */
static void write_mailbox(struct kvm_vcpu *vcpu, int offset, uint64_t data, int len)
{
	void *dst;

	spin_lock(&vcpu->arch.ipi_state.lock);
	dst = (void *)vcpu->arch.ipi_state.buf + (offset - 0x20);

	if (len == 1)
		*(unsigned char *)dst = (unsigned char)data;
	else if (len == 2)
		*(unsigned short *)dst = (unsigned short)data;
	else if (len == 4)
		*(unsigned int *)dst = (unsigned int)data;
	else if (len == 8)
		*(unsigned long *)dst = (unsigned long)data;
	else
		kvm_err("%s: unknown data len: %d\n", __func__, len);

	spin_unlock(&vcpu->arch.ipi_state.lock);
}
98
/*
 * Perform a masked 32-bit write to the target vcpu's IOCSR space.
 *
 * @data layout: bits 32-63 carry the new value, bits 27-30 are a per-byte
 * keep-mask — a set bit means "preserve the old byte", so the old register
 * is read first and merged with the new value.
 *
 * Returns 0 on success or the kvm_io_bus_read/write error code.
 */
static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
{
	int i, idx, ret;
	uint64_t val = 0, mask = 0;

	/*
	 * Bit 27-30 is mask for byte writing.
	 * If the mask is 0, we need not to do anything.
	 */
	if ((data >> 27) & 0xf) {
		/* Read the old val */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, 4, &val);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (unlikely(ret)) {
			kvm_err("%s: : read data from addr %llx failed\n", __func__, addr);
			return ret;
		}
		/*
		 * Construct the mask by scanning the bit 27-30.
		 * Use an unsigned long constant: 0xff << 24 would shift a
		 * set bit into the sign bit of a signed int (undefined
		 * behavior) and then sign-extend into the 64-bit mask.
		 */
		for (i = 0; i < 4; i++) {
			if (data & (BIT(27 + i)))
				mask |= (0xffUL << (i * 8));
		}
		/* Save the old part of val */
		val &= mask;
	}
	/* Merge the unmasked bytes of the new value (bits 32-63 of data). */
	val |= ((uint32_t)(data >> 32) & ~mask);
	idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, 4, &val);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	if (unlikely(ret))
		kvm_err("%s: : write data to addr %llx failed\n", __func__, addr);

	return ret;
}
134
/*
 * Handle an IOCSR_MAIL_SEND request: write a mailbox word on the
 * vcpu selected by the destination field of @data.
 */
static int mail_send(struct kvm *kvm, uint64_t data)
{
	int cpu, offset;
	struct kvm_vcpu *vcpu;

	/* Bits 2-4 select one of the eight 32-bit mailbox words. */
	offset = IOCSR_IPI_BASE + IOCSR_IPI_BUF_20 +
		 (((data & 0xffffffff) >> 2) & 0x7) * 4;

	/* Bits 16-25 of the low word select the destination cpu. */
	cpu = ((data & 0xffffffff) >> 16) & 0x3ff;
	vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu);
	if (unlikely(!vcpu)) {
		kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
		return -EINVAL;
	}

	return send_ipi_data(vcpu, offset, data);
}
151
/*
 * Handle an IOCSR_ANY_SEND request: write an arbitrary IOCSR register
 * (offset in bits 0-15 of @data) on the selected vcpu.
 */
static int any_send(struct kvm *kvm, uint64_t data)
{
	int cpu, offset;
	struct kvm_vcpu *vcpu;

	offset = data & 0xffff;

	/* Bits 16-25 of the low word select the destination cpu. */
	cpu = ((data & 0xffffffff) >> 16) & 0x3ff;
	vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu);
	if (unlikely(!vcpu)) {
		kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
		return -EINVAL;
	}

	return send_ipi_data(vcpu, offset, data);
}
167
/*
 * Dispatch a guest IOCSR read in the IPI register window and store the
 * result into *@val. Returns 0 on success, -EINVAL for unknown or
 * out-of-range accesses.
 */
static int loongarch_ipi_readl(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *val)
{
	int ret = 0;
	uint32_t offset;
	uint64_t data = 0;

	offset = (uint32_t)(addr & 0x1ff);
	WARN_ON_ONCE(offset & (len - 1));

	switch (offset) {
	case IOCSR_IPI_STATUS:
		spin_lock(&vcpu->arch.ipi_state.lock);
		data = vcpu->arch.ipi_state.status;
		spin_unlock(&vcpu->arch.ipi_state.lock);
		break;
	case IOCSR_IPI_EN:
		spin_lock(&vcpu->arch.ipi_state.lock);
		data = vcpu->arch.ipi_state.en;
		spin_unlock(&vcpu->arch.ipi_state.lock);
		break;
	case IOCSR_IPI_SET:
	case IOCSR_IPI_CLEAR:
		/* Both registers read back as zero. */
		data = 0;
		break;
	case IOCSR_IPI_BUF_20 ... IOCSR_IPI_BUF_38 + 7:
		if (offset + len > IOCSR_IPI_BUF_38 + 8) {
			kvm_err("%s: invalid offset or len: offset = %d, len = %d\n",
				__func__, offset, len);
			ret = -EINVAL;
			break;
		}
		data = read_mailbox(vcpu, offset, len);
		break;
	default:
		kvm_err("%s: unknown addr: %llx\n", __func__, addr);
		ret = -EINVAL;
		break;
	}

	*(uint64_t *)val = data;

	return ret;
}
212
/*
 * Dispatch a guest IOCSR write in the IPI register window.
 * Returns 0 on success, -EINVAL for read-only or unknown registers,
 * or the error from the mail/any send path.
 */
static int loongarch_ipi_writel(struct kvm_vcpu *vcpu, gpa_t addr, int len, const void *val)
{
	int ret = 0;
	uint64_t data;
	uint32_t offset;

	data = *(uint64_t *)val;
	offset = (uint32_t)(addr & 0x1ff);
	WARN_ON_ONCE(offset & (len - 1));

	switch (offset) {
	case IOCSR_IPI_STATUS:
	case IOCSR_IPI_SET:
		/* Not writable through this path. */
		ret = -EINVAL;
		break;
	case IOCSR_IPI_EN:
		spin_lock(&vcpu->arch.ipi_state.lock);
		vcpu->arch.ipi_state.en = data;
		spin_unlock(&vcpu->arch.ipi_state.lock);
		break;
	case IOCSR_IPI_CLEAR:
		/* Just clear the status of the current vcpu */
		ipi_clear(vcpu, data);
		break;
	case IOCSR_IPI_BUF_20 ... IOCSR_IPI_BUF_38 + 7:
		if (offset + len > IOCSR_IPI_BUF_38 + 8) {
			kvm_err("%s: invalid offset or len: offset = %d, len = %d\n",
				__func__, offset, len);
			ret = -EINVAL;
			break;
		}
		write_mailbox(vcpu, offset, data, len);
		break;
	case IOCSR_IPI_SEND:
		ipi_send(vcpu->kvm, data);
		break;
	case IOCSR_MAIL_SEND:
		ret = mail_send(vcpu->kvm, data);
		break;
	case IOCSR_ANY_SEND:
		ret = any_send(vcpu->kvm, data);
		break;
	default:
		kvm_err("%s: unknown addr: %llx\n", __func__, addr);
		ret = -EINVAL;
		break;
	}

	return ret;
}
266
/* kvm_io_device read hook: count the exit, then delegate to the dispatcher. */
static int kvm_ipi_read(struct kvm_vcpu *vcpu,
			struct kvm_io_device *dev,
			gpa_t addr, int len, void *val)
{
	int ret;

	vcpu->stat.ipi_read_exits++;
	ret = loongarch_ipi_readl(vcpu, addr, len, val);

	return ret;
}
274
/* kvm_io_device write hook: count the exit, then delegate to the dispatcher. */
static int kvm_ipi_write(struct kvm_vcpu *vcpu,
			struct kvm_io_device *dev,
			gpa_t addr, int len, const void *val)
{
	int ret;

	vcpu->stat.ipi_write_exits++;
	ret = loongarch_ipi_writel(vcpu, addr, len, val);

	return ret;
}
282
/* IOCSR bus accessors for the emulated IPI register window. */
static const struct kvm_io_device_ops kvm_ipi_ops = {
	.read = kvm_ipi_read,
	.write = kvm_ipi_write,
};
287
/*
 * Userspace save/restore access to a single IPI register of one vcpu.
 * attr->attr encodes the vcpu id (bits 16-25) and register offset
 * (bits 0-7); attr->addr is the userspace buffer. Status/en/set/clear
 * are 32-bit, the four mailbox words are 64-bit.
 */
static int kvm_ipi_regs_access(struct kvm_device *dev,
			struct kvm_device_attr *attr,
			bool is_write)
{
	int len = 4;
	int cpu, addr;
	uint64_t val;
	void *p = NULL;
	struct kvm_vcpu *vcpu;

	cpu = (attr->attr >> 16) & 0x3ff;
	addr = attr->attr & 0xff;

	vcpu = kvm_get_vcpu_by_id(dev->kvm, cpu);
	if (unlikely(!vcpu)) {
		kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
		return -EINVAL;
	}

	switch (addr) {
	case IOCSR_IPI_STATUS:
		p = &vcpu->arch.ipi_state.status;
		break;
	case IOCSR_IPI_EN:
		p = &vcpu->arch.ipi_state.en;
		break;
	case IOCSR_IPI_SET:
		p = &vcpu->arch.ipi_state.set;
		break;
	case IOCSR_IPI_CLEAR:
		p = &vcpu->arch.ipi_state.clear;
		break;
	case IOCSR_IPI_BUF_20:
		len = 8;
		p = &vcpu->arch.ipi_state.buf[0];
		break;
	case IOCSR_IPI_BUF_28:
		len = 8;
		p = &vcpu->arch.ipi_state.buf[1];
		break;
	case IOCSR_IPI_BUF_30:
		len = 8;
		p = &vcpu->arch.ipi_state.buf[2];
		break;
	case IOCSR_IPI_BUF_38:
		len = 8;
		p = &vcpu->arch.ipi_state.buf[3];
		break;
	default:
		kvm_err("%s: unknown ipi register, addr = %d\n", __func__, addr);
		return -EINVAL;
	}

	if (!is_write) {
		if (len == 4) {
			val = *(uint32_t *)p;
			return put_user(val, (uint32_t __user *)attr->addr);
		}
		val = *(uint64_t *)p;
		return put_user(val, (uint64_t __user *)attr->addr);
	}

	if (len == 4) {
		if (get_user(val, (uint32_t __user *)attr->addr))
			return -EFAULT;
		*(uint32_t *)p = (uint32_t)val;
	} else {
		if (get_user(val, (uint64_t __user *)attr->addr))
			return -EFAULT;
		*(uint64_t *)p = val;
	}

	return 0;
}
363
kvm_ipi_get_attr(struct kvm_device * dev,struct kvm_device_attr * attr)364 static int kvm_ipi_get_attr(struct kvm_device *dev,
365 struct kvm_device_attr *attr)
366 {
367 switch (attr->group) {
368 case KVM_DEV_LOONGARCH_IPI_GRP_REGS:
369 return kvm_ipi_regs_access(dev, attr, false);
370 default:
371 kvm_err("%s: unknown group (%d)\n", __func__, attr->group);
372 return -EINVAL;
373 }
374 }
375
kvm_ipi_set_attr(struct kvm_device * dev,struct kvm_device_attr * attr)376 static int kvm_ipi_set_attr(struct kvm_device *dev,
377 struct kvm_device_attr *attr)
378 {
379 switch (attr->group) {
380 case KVM_DEV_LOONGARCH_IPI_GRP_REGS:
381 return kvm_ipi_regs_access(dev, attr, true);
382 default:
383 kvm_err("%s: unknown group (%d)\n", __func__, attr->group);
384 return -EINVAL;
385 }
386 }
387
/*
 * Create the per-VM IPI device: allocate the state, register the IOCSR
 * window on the KVM_IOCSR_BUS, and publish it in kvm->arch.ipi.
 *
 * Returns 0 on success, -EINVAL on duplicate/invalid device, -ENOMEM on
 * allocation failure, or the error from kvm_io_bus_register_dev().
 */
static int kvm_ipi_create(struct kvm_device *dev, u32 type)
{
	int ret;
	struct kvm *kvm;
	struct kvm_io_device *device;
	struct loongarch_ipi *s;

	if (!dev) {
		kvm_err("%s: kvm_device ptr is invalid!\n", __func__);
		return -EINVAL;
	}

	kvm = dev->kvm;
	/* Only one IPI device may exist per VM. */
	if (kvm->arch.ipi) {
		kvm_err("%s: LoongArch IPI has already been created!\n", __func__);
		return -EINVAL;
	}

	s = kzalloc(sizeof(struct loongarch_ipi), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	spin_lock_init(&s->lock);
	s->kvm = kvm;

	/*
	 * Initialize IOCSR device
	 */
	device = &s->device;
	kvm_iodevice_init(device, &kvm_ipi_ops);
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS, IOCSR_IPI_BASE, IOCSR_IPI_SIZE, device);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0) {
		kvm_err("%s: Initialize IOCSR dev failed, ret = %d\n", __func__, ret);
		goto err;
	}

	kvm->arch.ipi = s;
	return 0;

err:
	kfree(s);
	/* Propagate the real error instead of masking it as -EFAULT. */
	return ret;
}
433
kvm_ipi_destroy(struct kvm_device * dev)434 static void kvm_ipi_destroy(struct kvm_device *dev)
435 {
436 struct kvm *kvm;
437 struct loongarch_ipi *ipi;
438
439 if (!dev || !dev->kvm || !dev->kvm->arch.ipi)
440 return;
441
442 kvm = dev->kvm;
443 ipi = kvm->arch.ipi;
444 kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &ipi->device);
445 kfree(ipi);
446 }
447
/* kvm_device callbacks wiring the IPI device into KVM_CREATE_DEVICE. */
static struct kvm_device_ops kvm_ipi_dev_ops = {
	.name = "kvm-loongarch-ipi",
	.create = kvm_ipi_create,
	.destroy = kvm_ipi_destroy,
	.set_attr = kvm_ipi_set_attr,
	.get_attr = kvm_ipi_get_attr,
};
455
/* Register the LoongArch IPI device type with the KVM device framework. */
int kvm_loongarch_register_ipi_device(void)
{
	return kvm_register_device_ops(&kvm_ipi_dev_ops, KVM_DEV_TYPE_LOONGARCH_IPI);
}
460