// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2024 Loongson Technology Corporation Limited
 */

#include <linux/kvm_host.h>
#include <asm/kvm_ipi.h>
#include <asm/kvm_vcpu.h>

/*
 * Deliver an IPI described by a guest write to IOCSR_IPI_SEND.
 *
 * Bits 25:16 of @data select the target cpu and bits 4:0 select the
 * action bit to raise in that vcpu's IPI status word.  The vcpu
 * interrupt is injected only on the 0 -> non-zero status transition.
 */
static void ipi_send(struct kvm *kvm, uint64_t data)
{
	int target, action;
	uint32_t old_status;
	struct kvm_vcpu *dest;
	struct kvm_interrupt vint;

	target = ((data & 0xffffffff) >> 16) & 0x3ff;
	dest = kvm_get_vcpu_by_cpuid(kvm, target);
	if (unlikely(!dest)) {
		kvm_err("%s: invalid target cpu: %d\n", __func__, target);
		return;
	}

	action = BIT(data & 0x1f);
	spin_lock(&dest->arch.ipi_state.lock);
	old_status = dest->arch.ipi_state.status;
	dest->arch.ipi_state.status |= action;
	spin_unlock(&dest->arch.ipi_state.lock);

	/* Only assert the IPI line on the first pending action */
	if (!old_status) {
		vint.irq = LARCH_INT_IPI;
		kvm_vcpu_ioctl_interrupt(dest, &vint);
	}
}

/*
 * Clear status bits of @vcpu's IPI state as directed by a write to
 * IOCSR_IPI_CLEAR; deassert the IPI interrupt once no bit remains set.
 */
static void ipi_clear(struct kvm_vcpu *vcpu, uint64_t data)
{
	uint32_t remaining;
	struct kvm_interrupt vint;

	spin_lock(&vcpu->arch.ipi_state.lock);
	vcpu->arch.ipi_state.status &= ~data;
	remaining = vcpu->arch.ipi_state.status;
	spin_unlock(&vcpu->arch.ipi_state.lock);

	/* All pending actions acknowledged: lower the IPI line */
	if (!remaining) {
		vint.irq = -LARCH_INT_IPI;
		kvm_vcpu_ioctl_interrupt(vcpu, &vint);
	}
}

read_mailbox(struct kvm_vcpu * vcpu,int offset,int len)50 static uint64_t read_mailbox(struct kvm_vcpu *vcpu, int offset, int len)
51 {
52 uint64_t data = 0;
53
54 spin_lock(&vcpu->arch.ipi_state.lock);
55 data = *(ulong *)((void *)vcpu->arch.ipi_state.buf + (offset - 0x20));
56 spin_unlock(&vcpu->arch.ipi_state.lock);
57
58 switch (len) {
59 case 1:
60 return data & 0xff;
61 case 2:
62 return data & 0xffff;
63 case 4:
64 return data & 0xffffffff;
65 case 8:
66 return data;
67 default:
68 kvm_err("%s: unknown data len: %d\n", __func__, len);
69 return 0;
70 }
71 }
72
/*
 * Store the low @len bytes of @data into @vcpu's IPI mailbox buffer at
 * register @offset (mailbox registers start at offset 0x20).
 */
static void write_mailbox(struct kvm_vcpu *vcpu, int offset, uint64_t data, int len)
{
	void *dst;

	spin_lock(&vcpu->arch.ipi_state.lock);
	dst = (void *)vcpu->arch.ipi_state.buf + (offset - 0x20);

	switch (len) {
	case 1:
		*(unsigned char *)dst = (unsigned char)data;
		break;
	case 2:
		*(unsigned short *)dst = (unsigned short)data;
		break;
	case 4:
		*(unsigned int *)dst = (unsigned int)data;
		break;
	case 8:
		*(unsigned long *)dst = (unsigned long)data;
		break;
	default:
		kvm_err("%s: unknown data len: %d\n", __func__, len);
		break;
	}
	spin_unlock(&vcpu->arch.ipi_state.lock);
}

/*
 * Perform a (possibly byte-masked) 32-bit IOCSR write on behalf of a
 * mail_send/any_send operation.
 *
 * @addr: target IOCSR address on the destination vcpu.
 * @data: raw send command; bits 63:32 carry the 32-bit payload and
 *        bits 30:27 form a per-byte mask — a set mask bit means the
 *        corresponding destination byte is PRESERVED, so the old value
 *        must be read back first and merged.
 *
 * Returns 0 on success or the negative error code from the io bus.
 */
static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
{
	int i, idx, ret;
	uint32_t val = 0, mask = 0;

	/*
	 * Bit 27-30 is mask for byte writing.
	 * If the mask is 0, we need not to do anything.
	 */
	if ((data >> 27) & 0xf) {
		/* Read the old val */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (unlikely(ret)) {
			/* Fixed message: was "…: : read date from addr …" */
			kvm_err("%s: read data from addr %llx failed\n", __func__, addr);
			return ret;
		}
		/* Construct the mask by scanning the bit 27-30 */
		for (i = 0; i < 4; i++) {
			if (data & (BIT(27 + i)))
				mask |= (0xff << (i * 8));
		}
		/* Save the old part of val */
		val &= mask;
	}
	/* Merge the payload (bits 63:32) into the unmasked bytes */
	val |= ((uint32_t)(data >> 32) & ~mask);
	idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	if (unlikely(ret))
		/* Fixed message: was "…: : write date to addr …" */
		kvm_err("%s: write data to addr %llx failed\n", __func__, addr);

	return ret;
}

/*
 * Handle a write to IOCSR_MAIL_SEND: route the payload into one of the
 * target vcpu's mailbox registers via a masked IOCSR write.
 */
static int mail_send(struct kvm *kvm, uint64_t data)
{
	int target, box, regoff;
	struct kvm_vcpu *dest;

	/* Bits 25:16 select the destination cpu */
	target = ((data & 0xffffffff) >> 16) & 0x3ff;
	dest = kvm_get_vcpu_by_cpuid(kvm, target);
	if (unlikely(!dest)) {
		kvm_err("%s: invalid target cpu: %d\n", __func__, target);
		return -EINVAL;
	}

	/* Bits 4:2 select one of eight 32-bit mailbox slots */
	box = ((data & 0xffffffff) >> 2) & 0x7;
	regoff = IOCSR_IPI_BASE + IOCSR_IPI_BUF_20 + box * 4;

	return send_ipi_data(dest, regoff, data);
}

/*
 * Handle a write to IOCSR_ANY_SEND: perform a masked write to an
 * arbitrary IOCSR address on the target vcpu.
 */
static int any_send(struct kvm *kvm, uint64_t data)
{
	int target, regoff;
	struct kvm_vcpu *dest;

	/* Bits 25:16 select the destination cpu */
	target = ((data & 0xffffffff) >> 16) & 0x3ff;
	dest = kvm_get_vcpu_by_cpuid(kvm, target);
	if (unlikely(!dest)) {
		kvm_err("%s: invalid target cpu: %d\n", __func__, target);
		return -EINVAL;
	}

	/* The low 16 bits carry the raw IOCSR address on the target */
	regoff = data & 0xffff;

	return send_ipi_data(dest, regoff, data);
}

/*
 * Emulate a guest IOCSR read in the IPI register block.
 *
 * Only the low 9 bits of @addr are decoded.  The result is stored
 * through @val (always written, even on error, as zero-extended
 * uint64_t).  Returns 0 on success or -EINVAL for an unknown register
 * or an out-of-range mailbox access.
 */
static int loongarch_ipi_readl(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *val)
{
	int ret = 0;
	uint32_t offset;
	uint64_t res = 0;

	offset = (uint32_t)(addr & 0x1ff);
	/* Accesses are expected to be naturally aligned for their size */
	WARN_ON_ONCE(offset & (len - 1));

	switch (offset) {
	case IOCSR_IPI_STATUS:
		spin_lock(&vcpu->arch.ipi_state.lock);
		res = vcpu->arch.ipi_state.status;
		spin_unlock(&vcpu->arch.ipi_state.lock);
		break;
	case IOCSR_IPI_EN:
		spin_lock(&vcpu->arch.ipi_state.lock);
		res = vcpu->arch.ipi_state.en;
		spin_unlock(&vcpu->arch.ipi_state.lock);
		break;
	case IOCSR_IPI_SET:
		/* Write-only register: reads return zero */
		res = 0;
		break;
	case IOCSR_IPI_CLEAR:
		/* Write-only register: reads return zero */
		res = 0;
		break;
	case IOCSR_IPI_BUF_20 ... IOCSR_IPI_BUF_38 + 7:
		/* Reject reads that would run past the end of the mailbox buffer */
		if (offset + len > IOCSR_IPI_BUF_38 + 8) {
			kvm_err("%s: invalid offset or len: offset = %d, len = %d\n",
				__func__, offset, len);
			ret = -EINVAL;
			break;
		}
		res = read_mailbox(vcpu, offset, len);
		break;
	default:
		kvm_err("%s: unknown addr: %llx\n", __func__, addr);
		ret = -EINVAL;
		break;
	}
	*(uint64_t *)val = res;

	return ret;
}

/*
 * Emulate a guest IOCSR write in the IPI register block.
 *
 * Only the low 9 bits of @addr are decoded.  @val always carries the
 * data as a uint64_t regardless of @len.  Returns 0 on success or
 * -EINVAL for read-only/unknown registers and malformed accesses.
 */
static int loongarch_ipi_writel(struct kvm_vcpu *vcpu, gpa_t addr, int len, const void *val)
{
	int ret = 0;
	uint64_t data;
	uint32_t offset;

	data = *(uint64_t *)val;

	offset = (uint32_t)(addr & 0x1ff);
	/* Accesses are expected to be naturally aligned for their size */
	WARN_ON_ONCE(offset & (len - 1));

	switch (offset) {
	case IOCSR_IPI_STATUS:
		/* Status is read-only; it is set/cleared via SET/CLEAR/SEND */
		ret = -EINVAL;
		break;
	case IOCSR_IPI_EN:
		spin_lock(&vcpu->arch.ipi_state.lock);
		vcpu->arch.ipi_state.en = data;
		spin_unlock(&vcpu->arch.ipi_state.lock);
		break;
	case IOCSR_IPI_SET:
		/* NOTE(review): SET writes are rejected here — presumably
		 * guests are expected to use IOCSR_IPI_SEND instead; confirm. */
		ret = -EINVAL;
		break;
	case IOCSR_IPI_CLEAR:
		/* Just clear the status of the current vcpu */
		ipi_clear(vcpu, data);
		break;
	case IOCSR_IPI_BUF_20 ... IOCSR_IPI_BUF_38 + 7:
		/* Reject writes that would run past the end of the mailbox buffer */
		if (offset + len > IOCSR_IPI_BUF_38 + 8) {
			kvm_err("%s: invalid offset or len: offset = %d, len = %d\n",
				__func__, offset, len);
			ret = -EINVAL;
			break;
		}
		write_mailbox(vcpu, offset, data, len);
		break;
	case IOCSR_IPI_SEND:
		/* Raise an IPI action bit on the cpu encoded in the data */
		ipi_send(vcpu->kvm, data);
		break;
	case IOCSR_MAIL_SEND:
		ret = mail_send(vcpu->kvm, *(uint64_t *)val);
		break;
	case IOCSR_ANY_SEND:
		ret = any_send(vcpu->kvm, *(uint64_t *)val);
		break;
	default:
		kvm_err("%s: unknown addr: %llx\n", __func__, addr);
		ret = -EINVAL;
		break;
	}

	return ret;
}

/*
 * kvm_io_device read callback: account the exit and dispatch to the
 * IPI register read emulation.
 */
static int kvm_ipi_read(struct kvm_vcpu *vcpu,
			struct kvm_io_device *dev,
			gpa_t addr, int len, void *val)
{
	struct loongarch_ipi *ipi = vcpu->kvm->arch.ipi;

	if (!ipi) {
		kvm_err("%s: ipi irqchip not valid!\n", __func__);
		return -EINVAL;
	}

	ipi->kvm->stat.ipi_read_exits++;

	return loongarch_ipi_readl(vcpu, addr, len, val);
}

/*
 * kvm_io_device write callback: account the exit and dispatch to the
 * IPI register write emulation.
 */
static int kvm_ipi_write(struct kvm_vcpu *vcpu,
			struct kvm_io_device *dev,
			gpa_t addr, int len, const void *val)
{
	struct loongarch_ipi *ipi = vcpu->kvm->arch.ipi;

	if (!ipi) {
		kvm_err("%s: ipi irqchip not valid!\n", __func__);
		return -EINVAL;
	}

	ipi->kvm->stat.ipi_write_exits++;

	return loongarch_ipi_writel(vcpu, addr, len, val);
}

/* IOCSR bus callbacks for the emulated IPI device */
static const struct kvm_io_device_ops kvm_ipi_ops = {
	.read = kvm_ipi_read,
	.write = kvm_ipi_write,
};

/*
 * Userspace access (get/set attr) to one vcpu's IPI register state.
 *
 * attr->attr encodes the target: bits 25:16 select the cpu, bits 7:0
 * the register offset.  attr->addr is the userspace pointer that is
 * read (is_write) or written (!is_write).  STATUS/EN/SET/CLEAR are
 * 32-bit, the four mailbox buffers are 64-bit.
 *
 * NOTE(review): this path looks the cpu up with kvm_get_vcpu() (vcpu
 * slot index), while the MMIO paths use kvm_get_vcpu_by_cpuid() —
 * confirm these agree when vcpu ids are non-contiguous.
 *
 * Returns 0 on success, -EINVAL for a bad cpu/register, -EFAULT on a
 * failed user copy.
 */
static int kvm_ipi_regs_access(struct kvm_device *dev,
			struct kvm_device_attr *attr,
			bool is_write)
{
	int len = 4;
	int cpu, addr;
	uint64_t val;
	void *p = NULL;
	struct kvm_vcpu *vcpu;

	cpu = (attr->attr >> 16) & 0x3ff;
	addr = attr->attr & 0xff;

	vcpu = kvm_get_vcpu(dev->kvm, cpu);
	if (unlikely(vcpu == NULL)) {
		kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
		return -EINVAL;
	}

	/* Map the register offset to the backing field and access width */
	switch (addr) {
	case IOCSR_IPI_STATUS:
		p = &vcpu->arch.ipi_state.status;
		break;
	case IOCSR_IPI_EN:
		p = &vcpu->arch.ipi_state.en;
		break;
	case IOCSR_IPI_SET:
		p = &vcpu->arch.ipi_state.set;
		break;
	case IOCSR_IPI_CLEAR:
		p = &vcpu->arch.ipi_state.clear;
		break;
	case IOCSR_IPI_BUF_20:
		p = &vcpu->arch.ipi_state.buf[0];
		len = 8;
		break;
	case IOCSR_IPI_BUF_28:
		p = &vcpu->arch.ipi_state.buf[1];
		len = 8;
		break;
	case IOCSR_IPI_BUF_30:
		p = &vcpu->arch.ipi_state.buf[2];
		len = 8;
		break;
	case IOCSR_IPI_BUF_38:
		p = &vcpu->arch.ipi_state.buf[3];
		len = 8;
		break;
	default:
		kvm_err("%s: unknown ipi register, addr = %d\n", __func__, addr);
		return -EINVAL;
	}

	if (is_write) {
		/* Copy the new value in from userspace at the right width */
		if (len == 4) {
			if (get_user(val, (uint32_t __user *)attr->addr))
				return -EFAULT;
			*(uint32_t *)p = (uint32_t)val;
		} else if (len == 8) {
			if (get_user(val, (uint64_t __user *)attr->addr))
				return -EFAULT;
			*(uint64_t *)p = val;
		}
	} else {
		/* Copy the current value out to userspace */
		if (len == 4) {
			val = *(uint32_t *)p;
			return put_user(val, (uint32_t __user *)attr->addr);
		} else if (len == 8) {
			val = *(uint64_t *)p;
			return put_user(val, (uint64_t __user *)attr->addr);
		}
	}

	return 0;
}

kvm_ipi_get_attr(struct kvm_device * dev,struct kvm_device_attr * attr)384 static int kvm_ipi_get_attr(struct kvm_device *dev,
385 struct kvm_device_attr *attr)
386 {
387 switch (attr->group) {
388 case KVM_DEV_LOONGARCH_IPI_GRP_REGS:
389 return kvm_ipi_regs_access(dev, attr, false);
390 default:
391 kvm_err("%s: unknown group (%d)\n", __func__, attr->group);
392 return -EINVAL;
393 }
394 }
395
kvm_ipi_set_attr(struct kvm_device * dev,struct kvm_device_attr * attr)396 static int kvm_ipi_set_attr(struct kvm_device *dev,
397 struct kvm_device_attr *attr)
398 {
399 switch (attr->group) {
400 case KVM_DEV_LOONGARCH_IPI_GRP_REGS:
401 return kvm_ipi_regs_access(dev, attr, true);
402 default:
403 kvm_err("%s: unknown group (%d)\n", __func__, attr->group);
404 return -EINVAL;
405 }
406 }
407
/*
 * KVM device create callback: allocate the per-VM IPI state and
 * register its IOCSR MMIO-like region on the IOCSR bus.
 *
 * Returns 0 on success, -EINVAL if a device already exists, -ENOMEM on
 * allocation failure, or the error from kvm_io_bus_register_dev().
 */
static int kvm_ipi_create(struct kvm_device *dev, u32 type)
{
	int ret;
	struct kvm *kvm;
	struct kvm_io_device *device;
	struct loongarch_ipi *s;

	if (!dev) {
		kvm_err("%s: kvm_device ptr is invalid!\n", __func__);
		return -EINVAL;
	}

	kvm = dev->kvm;
	if (kvm->arch.ipi) {
		kvm_err("%s: LoongArch IPI has already been created!\n", __func__);
		return -EINVAL;
	}

	s = kzalloc(sizeof(struct loongarch_ipi), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	spin_lock_init(&s->lock);
	s->kvm = kvm;

	/*
	 * Initialize IOCSR device
	 */
	device = &s->device;
	kvm_iodevice_init(device, &kvm_ipi_ops);
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS, IOCSR_IPI_BASE, IOCSR_IPI_SIZE, device);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0) {
		kvm_err("%s: Initialize IOCSR dev failed, ret = %d\n", __func__, ret);
		goto err;
	}

	kvm->arch.ipi = s;
	return 0;

err:
	kfree(s);
	/* Propagate the real bus error instead of masking it as -EFAULT */
	return ret;
}

kvm_ipi_destroy(struct kvm_device * dev)454 static void kvm_ipi_destroy(struct kvm_device *dev)
455 {
456 struct kvm *kvm;
457 struct loongarch_ipi *ipi;
458
459 if (!dev || !dev->kvm || !dev->kvm->arch.ipi)
460 return;
461
462 kvm = dev->kvm;
463 ipi = kvm->arch.ipi;
464 kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &ipi->device);
465 kfree(ipi);
466 }
467
/* Device ops for the KVM_DEV_TYPE_LOONGARCH_IPI device */
static struct kvm_device_ops kvm_ipi_dev_ops = {
	.name = "kvm-loongarch-ipi",
	.create = kvm_ipi_create,
	.destroy = kvm_ipi_destroy,
	.set_attr = kvm_ipi_set_attr,
	.get_attr = kvm_ipi_get_attr,
};

/* Register the LoongArch IPI device type with KVM core at init time. */
int kvm_loongarch_register_ipi_device(void)
{
	return kvm_register_device_ops(&kvm_ipi_dev_ops, KVM_DEV_TYPE_LOONGARCH_IPI);
}
