// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2024 Loongson Technology Corporation Limited
 */

#include <linux/kvm_host.h>
#include <asm/kvm_ipi.h>
#include <asm/kvm_vcpu.h>

/* OR @data into the vcpu's pending-IPI status word under the IPI lock. */
static void ipi_set(struct kvm_vcpu *vcpu, uint32_t data)
{
	struct kvm_interrupt irq;
	uint32_t old;

	spin_lock(&vcpu->arch.ipi_state.lock);
	old = vcpu->arch.ipi_state.status;
	vcpu->arch.ipi_state.status |= data;
	spin_unlock(&vcpu->arch.ipi_state.lock);

	/* Assert the CPU interrupt only on a 0 -> non-zero transition. */
	if (old == 0 && data) {
		irq.irq = LARCH_INT_IPI;
		kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}
}
24
/* Deliver an IPI described by an IOCSR_IPI_SEND write to the target vcpu. */
static void ipi_send(struct kvm *kvm, uint64_t data)
{
	struct kvm_vcpu *target;
	int cpuid;

	/* Bits 16-25 of the low word select the destination cpu. */
	cpuid = ((data & 0xffffffff) >> 16) & 0x3ff;
	target = kvm_get_vcpu_by_cpuid(kvm, cpuid);
	if (unlikely(!target)) {
		kvm_err("%s: invalid target cpu: %d\n", __func__, cpuid);
		return;
	}

	/* Bits 0-4 select the IPI vector bit to assert. */
	ipi_set(target, BIT(data & 0x1f));
}
39
/* Clear the bits in @data from the vcpu's pending-IPI status word. */
static void ipi_clear(struct kvm_vcpu *vcpu, uint64_t data)
{
	struct kvm_interrupt irq;
	uint32_t remaining;

	spin_lock(&vcpu->arch.ipi_state.lock);
	vcpu->arch.ipi_state.status &= ~data;
	remaining = vcpu->arch.ipi_state.status;
	spin_unlock(&vcpu->arch.ipi_state.lock);

	/* Deassert the CPU interrupt once no IPI bits remain pending. */
	if (!remaining) {
		irq.irq = -LARCH_INT_IPI;
		kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}
}
54
read_mailbox(struct kvm_vcpu * vcpu,int offset,int len)55 static uint64_t read_mailbox(struct kvm_vcpu *vcpu, int offset, int len)
56 {
57 uint64_t data = 0;
58
59 spin_lock(&vcpu->arch.ipi_state.lock);
60 data = *(ulong *)((void *)vcpu->arch.ipi_state.buf + (offset - 0x20));
61 spin_unlock(&vcpu->arch.ipi_state.lock);
62
63 switch (len) {
64 case 1:
65 return data & 0xff;
66 case 2:
67 return data & 0xffff;
68 case 4:
69 return data & 0xffffffff;
70 case 8:
71 return data;
72 default:
73 kvm_err("%s: unknown data len: %d\n", __func__, len);
74 return 0;
75 }
76 }
77
/*
 * Write the low @len bytes of @data into the vcpu's mailbox buffer at
 * IOCSR offset @offset (mailboxes start at 0x20).
 */
static void write_mailbox(struct kvm_vcpu *vcpu, int offset, uint64_t data, int len)
{
	void *dst;

	spin_lock(&vcpu->arch.ipi_state.lock);
	dst = (void *)vcpu->arch.ipi_state.buf + (offset - 0x20);

	if (len == 1)
		*(unsigned char *)dst = (unsigned char)data;
	else if (len == 2)
		*(unsigned short *)dst = (unsigned short)data;
	else if (len == 4)
		*(unsigned int *)dst = (unsigned int)data;
	else if (len == 8)
		*(unsigned long *)dst = (unsigned long)data;
	else
		kvm_err("%s: unknown data len: %d\n", __func__, len);

	spin_unlock(&vcpu->arch.ipi_state.lock);
}
103
/*
 * Emulate an IOCSR_MAIL_SEND write: merge the new payload into one 32-bit
 * mailbox word of the target vcpu, honouring the per-byte protect bits.
 *
 * Returns 0 on success or -EINVAL for an invalid target cpu.
 */
static int mail_send(struct kvm *kvm, uint64_t data)
{
	int i, cpu, mailbox, offset;
	uint32_t val = 0, mask = 0;
	struct kvm_vcpu *vcpu;

	/* Bits 16-25 of the low word select the destination cpu. */
	cpu = ((data & 0xffffffff) >> 16) & 0x3ff;
	vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu);
	if (unlikely(vcpu == NULL)) {
		kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
		return -EINVAL;
	}
	/* Bits 2-4 select one of the eight 32-bit mailbox words. */
	mailbox = ((data & 0xffffffff) >> 2) & 0x7;
	offset = IOCSR_IPI_BUF_20 + mailbox * 4;
	/*
	 * Bits 27-30 are per-byte protect flags: a set bit keeps the old
	 * byte of the mailbox word, so read it back and build the mask.
	 */
	if ((data >> 27) & 0xf) {
		val = read_mailbox(vcpu, offset, 4);
		for (i = 0; i < 4; i++)
			if (data & BIT(27 + i))
				mask |= 0xffU << (i * 8); /* unsigned: no UB at i == 3 */
		val &= mask;
	}

	/* Bits 32-63 carry the new payload for the unprotected bytes. */
	val |= ((uint32_t)(data >> 32) & ~mask);
	write_mailbox(vcpu, offset, val, 4);

	return 0;
}
131
/*
 * Perform a masked 32-bit write to @addr on the IOCSR bus on behalf of an
 * IOCSR_ANY_SEND request.  Bits 27-30 of @data are per-byte protect flags
 * (set = keep the old byte); bits 32-63 carry the new payload.
 *
 * Returns 0 on success or the kvm_io_bus_* error code on failure.
 */
static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
{
	int i, idx, ret;
	uint64_t val = 0, mask = 0;

	/*
	 * Bit 27-30 is mask for byte writing.
	 * If the mask is 0, we need not to do anything.
	 */
	if ((data >> 27) & 0xf) {
		/* Read the old val */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, 4, &val);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (unlikely(ret)) {
			kvm_err("%s: read data from addr %llx failed\n", __func__, addr);
			return ret;
		}
		/* Construct the mask by scanning the bit 27-30 */
		for (i = 0; i < 4; i++) {
			if (data & BIT(27 + i))
				mask |= 0xffULL << (i * 8); /* unsigned shift: no UB at i == 3 */
		}
		/* Save the old part of val */
		val &= mask;
	}
	val |= ((uint32_t)(data >> 32) & ~mask);
	idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, 4, &val);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	if (unlikely(ret))
		kvm_err("%s: write data to addr %llx failed\n", __func__, addr);

	return ret;
}
167
/* Emulate an IOCSR_ANY_SEND write: a masked IOCSR store on another cpu. */
static int any_send(struct kvm *kvm, uint64_t data)
{
	struct kvm_vcpu *target;
	int cpuid, iocsr_off;

	/* Bits 16-25 select the destination cpu. */
	cpuid = ((data & 0xffffffff) >> 16) & 0x3ff;
	target = kvm_get_vcpu_by_cpuid(kvm, cpuid);
	if (unlikely(!target)) {
		kvm_err("%s: invalid target cpu: %d\n", __func__, cpuid);
		return -EINVAL;
	}

	/* Bits 0-15 carry the IOCSR offset on the target cpu. */
	iocsr_off = data & 0xffff;

	return send_ipi_data(target, iocsr_off, data);
}
183
/* Handle a guest read of @len bytes from the emulated IPI register block. */
static int loongarch_ipi_readl(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *val)
{
	int ret = 0;
	uint32_t offset;
	uint64_t res = 0;

	offset = (uint32_t)(addr & 0x1ff);
	WARN_ON_ONCE(offset & (len - 1));	/* access should be naturally aligned */

	switch (offset) {
	case IOCSR_IPI_STATUS:
		spin_lock(&vcpu->arch.ipi_state.lock);
		res = vcpu->arch.ipi_state.status;
		spin_unlock(&vcpu->arch.ipi_state.lock);
		break;
	case IOCSR_IPI_EN:
		spin_lock(&vcpu->arch.ipi_state.lock);
		res = vcpu->arch.ipi_state.en;
		spin_unlock(&vcpu->arch.ipi_state.lock);
		break;
	case IOCSR_IPI_SET:
	case IOCSR_IPI_CLEAR:
		/* SET and CLEAR are write-only triggers; reads return 0. */
		res = 0;
		break;
	case IOCSR_IPI_BUF_20 ... IOCSR_IPI_BUF_38 + 7:
		/* Reject accesses running past the end of the mailbox area. */
		if (offset + len > IOCSR_IPI_BUF_38 + 8) {
			kvm_err("%s: invalid offset or len: offset = %d, len = %d\n",
				__func__, offset, len);
			ret = -EINVAL;
			break;
		}
		res = read_mailbox(vcpu, offset, len);
		break;
	default:
		kvm_err("%s: unknown addr: %llx\n", __func__, addr);
		ret = -EINVAL;
		break;
	}
	*(uint64_t *)val = res;

	return ret;
}
228
/* Handle a guest write of @len bytes to the emulated IPI register block. */
static int loongarch_ipi_writel(struct kvm_vcpu *vcpu, gpa_t addr, int len, const void *val)
{
	int ret = 0;
	uint64_t data;
	uint32_t offset;

	data = *(uint64_t *)val;

	offset = (uint32_t)(addr & 0x1ff);
	WARN_ON_ONCE(offset & (len - 1));	/* access should be naturally aligned */

	switch (offset) {
	case IOCSR_IPI_STATUS:
		/* Status is read-only; writes must go through SET/CLEAR. */
		ret = -EINVAL;
		break;
	case IOCSR_IPI_EN:
		spin_lock(&vcpu->arch.ipi_state.lock);
		vcpu->arch.ipi_state.en = data;
		spin_unlock(&vcpu->arch.ipi_state.lock);
		break;
	case IOCSR_IPI_SET:
		ipi_set(vcpu, data);
		break;
	case IOCSR_IPI_CLEAR:
		/* Just clear the status of the current vcpu */
		ipi_clear(vcpu, data);
		break;
	case IOCSR_IPI_BUF_20 ... IOCSR_IPI_BUF_38 + 7:
		/* Reject accesses running past the end of the mailbox area. */
		if (offset + len > IOCSR_IPI_BUF_38 + 8) {
			kvm_err("%s: invalid offset or len: offset = %d, len = %d\n",
				__func__, offset, len);
			ret = -EINVAL;
			break;
		}
		write_mailbox(vcpu, offset, data, len);
		break;
	case IOCSR_IPI_SEND:
		ipi_send(vcpu->kvm, data);
		break;
	case IOCSR_MAIL_SEND:
		ret = mail_send(vcpu->kvm, data);
		break;
	case IOCSR_ANY_SEND:
		ret = any_send(vcpu->kvm, data);
		break;
	default:
		kvm_err("%s: unknown addr: %llx\n", __func__, addr);
		ret = -EINVAL;
		break;
	}

	return ret;
}
282
/* IOCSR-bus read callback: account the exit, then emulate the access. */
static int kvm_ipi_read(struct kvm_vcpu *vcpu,
			struct kvm_io_device *dev,
			gpa_t addr, int len, void *val)
{
	vcpu->stat.ipi_read_exits++;

	return loongarch_ipi_readl(vcpu, addr, len, val);
}
290
/* IOCSR-bus write callback: account the exit, then emulate the access. */
static int kvm_ipi_write(struct kvm_vcpu *vcpu,
			 struct kvm_io_device *dev,
			 gpa_t addr, int len, const void *val)
{
	vcpu->stat.ipi_write_exits++;

	return loongarch_ipi_writel(vcpu, addr, len, val);
}
298
299 static const struct kvm_io_device_ops kvm_ipi_ops = {
300 .read = kvm_ipi_read,
301 .write = kvm_ipi_write,
302 };
303
/*
 * Userspace access (get/set attr) to one IPI register of one vcpu.
 * attr->attr encodes the vcpu id in bits 16-25 and the register offset
 * in bits 0-7; attr->addr points at the userspace value buffer.
 */
static int kvm_ipi_regs_access(struct kvm_device *dev,
			       struct kvm_device_attr *attr,
			       bool is_write)
{
	int len = 4;		/* most registers are 32 bits wide */
	int cpu, addr;
	uint64_t val;
	void *reg = NULL;
	struct kvm_vcpu *vcpu;

	cpu = (attr->attr >> 16) & 0x3ff;
	addr = attr->attr & 0xff;

	vcpu = kvm_get_vcpu_by_id(dev->kvm, cpu);
	if (unlikely(vcpu == NULL)) {
		kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
		return -EINVAL;
	}

	/* Map the register offset to its backing field in ipi_state. */
	switch (addr) {
	case IOCSR_IPI_STATUS:
		reg = &vcpu->arch.ipi_state.status;
		break;
	case IOCSR_IPI_EN:
		reg = &vcpu->arch.ipi_state.en;
		break;
	case IOCSR_IPI_SET:
		reg = &vcpu->arch.ipi_state.set;
		break;
	case IOCSR_IPI_CLEAR:
		reg = &vcpu->arch.ipi_state.clear;
		break;
	case IOCSR_IPI_BUF_20:
		reg = &vcpu->arch.ipi_state.buf[0];
		len = 8;
		break;
	case IOCSR_IPI_BUF_28:
		reg = &vcpu->arch.ipi_state.buf[1];
		len = 8;
		break;
	case IOCSR_IPI_BUF_30:
		reg = &vcpu->arch.ipi_state.buf[2];
		len = 8;
		break;
	case IOCSR_IPI_BUF_38:
		reg = &vcpu->arch.ipi_state.buf[3];
		len = 8;
		break;
	default:
		kvm_err("%s: unknown ipi register, addr = %d\n", __func__, addr);
		return -EINVAL;
	}

	if (is_write) {
		if (len == 4) {
			if (get_user(val, (uint32_t __user *)attr->addr))
				return -EFAULT;
			*(uint32_t *)reg = (uint32_t)val;
		} else if (len == 8) {
			if (get_user(val, (uint64_t __user *)attr->addr))
				return -EFAULT;
			*(uint64_t *)reg = val;
		}
	} else {
		if (len == 4) {
			val = *(uint32_t *)reg;
			return put_user(val, (uint32_t __user *)attr->addr);
		} else if (len == 8) {
			val = *(uint64_t *)reg;
			return put_user(val, (uint64_t __user *)attr->addr);
		}
	}

	return 0;
}
379
kvm_ipi_get_attr(struct kvm_device * dev,struct kvm_device_attr * attr)380 static int kvm_ipi_get_attr(struct kvm_device *dev,
381 struct kvm_device_attr *attr)
382 {
383 switch (attr->group) {
384 case KVM_DEV_LOONGARCH_IPI_GRP_REGS:
385 return kvm_ipi_regs_access(dev, attr, false);
386 default:
387 kvm_err("%s: unknown group (%d)\n", __func__, attr->group);
388 return -EINVAL;
389 }
390 }
391
kvm_ipi_set_attr(struct kvm_device * dev,struct kvm_device_attr * attr)392 static int kvm_ipi_set_attr(struct kvm_device *dev,
393 struct kvm_device_attr *attr)
394 {
395 switch (attr->group) {
396 case KVM_DEV_LOONGARCH_IPI_GRP_REGS:
397 return kvm_ipi_regs_access(dev, attr, true);
398 default:
399 kvm_err("%s: unknown group (%d)\n", __func__, attr->group);
400 return -EINVAL;
401 }
402 }
403
/*
 * Create the per-VM IPI device: allocate its state and register it on the
 * IOCSR bus.  Only one IPI device may exist per VM.
 *
 * Returns 0 on success, -EINVAL/-ENOMEM, or the bus-registration error.
 */
static int kvm_ipi_create(struct kvm_device *dev, u32 type)
{
	int ret;
	struct kvm *kvm;
	struct kvm_io_device *device;
	struct loongarch_ipi *s;

	if (!dev) {
		kvm_err("%s: kvm_device ptr is invalid!\n", __func__);
		return -EINVAL;
	}

	kvm = dev->kvm;
	if (kvm->arch.ipi) {
		kvm_err("%s: LoongArch IPI has already been created!\n", __func__);
		return -EINVAL;
	}

	s = kzalloc(sizeof(struct loongarch_ipi), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	spin_lock_init(&s->lock);
	s->kvm = kvm;

	/*
	 * Initialize IOCSR device
	 */
	device = &s->device;
	kvm_iodevice_init(device, &kvm_ipi_ops);
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS, IOCSR_IPI_BASE, IOCSR_IPI_SIZE, device);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0) {
		kvm_err("%s: Initialize IOCSR dev failed, ret = %d\n", __func__, ret);
		goto err;
	}

	kvm->arch.ipi = s;
	return 0;

err:
	kfree(s);
	/* Propagate the real registration error instead of a fixed -EFAULT. */
	return ret;
}
449
kvm_ipi_destroy(struct kvm_device * dev)450 static void kvm_ipi_destroy(struct kvm_device *dev)
451 {
452 struct kvm *kvm;
453 struct loongarch_ipi *ipi;
454
455 if (!dev || !dev->kvm || !dev->kvm->arch.ipi)
456 return;
457
458 kvm = dev->kvm;
459 ipi = kvm->arch.ipi;
460 kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &ipi->device);
461 kfree(ipi);
462 kfree(dev);
463 }
464
465 static struct kvm_device_ops kvm_ipi_dev_ops = {
466 .name = "kvm-loongarch-ipi",
467 .create = kvm_ipi_create,
468 .destroy = kvm_ipi_destroy,
469 .set_attr = kvm_ipi_set_attr,
470 .get_attr = kvm_ipi_get_attr,
471 };
472
kvm_loongarch_register_ipi_device(void)473 int kvm_loongarch_register_ipi_device(void)
474 {
475 return kvm_register_device_ops(&kvm_ipi_dev_ops, KVM_DEV_TYPE_LOONGARCH_IPI);
476 }
477