xref: /linux/arch/loongarch/kvm/intc/ipi.c (revision 0586ade9e7f9491ccbe1e00975978cb9c2093006)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2024 Loongson Technology Corporation Limited
 */

#include <linux/kvm_host.h>
#include <asm/kvm_ipi.h>
#include <asm/kvm_vcpu.h>

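/*
 * Handle a guest write to IOCSR_IPI_SEND: bits 25:16 of the written value
 * select the target vcpu and bits 4:0 select the IPI vector to raise in
 * that vcpu's status register. A LARCH_INT_IPI interrupt is injected only
 * when the status transitions from 0 to non-zero.
 */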
static void ipi_send(struct kvm *kvm, uint64_t data)
{
	int cpu, action;
	uint32_t status;
	struct kvm_vcpu *vcpu;
	struct kvm_interrupt irq;

	cpu = ((data & 0xffffffff) >> 16) & 0x3ff;
	vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu);
	if (unlikely(vcpu == NULL)) {
		kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
		return;
	}

	action = BIT(data & 0x1f);
	spin_lock(&vcpu->arch.ipi_state.lock);
	status = vcpu->arch.ipi_state.status;
	vcpu->arch.ipi_state.status |= action;
	spin_unlock(&vcpu->arch.ipi_state.lock);
	if (status == 0) {
		irq.irq = LARCH_INT_IPI;
		kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}
}

static void ipi_clear(struct kvm_vcpu *vcpu, uint64_t data)
{
	uint32_t status;
	struct kvm_interrupt irq;

	spin_lock(&vcpu->arch.ipi_state.lock);
	vcpu->arch.ipi_state.status &= ~data;
	status = vcpu->arch.ipi_state.status;
	spin_unlock(&vcpu->arch.ipi_state.lock);
	if (status == 0) {
		irq.irq = -LARCH_INT_IPI;
		kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}
}

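/*
 * The per-vcpu mailbox is a small buffer (ipi_state.buf) shadowing the
 * IOCSR mailbox registers at offsets IOCSR_IPI_BUF_20 through
 * IOCSR_IPI_BUF_38 + 7. Accesses are addressed relative to 0x20 and may be
 * 1, 2, 4 or 8 bytes wide.
 */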
static uint64_t read_mailbox(struct kvm_vcpu *vcpu, int offset, int len)
{
	uint64_t data = 0;

	spin_lock(&vcpu->arch.ipi_state.lock);
	data = *(ulong *)((void *)vcpu->arch.ipi_state.buf + (offset - 0x20));
	spin_unlock(&vcpu->arch.ipi_state.lock);

	switch (len) {
	case 1:
		return data & 0xff;
	case 2:
		return data & 0xffff;
	case 4:
		return data & 0xffffffff;
	case 8:
		return data;
	default:
		kvm_err("%s: unknown data len: %d\n", __func__, len);
		return 0;
	}
}

static void write_mailbox(struct kvm_vcpu *vcpu, int offset, uint64_t data, int len)
{
	void *pbuf;

	spin_lock(&vcpu->arch.ipi_state.lock);
	pbuf = (void *)vcpu->arch.ipi_state.buf + (offset - 0x20);

	switch (len) {
	case 1:
		*(unsigned char *)pbuf = (unsigned char)data;
		break;
	case 2:
		*(unsigned short *)pbuf = (unsigned short)data;
		break;
	case 4:
		*(unsigned int *)pbuf = (unsigned int)data;
		break;
	case 8:
		*(unsigned long *)pbuf = (unsigned long)data;
		break;
	default:
		kvm_err("%s: unknown data len: %d\n", __func__, len);
	}
	spin_unlock(&vcpu->arch.ipi_state.lock);
}

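/*
 * Write a 32-bit value into the target vcpu's IOCSR space on behalf of a
 * MAIL_SEND/ANY_SEND request. The new value lives in bits 63:32 of the
 * request; bits 30:27 form a byte mask, and each set mask bit preserves
 * the corresponding byte of the current value (read-modify-write).
 */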
static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
{
	int i, ret;
	uint32_t val = 0, mask = 0;

	/*
	 * Bits 27-30 form the byte mask: a set bit means the corresponding
	 * byte of the old value is preserved. If the mask is 0, the whole
	 * word is overwritten and the old value need not be read.
	 */
	if ((data >> 27) & 0xf) {
		/* Read the old val */
		ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);
		if (unlikely(ret)) {
			kvm_err("%s: read data from addr %llx failed\n", __func__, addr);
			return ret;
		}
		/* Construct the byte mask by scanning bits 27-30 */
		for (i = 0; i < 4; i++) {
			if (data & (BIT(27 + i)))
				mask |= (0xff << (i * 8));
		}
		/* Keep the bytes of the old value selected by the mask */
		val &= mask;
	}
	val |= ((uint32_t)(data >> 32) & ~mask);
	ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);
	if (unlikely(ret))
		kvm_err("%s: write data to addr %llx failed\n", __func__, addr);

	return ret;
}

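/*
 * Handle a guest write to IOCSR_MAIL_SEND: bits 25:16 select the target
 * vcpu and bits 4:2 select one of the eight 32-bit mailbox slots, which is
 * then updated through send_ipi_data() at the matching IPI buffer offset.
 */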
static int mail_send(struct kvm *kvm, uint64_t data)
{
	int cpu, mailbox, offset;
	struct kvm_vcpu *vcpu;

	cpu = ((data & 0xffffffff) >> 16) & 0x3ff;
	vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu);
	if (unlikely(vcpu == NULL)) {
		kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
		return -EINVAL;
	}
	mailbox = ((data & 0xffffffff) >> 2) & 0x7;
	offset = IOCSR_IPI_BASE + IOCSR_IPI_BUF_20 + mailbox * 4;

	return send_ipi_data(vcpu, offset, data);
}

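/*
 * Handle a guest write to IOCSR_ANY_SEND: bits 25:16 select the target
 * vcpu and bits 15:0 give the IOCSR offset to write on that vcpu, again
 * routed through send_ipi_data().
 */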
static int any_send(struct kvm *kvm, uint64_t data)
{
	int cpu, offset;
	struct kvm_vcpu *vcpu;

	cpu = ((data & 0xffffffff) >> 16) & 0x3ff;
	vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu);
	if (unlikely(vcpu == NULL)) {
		kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
		return -EINVAL;
	}
	offset = data & 0xffff;

	return send_ipi_data(vcpu, offset, data);
}

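/*
 * Emulate a guest IOCSR read within the IPI register block. Only the
 * status, enable and mailbox registers are readable; SET and CLEAR read
 * back as 0.
 */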
static int loongarch_ipi_readl(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *val)
{
	int ret = 0;
	uint32_t offset;
	uint64_t res = 0;

	offset = (uint32_t)(addr & 0x1ff);
	WARN_ON_ONCE(offset & (len - 1));

	switch (offset) {
	case IOCSR_IPI_STATUS:
		spin_lock(&vcpu->arch.ipi_state.lock);
		res = vcpu->arch.ipi_state.status;
		spin_unlock(&vcpu->arch.ipi_state.lock);
		break;
	case IOCSR_IPI_EN:
		spin_lock(&vcpu->arch.ipi_state.lock);
		res = vcpu->arch.ipi_state.en;
		spin_unlock(&vcpu->arch.ipi_state.lock);
		break;
	case IOCSR_IPI_SET:
		res = 0;
		break;
	case IOCSR_IPI_CLEAR:
		res = 0;
		break;
	case IOCSR_IPI_BUF_20 ... IOCSR_IPI_BUF_38 + 7:
		if (offset + len > IOCSR_IPI_BUF_38 + 8) {
			kvm_err("%s: invalid offset or len: offset = %d, len = %d\n",
				__func__, offset, len);
			ret = -EINVAL;
			break;
		}
		res = read_mailbox(vcpu, offset, len);
		break;
	default:
		kvm_err("%s: unknown addr: %llx\n", __func__, addr);
		ret = -EINVAL;
		break;
	}
	*(uint64_t *)val = res;

	return ret;
}

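/*
 * Emulate a guest IOCSR write within the IPI register block. Writes to the
 * status and SET registers are rejected; CLEAR drops pending bits on the
 * current vcpu, while IPI_SEND, MAIL_SEND and ANY_SEND may target any vcpu.
 */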
static int loongarch_ipi_writel(struct kvm_vcpu *vcpu, gpa_t addr, int len, const void *val)
{
	int ret = 0;
	uint64_t data;
	uint32_t offset;

	data = *(uint64_t *)val;

	offset = (uint32_t)(addr & 0x1ff);
	WARN_ON_ONCE(offset & (len - 1));

	switch (offset) {
	case IOCSR_IPI_STATUS:
		ret = -EINVAL;
		break;
	case IOCSR_IPI_EN:
		spin_lock(&vcpu->arch.ipi_state.lock);
		vcpu->arch.ipi_state.en = data;
		spin_unlock(&vcpu->arch.ipi_state.lock);
		break;
	case IOCSR_IPI_SET:
		ret = -EINVAL;
		break;
	case IOCSR_IPI_CLEAR:
		/* Just clear the status of the current vcpu */
		ipi_clear(vcpu, data);
		break;
	case IOCSR_IPI_BUF_20 ... IOCSR_IPI_BUF_38 + 7:
		if (offset + len > IOCSR_IPI_BUF_38 + 8) {
			kvm_err("%s: invalid offset or len: offset = %d, len = %d\n",
				__func__, offset, len);
			ret = -EINVAL;
			break;
		}
		write_mailbox(vcpu, offset, data, len);
		break;
	case IOCSR_IPI_SEND:
		ipi_send(vcpu->kvm, data);
		break;
	case IOCSR_MAIL_SEND:
		ret = mail_send(vcpu->kvm, *(uint64_t *)val);
		break;
	case IOCSR_ANY_SEND:
		ret = any_send(vcpu->kvm, *(uint64_t *)val);
		break;
	default:
		kvm_err("%s: unknown addr: %llx\n", __func__, addr);
		ret = -EINVAL;
		break;
	}

	return ret;
}

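/*
 * kvm_io_device callbacks invoked from the KVM_IOCSR_BUS when the guest
 * accesses the emulated IPI range; they account the exit and dispatch to
 * the read/write emulation above.
 */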
static int kvm_ipi_read(struct kvm_vcpu *vcpu,
			struct kvm_io_device *dev,
			gpa_t addr, int len, void *val)
{
	int ret;
	struct loongarch_ipi *ipi;

	ipi = vcpu->kvm->arch.ipi;
	if (!ipi) {
		kvm_err("%s: ipi irqchip not valid!\n", __func__);
		return -EINVAL;
	}
	ipi->kvm->stat.ipi_read_exits++;
	ret = loongarch_ipi_readl(vcpu, addr, len, val);

	return ret;
}

static int kvm_ipi_write(struct kvm_vcpu *vcpu,
			struct kvm_io_device *dev,
			gpa_t addr, int len, const void *val)
{
	int ret;
	struct loongarch_ipi *ipi;

	ipi = vcpu->kvm->arch.ipi;
	if (!ipi) {
		kvm_err("%s: ipi irqchip not valid!\n", __func__);
		return -EINVAL;
	}
	ipi->kvm->stat.ipi_write_exits++;
	ret = loongarch_ipi_writel(vcpu, addr, len, val);

	return ret;
}

static const struct kvm_io_device_ops kvm_ipi_ops = {
	.read	= kvm_ipi_read,
	.write	= kvm_ipi_write,
};

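/*
 * Userspace access to the per-vcpu IPI state through the
 * KVM_DEV_LOONGARCH_IPI_GRP_REGS attribute group: bits 25:16 of attr->attr
 * select the vcpu and bits 7:0 the register offset; the mailbox registers
 * are accessed as 64-bit values, everything else as 32-bit.
 */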
static int kvm_ipi_regs_access(struct kvm_device *dev,
				struct kvm_device_attr *attr,
				bool is_write)
{
	int len = 4;
	int cpu, addr;
	uint64_t val;
	void *p = NULL;
	struct kvm_vcpu *vcpu;

	cpu = (attr->attr >> 16) & 0x3ff;
	addr = attr->attr & 0xff;

	vcpu = kvm_get_vcpu(dev->kvm, cpu);
	if (unlikely(vcpu == NULL)) {
		kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
		return -EINVAL;
	}

	switch (addr) {
	case IOCSR_IPI_STATUS:
		p = &vcpu->arch.ipi_state.status;
		break;
	case IOCSR_IPI_EN:
		p = &vcpu->arch.ipi_state.en;
		break;
	case IOCSR_IPI_SET:
		p = &vcpu->arch.ipi_state.set;
		break;
	case IOCSR_IPI_CLEAR:
		p = &vcpu->arch.ipi_state.clear;
		break;
	case IOCSR_IPI_BUF_20:
		p = &vcpu->arch.ipi_state.buf[0];
		len = 8;
		break;
	case IOCSR_IPI_BUF_28:
		p = &vcpu->arch.ipi_state.buf[1];
		len = 8;
		break;
	case IOCSR_IPI_BUF_30:
		p = &vcpu->arch.ipi_state.buf[2];
		len = 8;
		break;
	case IOCSR_IPI_BUF_38:
		p = &vcpu->arch.ipi_state.buf[3];
		len = 8;
		break;
	default:
		kvm_err("%s: unknown ipi register, addr = %d\n", __func__, addr);
		return -EINVAL;
	}

	if (is_write) {
		if (len == 4) {
			if (get_user(val, (uint32_t __user *)attr->addr))
				return -EFAULT;
			*(uint32_t *)p = (uint32_t)val;
		} else if (len == 8) {
			if (get_user(val, (uint64_t __user *)attr->addr))
				return -EFAULT;
			*(uint64_t *)p = val;
		}
	} else {
		if (len == 4) {
			val = *(uint32_t *)p;
			return put_user(val, (uint32_t __user *)attr->addr);
		} else if (len == 8) {
			val = *(uint64_t *)p;
			return put_user(val, (uint64_t __user *)attr->addr);
		}
	}

	return 0;
}

static int kvm_ipi_get_attr(struct kvm_device *dev,
			struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_LOONGARCH_IPI_GRP_REGS:
		return kvm_ipi_regs_access(dev, attr, false);
	default:
		kvm_err("%s: unknown group (%d)\n", __func__, attr->group);
		return -EINVAL;
	}
}

static int kvm_ipi_set_attr(struct kvm_device *dev,
			struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_LOONGARCH_IPI_GRP_REGS:
		return kvm_ipi_regs_access(dev, attr, true);
	default:
		kvm_err("%s: unknown group (%d)\n", __func__, attr->group);
		return -EINVAL;
	}
}

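/*
 * Create the in-kernel IPI device for a VM: allocate the loongarch_ipi
 * state and register its kvm_io_device on the IOCSR bus at IOCSR_IPI_BASE.
 * Only one IPI device may exist per VM.
 */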
static int kvm_ipi_create(struct kvm_device *dev, u32 type)
{
	int ret;
	struct kvm *kvm;
	struct kvm_io_device *device;
	struct loongarch_ipi *s;

	if (!dev) {
		kvm_err("%s: kvm_device ptr is invalid!\n", __func__);
		return -EINVAL;
	}

	kvm = dev->kvm;
	if (kvm->arch.ipi) {
		kvm_err("%s: LoongArch IPI has already been created!\n", __func__);
		return -EINVAL;
	}

	s = kzalloc(sizeof(struct loongarch_ipi), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	spin_lock_init(&s->lock);
	s->kvm = kvm;

	/*
	 * Initialize IOCSR device
	 */
	device = &s->device;
	kvm_iodevice_init(device, &kvm_ipi_ops);
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS, IOCSR_IPI_BASE, IOCSR_IPI_SIZE, device);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0) {
		kvm_err("%s: Initialize IOCSR dev failed, ret = %d\n", __func__, ret);
		goto err;
	}

	kvm->arch.ipi = s;
	return 0;

err:
	kfree(s);
	return -EFAULT;
}

static void kvm_ipi_destroy(struct kvm_device *dev)
{
	struct kvm *kvm;
	struct loongarch_ipi *ipi;

	if (!dev || !dev->kvm || !dev->kvm->arch.ipi)
		return;

	kvm = dev->kvm;
	ipi = kvm->arch.ipi;
	kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &ipi->device);
	kfree(ipi);
}

static struct kvm_device_ops kvm_ipi_dev_ops = {
	.name = "kvm-loongarch-ipi",
	.create = kvm_ipi_create,
	.destroy = kvm_ipi_destroy,
	.set_attr = kvm_ipi_set_attr,
	.get_attr = kvm_ipi_get_attr,
};

int kvm_loongarch_register_ipi_device(void)
{
	return kvm_register_device_ops(&kvm_ipi_dev_ops, KVM_DEV_TYPE_LOONGARCH_IPI);
}