xref: /linux/arch/loongarch/kvm/intc/eiointc.c (revision 3191df0a4882c827cac29925e80ecb1775b904bd)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2024 Loongson Technology Corporation Limited
4  */
5 
6 #include <asm/kvm_eiointc.h>
7 #include <asm/kvm_vcpu.h>
8 #include <linux/count_zeros.h>
9 
/*
 * Rebuild the software per-cpu/per-ip in-service cache (sw_coreisr) from the
 * architectural coreisr state, e.g. after register state has been restored.
 */
static void eiointc_set_sw_coreisr(struct loongarch_eiointc *s)
{
	int ipnum, cpu, cpuid, irq;
	struct kvm_vcpu *vcpu;

	for (irq = 0; irq < EIOINTC_IRQS; irq++) {
		/* one ipmap byte routes a group of 32 irqs to a parent INT line */
		ipnum = s->ipmap.reg_u8[irq / 32];
		if (!(s->status & BIT(EIOINTC_ENABLE_INT_ENCODE))) {
			/* bitmap encoding: lowest set bit selects the ip, clamp to 0..3 */
			ipnum = count_trailing_zeros(ipnum);
			ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0;
		}

		/* coremap byte holds the physical cpuid this irq is routed to */
		cpuid = s->coremap.reg_u8[irq];
		vcpu = kvm_get_vcpu_by_cpuid(s->kvm, cpuid);
		if (!vcpu)
			continue;

		cpu = vcpu->vcpu_id;
		/* mirror the architectural coreisr bit into the sw cache */
		if (test_bit(irq, (unsigned long *)s->coreisr.reg_u32[cpu]))
			__set_bit(irq, s->sw_coreisr[cpu][ipnum]);
		else
			__clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
	}
}
34 
/*
 * Propagate a level change on @irq: update the per-cpu in-service state and,
 * when this transition is the first raise / last clear for the target parent
 * INT line (INT_HWI0 + ipnum), forward the change to the vcpu.
 */
static void eiointc_update_irq(struct loongarch_eiointc *s, int irq, int level)
{
	int ipnum, cpu, found;
	struct kvm_vcpu *vcpu;
	struct kvm_interrupt vcpu_irq;

	/* one ipmap byte routes a group of 32 irqs to a parent INT line */
	ipnum = s->ipmap.reg_u8[irq / 32];
	if (!(s->status & BIT(EIOINTC_ENABLE_INT_ENCODE))) {
		/* bitmap encoding: lowest set bit selects the ip, clamp to 0..3 */
		ipnum = count_trailing_zeros(ipnum);
		ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0;
	}

	/* sw_coremap caches the target vcpu_id for this irq */
	cpu = s->sw_coremap[irq];
	vcpu = kvm_get_vcpu_by_id(s->kvm, cpu);
	if (unlikely(vcpu == NULL)) {
		kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
		return;
	}

	if (level) {
		/* nothing to do if the irq is not enabled */
		if (!test_bit(irq, (unsigned long *)s->enable.reg_u32))
			return;
		__set_bit(irq, (unsigned long *)s->coreisr.reg_u32[cpu]);
		/* sample BEFORE setting: was anything already pending on this ip? */
		found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS);
		__set_bit(irq, s->sw_coreisr[cpu][ipnum]);
	} else {
		__clear_bit(irq, (unsigned long *)s->coreisr.reg_u32[cpu]);
		__clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
		/* sample AFTER clearing: is anything still pending on this ip? */
		found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS);
	}

	if (found < EIOINTC_IRQS)
		return; /* other irq is handling, needn't update parent irq */

	/* negative irq number requests deassertion of the parent line */
	vcpu_irq.irq = level ? (INT_HWI0 + ipnum) : -(INT_HWI0 + ipnum);
	kvm_vcpu_ioctl_interrupt(vcpu, &vcpu_irq);
}
73 
/*
 * Re-route irqs [irq, irq + len) from a guest-written coremap value: one
 * routing byte per irq, consumed low byte first.  When @notify is set and an
 * affected irq is currently asserted, it is lowered on the old cpu and raised
 * again on the new one so the parent INT lines stay consistent.
 */
static inline void eiointc_update_sw_coremap(struct loongarch_eiointc *s,
					int irq, u64 val, u32 len, bool notify)
{
	int i, cpu, cpuid;
	struct kvm_vcpu *vcpu;

	for (i = 0; i < len; i++) {
		/* each byte selects the target cpu for one irq */
		cpuid = val & 0xff;
		val = val >> 8;

		if (!(s->status & BIT(EIOINTC_ENABLE_CPU_ENCODE))) {
			/* bitmap encoding: lowest set bit selects the cpu, clamp to 0..3 */
			cpuid = ffs(cpuid) - 1;
			cpuid = (cpuid >= 4) ? 0 : cpuid;
		}

		vcpu = kvm_get_vcpu_by_cpuid(s->kvm, cpuid);
		if (!vcpu)
			continue;

		cpu = vcpu->vcpu_id;
		if (s->sw_coremap[irq + i] == cpu)
			continue;

		if (notify && test_bit(irq + i, (unsigned long *)s->isr.reg_u8)) {
			/* lower irq at old cpu and raise irq at new cpu */
			eiointc_update_irq(s, irq + i, 0);
			s->sw_coremap[irq + i] = cpu;
			eiointc_update_irq(s, irq + i, 1);
		} else {
			s->sw_coremap[irq + i] = cpu;
		}
	}
}
107 
108 void eiointc_set_irq(struct loongarch_eiointc *s, int irq, int level)
109 {
110 	unsigned long flags;
111 	unsigned long *isr = (unsigned long *)s->isr.reg_u8;
112 
113 	spin_lock_irqsave(&s->lock, flags);
114 	level ? __set_bit(irq, isr) : __clear_bit(irq, isr);
115 	eiointc_update_irq(s, irq, level);
116 	spin_unlock_irqrestore(&s->lock, flags);
117 }
118 
119 static int loongarch_eiointc_read(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
120 				gpa_t addr, unsigned long *val)
121 {
122 	int index, ret = 0;
123 	u64 data = 0;
124 	gpa_t offset;
125 
126 	offset = addr - EIOINTC_BASE;
127 	switch (offset) {
128 	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
129 		index = (offset - EIOINTC_NODETYPE_START) >> 3;
130 		data = s->nodetype.reg_u64[index];
131 		break;
132 	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
133 		index = (offset - EIOINTC_IPMAP_START) >> 3;
134 		data = s->ipmap.reg_u64;
135 		break;
136 	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
137 		index = (offset - EIOINTC_ENABLE_START) >> 3;
138 		data = s->enable.reg_u64[index];
139 		break;
140 	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
141 		index = (offset - EIOINTC_BOUNCE_START) >> 3;
142 		data = s->bounce.reg_u64[index];
143 		break;
144 	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
145 		index = (offset - EIOINTC_COREISR_START) >> 3;
146 		data = s->coreisr.reg_u64[vcpu->vcpu_id][index];
147 		break;
148 	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
149 		index = (offset - EIOINTC_COREMAP_START) >> 3;
150 		data = s->coremap.reg_u64[index];
151 		break;
152 	default:
153 		ret = -EINVAL;
154 		break;
155 	}
156 	*val = data;
157 
158 	return ret;
159 }
160 
/*
 * MMIO/IOCSR read handler: reads the containing aligned 64-bit word under
 * the eiointc lock, then extracts and sign-extends the requested 1/2/4/8
 * byte field into *val.
 */
static int kvm_eiointc_read(struct kvm_vcpu *vcpu,
			struct kvm_io_device *dev,
			gpa_t addr, int len, void *val)
{
	int ret = -EINVAL;
	unsigned long flags, data, offset;
	struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;

	if (!eiointc) {
		kvm_err("%s: eiointc irqchip not valid!\n", __func__);
		return -EINVAL;
	}

	/* access must be naturally aligned for its size */
	if (addr & (len - 1)) {
		kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len);
		return -EINVAL;
	}

	/* round down to the containing 64-bit word, remember the byte offset */
	offset = addr & 0x7;
	addr -= offset;
	vcpu->stat.eiointc_read_exits++;
	spin_lock_irqsave(&eiointc->lock, flags);
	ret = loongarch_eiointc_read(vcpu, eiointc, addr, &data);
	spin_unlock_irqrestore(&eiointc->lock, flags);
	if (ret)
		return ret;

	/* shift the requested field down, then sign-extend to register width */
	data = data >> (offset * 8);
	switch (len) {
	case 1:
		*(long *)val = (s8)data;
		break;
	case 2:
		*(long *)val = (s16)data;
		break;
	case 4:
		*(long *)val = (s32)data;
		break;
	default:
		*(long *)val = (long)data;
		break;
	}

	return 0;
}
206 
/*
 * Write @value (masked by @field_mask, shifted to its byte offset within the
 * containing 64-bit word) into eiointc register state, and perform the side
 * effects each register implies (irq raise/lower, coremap re-route).
 *
 * Caller holds s->lock.  Returns 0, or -EINVAL for an unknown offset.
 */
static int loongarch_eiointc_write(struct kvm_vcpu *vcpu,
				struct loongarch_eiointc *s,
				gpa_t addr, u64 value, u64 field_mask)
{
	int index, irq, ret = 0;
	u8 cpu;
	u64 data, old, mask;
	gpa_t offset;

	/* position the written field within its aligned 64-bit word */
	offset = addr & 7;
	mask = field_mask << (offset * 8);
	data = (value & field_mask) << (offset * 8);

	addr -= offset;
	offset = addr - EIOINTC_BASE;

	switch (offset) {
	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
		index = (offset - EIOINTC_NODETYPE_START) >> 3;
		old = s->nodetype.reg_u64[index];
		s->nodetype.reg_u64[index] = (old & ~mask) | data;
		break;
	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
		/*
		 * ipmap cannot be set at runtime, can be set only at the beginning
		 * of irqchip driver, need not update upper irq level
		 */
		old = s->ipmap.reg_u64;
		s->ipmap.reg_u64 = (old & ~mask) | data;
		break;
	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
		index = (offset - EIOINTC_ENABLE_START) >> 3;
		old = s->enable.reg_u64[index];
		s->enable.reg_u64[index] = (old & ~mask) | data;
		/*
		 * 1: enable irq.
		 * update irq when isr is set.
		 */
		data = s->enable.reg_u64[index] & ~old & s->isr.reg_u64[index];
		while (data) {
			irq = __ffs(data);
			eiointc_update_irq(s, irq + index * 64, 1);
			data &= ~BIT_ULL(irq);
		}
		/*
		 * 0: disable irq.
		 * update irq when isr is set.
		 */
		data = ~s->enable.reg_u64[index] & old & s->isr.reg_u64[index];
		while (data) {
			irq = __ffs(data);
			eiointc_update_irq(s, irq + index * 64, 0);
			data &= ~BIT_ULL(irq);
		}
		break;
	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
		/* do not emulate hw bounced irq routing */
		index = (offset - EIOINTC_BOUNCE_START) >> 3;
		old = s->bounce.reg_u64[index];
		s->bounce.reg_u64[index] = (old & ~mask) | data;
		break;
	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
		index = (offset - EIOINTC_COREISR_START) >> 3;
		/* use attrs to get current cpu index */
		cpu = vcpu->vcpu_id;
		old = s->coreisr.reg_u64[cpu][index];
		/* write 1 to clear interrupt */
		s->coreisr.reg_u64[cpu][index] = old & ~data;
		/* lower every irq whose pending bit was just acknowledged */
		data &= old;
		while (data) {
			irq = __ffs(data);
			eiointc_update_irq(s, irq + index * 64, 0);
			data &= ~BIT_ULL(irq);
		}
		break;
	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
		index = (offset - EIOINTC_COREMAP_START) >> 3;
		old = s->coremap.reg_u64[index];
		s->coremap.reg_u64[index] = (old & ~mask) | data;
		/* re-route all 8 irqs covered by this 64-bit coremap word */
		data = s->coremap.reg_u64[index];
		eiointc_update_sw_coremap(s, index * 8, data, sizeof(data), true);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
296 
297 static int kvm_eiointc_write(struct kvm_vcpu *vcpu,
298 			struct kvm_io_device *dev,
299 			gpa_t addr, int len, const void *val)
300 {
301 	int ret = -EINVAL;
302 	unsigned long flags, value;
303 	struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
304 
305 	if (!eiointc) {
306 		kvm_err("%s: eiointc irqchip not valid!\n", __func__);
307 		return -EINVAL;
308 	}
309 
310 	if (addr & (len - 1)) {
311 		kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len);
312 		return -EINVAL;
313 	}
314 
315 	vcpu->stat.eiointc_write_exits++;
316 	spin_lock_irqsave(&eiointc->lock, flags);
317 	switch (len) {
318 	case 1:
319 		value = *(unsigned char *)val;
320 		ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, 0xFF);
321 		break;
322 	case 2:
323 		value = *(unsigned short *)val;
324 		ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, USHRT_MAX);
325 		break;
326 	case 4:
327 		value = *(unsigned int *)val;
328 		ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, UINT_MAX);
329 		break;
330 	default:
331 		value = *(unsigned long *)val;
332 		ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, ULONG_MAX);
333 		break;
334 	}
335 	spin_unlock_irqrestore(&eiointc->lock, flags);
336 
337 	return ret;
338 }
339 
/* IOCSR bus callbacks for the main eiointc register window */
static const struct kvm_io_device_ops kvm_eiointc_ops = {
	.read	= kvm_eiointc_read,
	.write	= kvm_eiointc_write,
};
344 
345 static int kvm_eiointc_virt_read(struct kvm_vcpu *vcpu,
346 				struct kvm_io_device *dev,
347 				gpa_t addr, int len, void *val)
348 {
349 	unsigned long flags;
350 	u32 *data = val;
351 	struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
352 
353 	if (!eiointc) {
354 		kvm_err("%s: eiointc irqchip not valid!\n", __func__);
355 		return -EINVAL;
356 	}
357 
358 	addr -= EIOINTC_VIRT_BASE;
359 	spin_lock_irqsave(&eiointc->lock, flags);
360 	switch (addr) {
361 	case EIOINTC_VIRT_FEATURES:
362 		*data = eiointc->features;
363 		break;
364 	case EIOINTC_VIRT_CONFIG:
365 		*data = eiointc->status;
366 		break;
367 	default:
368 		break;
369 	}
370 	spin_unlock_irqrestore(&eiointc->lock, flags);
371 
372 	return 0;
373 }
374 
375 static int kvm_eiointc_virt_write(struct kvm_vcpu *vcpu,
376 				struct kvm_io_device *dev,
377 				gpa_t addr, int len, const void *val)
378 {
379 	int ret = 0;
380 	unsigned long flags;
381 	u32 value = *(u32 *)val;
382 	struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
383 
384 	if (!eiointc) {
385 		kvm_err("%s: eiointc irqchip not valid!\n", __func__);
386 		return -EINVAL;
387 	}
388 
389 	addr -= EIOINTC_VIRT_BASE;
390 	spin_lock_irqsave(&eiointc->lock, flags);
391 	switch (addr) {
392 	case EIOINTC_VIRT_FEATURES:
393 		ret = -EPERM;
394 		break;
395 	case EIOINTC_VIRT_CONFIG:
396 		/*
397 		 * eiointc features can only be set at disabled status
398 		 */
399 		if ((eiointc->status & BIT(EIOINTC_ENABLE)) && value) {
400 			ret = -EPERM;
401 			break;
402 		}
403 		eiointc->status = value & eiointc->features;
404 		break;
405 	default:
406 		break;
407 	}
408 	spin_unlock_irqrestore(&eiointc->lock, flags);
409 
410 	return ret;
411 }
412 
/* IOCSR bus callbacks for the virt-extension register window */
static const struct kvm_io_device_ops kvm_eiointc_virt_ops = {
	.read	= kvm_eiointc_virt_read,
	.write	= kvm_eiointc_virt_write,
};
417 
418 static int kvm_eiointc_ctrl_access(struct kvm_device *dev,
419 					struct kvm_device_attr *attr)
420 {
421 	int ret = 0;
422 	unsigned long flags;
423 	unsigned long type = (unsigned long)attr->attr;
424 	u32 i, start_irq, val;
425 	void __user *data;
426 	struct loongarch_eiointc *s = dev->kvm->arch.eiointc;
427 
428 	data = (void __user *)attr->addr;
429 	spin_lock_irqsave(&s->lock, flags);
430 	switch (type) {
431 	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU:
432 		if (copy_from_user(&val, data, 4))
433 			ret = -EFAULT;
434 		else {
435 			if (val >= EIOINTC_ROUTE_MAX_VCPUS)
436 				ret = -EINVAL;
437 			else
438 				s->num_cpu = val;
439 		}
440 		break;
441 	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE:
442 		if (copy_from_user(&s->features, data, 4))
443 			ret = -EFAULT;
444 		if (!(s->features & BIT(EIOINTC_HAS_VIRT_EXTENSION)))
445 			s->status |= BIT(EIOINTC_ENABLE);
446 		break;
447 	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_LOAD_FINISHED:
448 		eiointc_set_sw_coreisr(s);
449 		for (i = 0; i < (EIOINTC_IRQS / 4); i++) {
450 			start_irq = i * 4;
451 			eiointc_update_sw_coremap(s, start_irq,
452 					s->coremap.reg_u32[i], sizeof(u32), false);
453 		}
454 		break;
455 	default:
456 		break;
457 	}
458 	spin_unlock_irqrestore(&s->lock, flags);
459 
460 	return ret;
461 }
462 
463 static int kvm_eiointc_regs_access(struct kvm_device *dev,
464 					struct kvm_device_attr *attr,
465 					bool is_write)
466 {
467 	int addr, cpu, offset, ret = 0;
468 	unsigned long flags;
469 	void *p = NULL;
470 	void __user *data;
471 	struct loongarch_eiointc *s;
472 
473 	s = dev->kvm->arch.eiointc;
474 	addr = attr->attr;
475 	cpu = addr >> 16;
476 	addr &= 0xffff;
477 	data = (void __user *)attr->addr;
478 	switch (addr) {
479 	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
480 		offset = (addr - EIOINTC_NODETYPE_START) / 4;
481 		p = &s->nodetype.reg_u32[offset];
482 		break;
483 	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
484 		offset = (addr - EIOINTC_IPMAP_START) / 4;
485 		p = &s->ipmap.reg_u32[offset];
486 		break;
487 	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
488 		offset = (addr - EIOINTC_ENABLE_START) / 4;
489 		p = &s->enable.reg_u32[offset];
490 		break;
491 	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
492 		offset = (addr - EIOINTC_BOUNCE_START) / 4;
493 		p = &s->bounce.reg_u32[offset];
494 		break;
495 	case EIOINTC_ISR_START ... EIOINTC_ISR_END:
496 		offset = (addr - EIOINTC_ISR_START) / 4;
497 		p = &s->isr.reg_u32[offset];
498 		break;
499 	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
500 		if (cpu >= s->num_cpu)
501 			return -EINVAL;
502 
503 		offset = (addr - EIOINTC_COREISR_START) / 4;
504 		p = &s->coreisr.reg_u32[cpu][offset];
505 		break;
506 	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
507 		offset = (addr - EIOINTC_COREMAP_START) / 4;
508 		p = &s->coremap.reg_u32[offset];
509 		break;
510 	default:
511 		kvm_err("%s: unknown eiointc register, addr = %d\n", __func__, addr);
512 		return -EINVAL;
513 	}
514 
515 	spin_lock_irqsave(&s->lock, flags);
516 	if (is_write) {
517 		if (copy_from_user(p, data, 4))
518 			ret = -EFAULT;
519 	} else {
520 		if (copy_to_user(data, p, 4))
521 			ret = -EFAULT;
522 	}
523 	spin_unlock_irqrestore(&s->lock, flags);
524 
525 	return ret;
526 }
527 
528 static int kvm_eiointc_sw_status_access(struct kvm_device *dev,
529 					struct kvm_device_attr *attr,
530 					bool is_write)
531 {
532 	int addr, ret = 0;
533 	unsigned long flags;
534 	void *p = NULL;
535 	void __user *data;
536 	struct loongarch_eiointc *s;
537 
538 	s = dev->kvm->arch.eiointc;
539 	addr = attr->attr;
540 	addr &= 0xffff;
541 
542 	data = (void __user *)attr->addr;
543 	switch (addr) {
544 	case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU:
545 		if (is_write)
546 			return ret;
547 
548 		p = &s->num_cpu;
549 		break;
550 	case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_FEATURE:
551 		if (is_write)
552 			return ret;
553 
554 		p = &s->features;
555 		break;
556 	case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_STATE:
557 		p = &s->status;
558 		break;
559 	default:
560 		kvm_err("%s: unknown eiointc register, addr = %d\n", __func__, addr);
561 		return -EINVAL;
562 	}
563 	spin_lock_irqsave(&s->lock, flags);
564 	if (is_write) {
565 		if (copy_from_user(p, data, 4))
566 			ret = -EFAULT;
567 	} else {
568 		if (copy_to_user(data, p, 4))
569 			ret = -EFAULT;
570 	}
571 	spin_unlock_irqrestore(&s->lock, flags);
572 
573 	return ret;
574 }
575 
576 static int kvm_eiointc_get_attr(struct kvm_device *dev,
577 				struct kvm_device_attr *attr)
578 {
579 	switch (attr->group) {
580 	case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS:
581 		return kvm_eiointc_regs_access(dev, attr, false);
582 	case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS:
583 		return kvm_eiointc_sw_status_access(dev, attr, false);
584 	default:
585 		return -EINVAL;
586 	}
587 }
588 
589 static int kvm_eiointc_set_attr(struct kvm_device *dev,
590 				struct kvm_device_attr *attr)
591 {
592 	switch (attr->group) {
593 	case KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL:
594 		return kvm_eiointc_ctrl_access(dev, attr);
595 	case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS:
596 		return kvm_eiointc_regs_access(dev, attr, true);
597 	case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS:
598 		return kvm_eiointc_sw_status_access(dev, attr, true);
599 	default:
600 		return -EINVAL;
601 	}
602 }
603 
604 static int kvm_eiointc_create(struct kvm_device *dev, u32 type)
605 {
606 	int ret;
607 	struct loongarch_eiointc *s;
608 	struct kvm_io_device *device;
609 	struct kvm *kvm = dev->kvm;
610 
611 	/* eiointc has been created */
612 	if (kvm->arch.eiointc)
613 		return -EINVAL;
614 
615 	s = kzalloc(sizeof(struct loongarch_eiointc), GFP_KERNEL);
616 	if (!s)
617 		return -ENOMEM;
618 
619 	spin_lock_init(&s->lock);
620 	s->kvm = kvm;
621 
622 	/*
623 	 * Initialize IOCSR device
624 	 */
625 	device = &s->device;
626 	kvm_iodevice_init(device, &kvm_eiointc_ops);
627 	mutex_lock(&kvm->slots_lock);
628 	ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS,
629 			EIOINTC_BASE, EIOINTC_SIZE, device);
630 	mutex_unlock(&kvm->slots_lock);
631 	if (ret < 0) {
632 		kfree(s);
633 		return ret;
634 	}
635 
636 	device = &s->device_vext;
637 	kvm_iodevice_init(device, &kvm_eiointc_virt_ops);
638 	ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS,
639 			EIOINTC_VIRT_BASE, EIOINTC_VIRT_SIZE, device);
640 	if (ret < 0) {
641 		kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &s->device);
642 		kfree(s);
643 		return ret;
644 	}
645 	kvm->arch.eiointc = s;
646 
647 	return 0;
648 }
649 
/*
 * Tear down the per-VM eiointc: unregister both IOCSR windows and free the
 * state.  NOTE(review): kvm_io_bus_unregister_dev() normally expects
 * kvm->slots_lock to be held; confirm the device-destroy path provides the
 * required exclusion before relying on this being lock-free.
 */
static void kvm_eiointc_destroy(struct kvm_device *dev)
{
	struct kvm *kvm;
	struct loongarch_eiointc *eiointc;

	/* tolerate partially-constructed or already-torn-down state */
	if (!dev || !dev->kvm || !dev->kvm->arch.eiointc)
		return;

	kvm = dev->kvm;
	eiointc = kvm->arch.eiointc;
	kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &eiointc->device);
	kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &eiointc->device_vext);
	kfree(eiointc);
}
664 
/* KVM device ops backing the KVM_DEV_TYPE_LOONGARCH_EIOINTC device type */
static struct kvm_device_ops kvm_eiointc_dev_ops = {
	.name = "kvm-loongarch-eiointc",
	.create = kvm_eiointc_create,
	.destroy = kvm_eiointc_destroy,
	.set_attr = kvm_eiointc_set_attr,
	.get_attr = kvm_eiointc_get_attr,
};
672 
/* Register the eiointc device type with KVM's device framework. */
int kvm_loongarch_register_eiointc_device(void)
{
	return kvm_register_device_ops(&kvm_eiointc_dev_ops, KVM_DEV_TYPE_LOONGARCH_EIOINTC);
}
677