xref: /linux/arch/loongarch/kvm/intc/eiointc.c (revision c34e9ab9a612ee8b18273398ef75c207b01f516d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2024 Loongson Technology Corporation Limited
4  */
5 
#include <linux/array_size.h>
#include <linux/count_zeros.h>
#include <linux/string.h>

#include <asm/kvm_eiointc.h>
#include <asm/kvm_vcpu.h>
9 
10 static void eiointc_set_sw_coreisr(struct loongarch_eiointc *s)
11 {
12 	int ipnum, cpu, irq_index, irq_mask, irq;
13 
14 	for (irq = 0; irq < EIOINTC_IRQS; irq++) {
15 		ipnum = s->ipmap.reg_u8[irq / 32];
16 		if (!(s->status & BIT(EIOINTC_ENABLE_INT_ENCODE))) {
17 			ipnum = count_trailing_zeros(ipnum);
18 			ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0;
19 		}
20 		irq_index = irq / 32;
21 		irq_mask = BIT(irq & 0x1f);
22 
23 		cpu = s->coremap.reg_u8[irq];
24 		if (!!(s->coreisr.reg_u32[cpu][irq_index] & irq_mask))
25 			set_bit(irq, s->sw_coreisr[cpu][ipnum]);
26 		else
27 			clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
28 	}
29 }
30 
/*
 * Propagate one irq line state change to the routed vcpu.
 *
 * Caller holds s->lock.  sw_coreisr[cpu][ip] tracks which irqs are
 * pending on a given parent INT_HWIx line; the parent interrupt is
 * only (de)asserted when this irq is the first to become pending
 * (or the last to clear), so an already-raised parent is not
 * re-injected.
 */
static void eiointc_update_irq(struct loongarch_eiointc *s, int irq, int level)
{
	int ipnum, cpu, found, irq_index, irq_mask;
	struct kvm_vcpu *vcpu;
	struct kvm_interrupt vcpu_irq;

	/* ipmap holds one routing byte per group of 32 irqs */
	ipnum = s->ipmap.reg_u8[irq / 32];
	if (!(s->status & BIT(EIOINTC_ENABLE_INT_ENCODE))) {
		/* bitmap encoding: lowest IP set wins, clamp to IP0..IP3 */
		ipnum = count_trailing_zeros(ipnum);
		ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0;
	}

	cpu = s->sw_coremap[irq];
	vcpu = kvm_get_vcpu(s->kvm, cpu);
	irq_index = irq / 32;
	irq_mask = BIT(irq & 0x1f);

	if (level) {
		/* if not enable return false */
		if (((s->enable.reg_u32[irq_index]) & irq_mask) == 0)
			return;
		s->coreisr.reg_u32[cpu][irq_index] |= irq_mask;
		/* sample the pending set *before* adding this irq */
		found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS);
		set_bit(irq, s->sw_coreisr[cpu][ipnum]);
	} else {
		s->coreisr.reg_u32[cpu][irq_index] &= ~irq_mask;
		clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
		/* sample the pending set *after* removing this irq */
		found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS);
	}

	if (found < EIOINTC_IRQS)
		return; /* other irq is handling, needn't update parent irq */

	/* negative irq number requests deassertion of the parent line */
	vcpu_irq.irq = level ? (INT_HWI0 + ipnum) : -(INT_HWI0 + ipnum);
	kvm_vcpu_ioctl_interrupt(vcpu, &vcpu_irq);
}
67 
/*
 * Refresh the software routing table (sw_coremap) for @len irq lines
 * starting at @irq; @pvalue holds one destination byte per line.
 *
 * Caller holds s->lock.  When @notify is true and a rerouted line is
 * currently pending (ISR bit set), the irq is lowered on the old cpu
 * and re-raised on the new one so the guest does not lose it.
 *
 * NOTE(review): a full u64 is loaded from @pvalue regardless of @len;
 * callers pass pointers into in-struct register arrays — confirm the
 * trailing read stays within the containing union.
 */
static inline void eiointc_update_sw_coremap(struct loongarch_eiointc *s,
					int irq, void *pvalue, u32 len, bool notify)
{
	int i, cpu;
	u64 val = *(u64 *)pvalue;

	for (i = 0; i < len; i++) {
		/* consume one destination byte per irq line */
		cpu = val & 0xff;
		val = val >> 8;

		if (!(s->status & BIT(EIOINTC_ENABLE_CPU_ENCODE))) {
			/* bitmap encoding: lowest set bit selects the cpu, else cpu 0 */
			cpu = ffs(cpu) - 1;
			cpu = (cpu >= 4) ? 0 : cpu;
		}

		if (s->sw_coremap[irq + i] == cpu)
			continue;

		if (notify && test_bit(irq + i, (unsigned long *)s->isr.reg_u8)) {
			/* lower irq at old cpu and raise irq at new cpu */
			eiointc_update_irq(s, irq + i, 0);
			s->sw_coremap[irq + i] = cpu;
			eiointc_update_irq(s, irq + i, 1);
		} else {
			s->sw_coremap[irq + i] = cpu;
		}
	}
}
96 
97 void eiointc_set_irq(struct loongarch_eiointc *s, int irq, int level)
98 {
99 	unsigned long flags;
100 	unsigned long *isr = (unsigned long *)s->isr.reg_u8;
101 
102 	level ? set_bit(irq, isr) : clear_bit(irq, isr);
103 	spin_lock_irqsave(&s->lock, flags);
104 	eiointc_update_irq(s, irq, level);
105 	spin_unlock_irqrestore(&s->lock, flags);
106 }
107 
108 static inline void eiointc_enable_irq(struct kvm_vcpu *vcpu,
109 		struct loongarch_eiointc *s, int index, u8 mask, int level)
110 {
111 	u8 val;
112 	int irq;
113 
114 	val = mask & s->isr.reg_u8[index];
115 	irq = ffs(val);
116 	while (irq != 0) {
117 		/*
118 		 * enable bit change from 0 to 1,
119 		 * need to update irq by pending bits
120 		 */
121 		eiointc_update_irq(s, irq - 1 + index * 8, level);
122 		val &= ~BIT(irq - 1);
123 		irq = ffs(val);
124 	}
125 }
126 
127 static int loongarch_eiointc_readb(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
128 				gpa_t addr, int len, void *val)
129 {
130 	int index, ret = 0;
131 	u8 data = 0;
132 	gpa_t offset;
133 
134 	offset = addr - EIOINTC_BASE;
135 	switch (offset) {
136 	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
137 		index = offset - EIOINTC_NODETYPE_START;
138 		data = s->nodetype.reg_u8[index];
139 		break;
140 	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
141 		index = offset - EIOINTC_IPMAP_START;
142 		data = s->ipmap.reg_u8[index];
143 		break;
144 	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
145 		index = offset - EIOINTC_ENABLE_START;
146 		data = s->enable.reg_u8[index];
147 		break;
148 	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
149 		index = offset - EIOINTC_BOUNCE_START;
150 		data = s->bounce.reg_u8[index];
151 		break;
152 	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
153 		index = offset - EIOINTC_COREISR_START;
154 		data = s->coreisr.reg_u8[vcpu->vcpu_id][index];
155 		break;
156 	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
157 		index = offset - EIOINTC_COREMAP_START;
158 		data = s->coremap.reg_u8[index];
159 		break;
160 	default:
161 		ret = -EINVAL;
162 		break;
163 	}
164 	*(u8 *)val = data;
165 
166 	return ret;
167 }
168 
169 static int loongarch_eiointc_readw(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
170 				gpa_t addr, int len, void *val)
171 {
172 	int index, ret = 0;
173 	u16 data = 0;
174 	gpa_t offset;
175 
176 	offset = addr - EIOINTC_BASE;
177 	switch (offset) {
178 	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
179 		index = (offset - EIOINTC_NODETYPE_START) >> 1;
180 		data = s->nodetype.reg_u16[index];
181 		break;
182 	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
183 		index = (offset - EIOINTC_IPMAP_START) >> 1;
184 		data = s->ipmap.reg_u16[index];
185 		break;
186 	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
187 		index = (offset - EIOINTC_ENABLE_START) >> 1;
188 		data = s->enable.reg_u16[index];
189 		break;
190 	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
191 		index = (offset - EIOINTC_BOUNCE_START) >> 1;
192 		data = s->bounce.reg_u16[index];
193 		break;
194 	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
195 		index = (offset - EIOINTC_COREISR_START) >> 1;
196 		data = s->coreisr.reg_u16[vcpu->vcpu_id][index];
197 		break;
198 	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
199 		index = (offset - EIOINTC_COREMAP_START) >> 1;
200 		data = s->coremap.reg_u16[index];
201 		break;
202 	default:
203 		ret = -EINVAL;
204 		break;
205 	}
206 	*(u16 *)val = data;
207 
208 	return ret;
209 }
210 
211 static int loongarch_eiointc_readl(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
212 				gpa_t addr, int len, void *val)
213 {
214 	int index, ret = 0;
215 	u32 data = 0;
216 	gpa_t offset;
217 
218 	offset = addr - EIOINTC_BASE;
219 	switch (offset) {
220 	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
221 		index = (offset - EIOINTC_NODETYPE_START) >> 2;
222 		data = s->nodetype.reg_u32[index];
223 		break;
224 	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
225 		index = (offset - EIOINTC_IPMAP_START) >> 2;
226 		data = s->ipmap.reg_u32[index];
227 		break;
228 	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
229 		index = (offset - EIOINTC_ENABLE_START) >> 2;
230 		data = s->enable.reg_u32[index];
231 		break;
232 	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
233 		index = (offset - EIOINTC_BOUNCE_START) >> 2;
234 		data = s->bounce.reg_u32[index];
235 		break;
236 	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
237 		index = (offset - EIOINTC_COREISR_START) >> 2;
238 		data = s->coreisr.reg_u32[vcpu->vcpu_id][index];
239 		break;
240 	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
241 		index = (offset - EIOINTC_COREMAP_START) >> 2;
242 		data = s->coremap.reg_u32[index];
243 		break;
244 	default:
245 		ret = -EINVAL;
246 		break;
247 	}
248 	*(u32 *)val = data;
249 
250 	return ret;
251 }
252 
253 static int loongarch_eiointc_readq(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
254 				gpa_t addr, int len, void *val)
255 {
256 	int index, ret = 0;
257 	u64 data = 0;
258 	gpa_t offset;
259 
260 	offset = addr - EIOINTC_BASE;
261 	switch (offset) {
262 	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
263 		index = (offset - EIOINTC_NODETYPE_START) >> 3;
264 		data = s->nodetype.reg_u64[index];
265 		break;
266 	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
267 		index = (offset - EIOINTC_IPMAP_START) >> 3;
268 		data = s->ipmap.reg_u64;
269 		break;
270 	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
271 		index = (offset - EIOINTC_ENABLE_START) >> 3;
272 		data = s->enable.reg_u64[index];
273 		break;
274 	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
275 		index = (offset - EIOINTC_BOUNCE_START) >> 3;
276 		data = s->bounce.reg_u64[index];
277 		break;
278 	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
279 		index = (offset - EIOINTC_COREISR_START) >> 3;
280 		data = s->coreisr.reg_u64[vcpu->vcpu_id][index];
281 		break;
282 	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
283 		index = (offset - EIOINTC_COREMAP_START) >> 3;
284 		data = s->coremap.reg_u64[index];
285 		break;
286 	default:
287 		ret = -EINVAL;
288 		break;
289 	}
290 	*(u64 *)val = data;
291 
292 	return ret;
293 }
294 
295 static int kvm_eiointc_read(struct kvm_vcpu *vcpu,
296 			struct kvm_io_device *dev,
297 			gpa_t addr, int len, void *val)
298 {
299 	int ret = -EINVAL;
300 	unsigned long flags;
301 	struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
302 
303 	if (!eiointc) {
304 		kvm_err("%s: eiointc irqchip not valid!\n", __func__);
305 		return -EINVAL;
306 	}
307 
308 	vcpu->kvm->stat.eiointc_read_exits++;
309 	spin_lock_irqsave(&eiointc->lock, flags);
310 	switch (len) {
311 	case 1:
312 		ret = loongarch_eiointc_readb(vcpu, eiointc, addr, len, val);
313 		break;
314 	case 2:
315 		ret = loongarch_eiointc_readw(vcpu, eiointc, addr, len, val);
316 		break;
317 	case 4:
318 		ret = loongarch_eiointc_readl(vcpu, eiointc, addr, len, val);
319 		break;
320 	case 8:
321 		ret = loongarch_eiointc_readq(vcpu, eiointc, addr, len, val);
322 		break;
323 	default:
324 		WARN_ONCE(1, "%s: Abnormal address access: addr 0x%llx, size %d\n",
325 						__func__, addr, len);
326 	}
327 	spin_unlock_irqrestore(&eiointc->lock, flags);
328 
329 	return ret;
330 }
331 
/*
 * Handle a 1-byte IOCSR write to the eiointc register file.
 * Called with s->lock held (see kvm_eiointc_write()).
 * Returns 0 on success, -EINVAL for an unhandled offset.
 */
static int loongarch_eiointc_writeb(struct kvm_vcpu *vcpu,
				struct loongarch_eiointc *s,
				gpa_t addr, int len, const void *val)
{
	int index, irq, bits, ret = 0;
	u8 cpu;
	u8 data, old_data;
	u8 coreisr, old_coreisr;
	gpa_t offset;

	data = *(u8 *)val;
	offset = addr - EIOINTC_BASE;

	switch (offset) {
	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
		index = (offset - EIOINTC_NODETYPE_START);
		s->nodetype.reg_u8[index] = data;
		break;
	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
		/*
		 * ipmap cannot be set at runtime, can be set only at the beginning
		 * of irqchip driver, need not update upper irq level
		 */
		index = (offset - EIOINTC_IPMAP_START);
		s->ipmap.reg_u8[index] = data;
		break;
	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
		index = (offset - EIOINTC_ENABLE_START);
		old_data = s->enable.reg_u8[index];
		s->enable.reg_u8[index] = data;
		/*
		 * 1: enable irq.
		 * update irq when isr is set.
		 */
		data = s->enable.reg_u8[index] & ~old_data & s->isr.reg_u8[index];
		eiointc_enable_irq(vcpu, s, index, data, 1);
		/*
		 * 0: disable irq.
		 * update irq when isr is set.
		 */
		data = ~s->enable.reg_u8[index] & old_data & s->isr.reg_u8[index];
		eiointc_enable_irq(vcpu, s, index, data, 0);
		break;
	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
		/* do not emulate hw bounced irq routing */
		index = offset - EIOINTC_BOUNCE_START;
		s->bounce.reg_u8[index] = data;
		break;
	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
		index = (offset - EIOINTC_COREISR_START);
		/* use attrs to get current cpu index */
		cpu = vcpu->vcpu_id;
		coreisr = data;
		old_coreisr = s->coreisr.reg_u8[cpu][index];
		/* write 1 to clear interrupt */
		s->coreisr.reg_u8[cpu][index] = old_coreisr & ~coreisr;
		/* coreisr now holds the bits that were actually cleared */
		coreisr &= old_coreisr;
		bits = sizeof(data) * 8;
		/* lower every irq whose pending bit was just cleared */
		irq = find_first_bit((void *)&coreisr, bits);
		while (irq < bits) {
			eiointc_update_irq(s, irq + index * bits, 0);
			bitmap_clear((void *)&coreisr, irq, 1);
			irq = find_first_bit((void *)&coreisr, bits);
		}
		break;
	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
		irq = offset - EIOINTC_COREMAP_START;
		index = irq;
		s->coremap.reg_u8[index] = data;
		/* re-route the irq; may migrate a pending irq between cpus */
		eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
410 
411 static int loongarch_eiointc_writew(struct kvm_vcpu *vcpu,
412 				struct loongarch_eiointc *s,
413 				gpa_t addr, int len, const void *val)
414 {
415 	int i, index, irq, bits, ret = 0;
416 	u8 cpu;
417 	u16 data, old_data;
418 	u16 coreisr, old_coreisr;
419 	gpa_t offset;
420 
421 	data = *(u16 *)val;
422 	offset = addr - EIOINTC_BASE;
423 
424 	switch (offset) {
425 	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
426 		index = (offset - EIOINTC_NODETYPE_START) >> 1;
427 		s->nodetype.reg_u16[index] = data;
428 		break;
429 	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
430 		/*
431 		 * ipmap cannot be set at runtime, can be set only at the beginning
432 		 * of irqchip driver, need not update upper irq level
433 		 */
434 		index = (offset - EIOINTC_IPMAP_START) >> 1;
435 		s->ipmap.reg_u16[index] = data;
436 		break;
437 	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
438 		index = (offset - EIOINTC_ENABLE_START) >> 1;
439 		old_data = s->enable.reg_u32[index];
440 		s->enable.reg_u16[index] = data;
441 		/*
442 		 * 1: enable irq.
443 		 * update irq when isr is set.
444 		 */
445 		data = s->enable.reg_u16[index] & ~old_data & s->isr.reg_u16[index];
446 		index = index << 1;
447 		for (i = 0; i < sizeof(data); i++) {
448 			u8 mask = (data >> (i * 8)) & 0xff;
449 			eiointc_enable_irq(vcpu, s, index + i, mask, 1);
450 		}
451 		/*
452 		 * 0: disable irq.
453 		 * update irq when isr is set.
454 		 */
455 		data = ~s->enable.reg_u16[index] & old_data & s->isr.reg_u16[index];
456 		for (i = 0; i < sizeof(data); i++) {
457 			u8 mask = (data >> (i * 8)) & 0xff;
458 			eiointc_enable_irq(vcpu, s, index, mask, 0);
459 		}
460 		break;
461 	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
462 		/* do not emulate hw bounced irq routing */
463 		index = (offset - EIOINTC_BOUNCE_START) >> 1;
464 		s->bounce.reg_u16[index] = data;
465 		break;
466 	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
467 		index = (offset - EIOINTC_COREISR_START) >> 1;
468 		/* use attrs to get current cpu index */
469 		cpu = vcpu->vcpu_id;
470 		coreisr = data;
471 		old_coreisr = s->coreisr.reg_u16[cpu][index];
472 		/* write 1 to clear interrupt */
473 		s->coreisr.reg_u16[cpu][index] = old_coreisr & ~coreisr;
474 		coreisr &= old_coreisr;
475 		bits = sizeof(data) * 8;
476 		irq = find_first_bit((void *)&coreisr, bits);
477 		while (irq < bits) {
478 			eiointc_update_irq(s, irq + index * bits, 0);
479 			bitmap_clear((void *)&coreisr, irq, 1);
480 			irq = find_first_bit((void *)&coreisr, bits);
481 		}
482 		break;
483 	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
484 		irq = offset - EIOINTC_COREMAP_START;
485 		index = irq >> 1;
486 		s->coremap.reg_u16[index] = data;
487 		eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true);
488 		break;
489 	default:
490 		ret = -EINVAL;
491 		break;
492 	}
493 
494 	return ret;
495 }
496 
497 static int loongarch_eiointc_writel(struct kvm_vcpu *vcpu,
498 				struct loongarch_eiointc *s,
499 				gpa_t addr, int len, const void *val)
500 {
501 	int i, index, irq, bits, ret = 0;
502 	u8 cpu;
503 	u32 data, old_data;
504 	u32 coreisr, old_coreisr;
505 	gpa_t offset;
506 
507 	data = *(u32 *)val;
508 	offset = addr - EIOINTC_BASE;
509 
510 	switch (offset) {
511 	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
512 		index = (offset - EIOINTC_NODETYPE_START) >> 2;
513 		s->nodetype.reg_u32[index] = data;
514 		break;
515 	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
516 		/*
517 		 * ipmap cannot be set at runtime, can be set only at the beginning
518 		 * of irqchip driver, need not update upper irq level
519 		 */
520 		index = (offset - EIOINTC_IPMAP_START) >> 2;
521 		s->ipmap.reg_u32[index] = data;
522 		break;
523 	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
524 		index = (offset - EIOINTC_ENABLE_START) >> 2;
525 		old_data = s->enable.reg_u32[index];
526 		s->enable.reg_u32[index] = data;
527 		/*
528 		 * 1: enable irq.
529 		 * update irq when isr is set.
530 		 */
531 		data = s->enable.reg_u32[index] & ~old_data & s->isr.reg_u32[index];
532 		index = index << 2;
533 		for (i = 0; i < sizeof(data); i++) {
534 			u8 mask = (data >> (i * 8)) & 0xff;
535 			eiointc_enable_irq(vcpu, s, index + i, mask, 1);
536 		}
537 		/*
538 		 * 0: disable irq.
539 		 * update irq when isr is set.
540 		 */
541 		data = ~s->enable.reg_u32[index] & old_data & s->isr.reg_u32[index];
542 		for (i = 0; i < sizeof(data); i++) {
543 			u8 mask = (data >> (i * 8)) & 0xff;
544 			eiointc_enable_irq(vcpu, s, index, mask, 0);
545 		}
546 		break;
547 	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
548 		/* do not emulate hw bounced irq routing */
549 		index = (offset - EIOINTC_BOUNCE_START) >> 2;
550 		s->bounce.reg_u32[index] = data;
551 		break;
552 	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
553 		index = (offset - EIOINTC_COREISR_START) >> 2;
554 		/* use attrs to get current cpu index */
555 		cpu = vcpu->vcpu_id;
556 		coreisr = data;
557 		old_coreisr = s->coreisr.reg_u32[cpu][index];
558 		/* write 1 to clear interrupt */
559 		s->coreisr.reg_u32[cpu][index] = old_coreisr & ~coreisr;
560 		coreisr &= old_coreisr;
561 		bits = sizeof(data) * 8;
562 		irq = find_first_bit((void *)&coreisr, bits);
563 		while (irq < bits) {
564 			eiointc_update_irq(s, irq + index * bits, 0);
565 			bitmap_clear((void *)&coreisr, irq, 1);
566 			irq = find_first_bit((void *)&coreisr, bits);
567 		}
568 		break;
569 	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
570 		irq = offset - EIOINTC_COREMAP_START;
571 		index = irq >> 2;
572 		s->coremap.reg_u32[index] = data;
573 		eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true);
574 		break;
575 	default:
576 		ret = -EINVAL;
577 		break;
578 	}
579 
580 	return ret;
581 }
582 
583 static int loongarch_eiointc_writeq(struct kvm_vcpu *vcpu,
584 				struct loongarch_eiointc *s,
585 				gpa_t addr, int len, const void *val)
586 {
587 	int i, index, irq, bits, ret = 0;
588 	u8 cpu;
589 	u64 data, old_data;
590 	u64 coreisr, old_coreisr;
591 	gpa_t offset;
592 
593 	data = *(u64 *)val;
594 	offset = addr - EIOINTC_BASE;
595 
596 	switch (offset) {
597 	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
598 		index = (offset - EIOINTC_NODETYPE_START) >> 3;
599 		s->nodetype.reg_u64[index] = data;
600 		break;
601 	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
602 		/*
603 		 * ipmap cannot be set at runtime, can be set only at the beginning
604 		 * of irqchip driver, need not update upper irq level
605 		 */
606 		index = (offset - EIOINTC_IPMAP_START) >> 3;
607 		s->ipmap.reg_u64 = data;
608 		break;
609 	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
610 		index = (offset - EIOINTC_ENABLE_START) >> 3;
611 		old_data = s->enable.reg_u64[index];
612 		s->enable.reg_u64[index] = data;
613 		/*
614 		 * 1: enable irq.
615 		 * update irq when isr is set.
616 		 */
617 		data = s->enable.reg_u64[index] & ~old_data & s->isr.reg_u64[index];
618 		index = index << 3;
619 		for (i = 0; i < sizeof(data); i++) {
620 			u8 mask = (data >> (i * 8)) & 0xff;
621 			eiointc_enable_irq(vcpu, s, index + i, mask, 1);
622 		}
623 		/*
624 		 * 0: disable irq.
625 		 * update irq when isr is set.
626 		 */
627 		data = ~s->enable.reg_u64[index] & old_data & s->isr.reg_u64[index];
628 		for (i = 0; i < sizeof(data); i++) {
629 			u8 mask = (data >> (i * 8)) & 0xff;
630 			eiointc_enable_irq(vcpu, s, index, mask, 0);
631 		}
632 		break;
633 	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
634 		/* do not emulate hw bounced irq routing */
635 		index = (offset - EIOINTC_BOUNCE_START) >> 3;
636 		s->bounce.reg_u64[index] = data;
637 		break;
638 	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
639 		index = (offset - EIOINTC_COREISR_START) >> 3;
640 		/* use attrs to get current cpu index */
641 		cpu = vcpu->vcpu_id;
642 		coreisr = data;
643 		old_coreisr = s->coreisr.reg_u64[cpu][index];
644 		/* write 1 to clear interrupt */
645 		s->coreisr.reg_u64[cpu][index] = old_coreisr & ~coreisr;
646 		coreisr &= old_coreisr;
647 		bits = sizeof(data) * 8;
648 		irq = find_first_bit((void *)&coreisr, bits);
649 		while (irq < bits) {
650 			eiointc_update_irq(s, irq + index * bits, 0);
651 			bitmap_clear((void *)&coreisr, irq, 1);
652 			irq = find_first_bit((void *)&coreisr, bits);
653 		}
654 		break;
655 	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
656 		irq = offset - EIOINTC_COREMAP_START;
657 		index = irq >> 3;
658 		s->coremap.reg_u64[index] = data;
659 		eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true);
660 		break;
661 	default:
662 		ret = -EINVAL;
663 		break;
664 	}
665 
666 	return ret;
667 }
668 
669 static int kvm_eiointc_write(struct kvm_vcpu *vcpu,
670 			struct kvm_io_device *dev,
671 			gpa_t addr, int len, const void *val)
672 {
673 	int ret = -EINVAL;
674 	unsigned long flags;
675 	struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
676 
677 	if (!eiointc) {
678 		kvm_err("%s: eiointc irqchip not valid!\n", __func__);
679 		return -EINVAL;
680 	}
681 
682 	vcpu->kvm->stat.eiointc_write_exits++;
683 	spin_lock_irqsave(&eiointc->lock, flags);
684 	switch (len) {
685 	case 1:
686 		ret = loongarch_eiointc_writeb(vcpu, eiointc, addr, len, val);
687 		break;
688 	case 2:
689 		ret = loongarch_eiointc_writew(vcpu, eiointc, addr, len, val);
690 		break;
691 	case 4:
692 		ret = loongarch_eiointc_writel(vcpu, eiointc, addr, len, val);
693 		break;
694 	case 8:
695 		ret = loongarch_eiointc_writeq(vcpu, eiointc, addr, len, val);
696 		break;
697 	default:
698 		WARN_ONCE(1, "%s: Abnormal address access: addr 0x%llx, size %d\n",
699 						__func__, addr, len);
700 	}
701 	spin_unlock_irqrestore(&eiointc->lock, flags);
702 
703 	return ret;
704 }
705 
/* IOCSR callbacks for the main eiointc register window */
static const struct kvm_io_device_ops kvm_eiointc_ops = {
	.read	= kvm_eiointc_read,
	.write	= kvm_eiointc_write,
};
710 
711 static int kvm_eiointc_virt_read(struct kvm_vcpu *vcpu,
712 				struct kvm_io_device *dev,
713 				gpa_t addr, int len, void *val)
714 {
715 	unsigned long flags;
716 	u32 *data = val;
717 	struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
718 
719 	if (!eiointc) {
720 		kvm_err("%s: eiointc irqchip not valid!\n", __func__);
721 		return -EINVAL;
722 	}
723 
724 	addr -= EIOINTC_VIRT_BASE;
725 	spin_lock_irqsave(&eiointc->lock, flags);
726 	switch (addr) {
727 	case EIOINTC_VIRT_FEATURES:
728 		*data = eiointc->features;
729 		break;
730 	case EIOINTC_VIRT_CONFIG:
731 		*data = eiointc->status;
732 		break;
733 	default:
734 		break;
735 	}
736 	spin_unlock_irqrestore(&eiointc->lock, flags);
737 
738 	return 0;
739 }
740 
741 static int kvm_eiointc_virt_write(struct kvm_vcpu *vcpu,
742 				struct kvm_io_device *dev,
743 				gpa_t addr, int len, const void *val)
744 {
745 	int ret = 0;
746 	unsigned long flags;
747 	u32 value = *(u32 *)val;
748 	struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
749 
750 	if (!eiointc) {
751 		kvm_err("%s: eiointc irqchip not valid!\n", __func__);
752 		return -EINVAL;
753 	}
754 
755 	addr -= EIOINTC_VIRT_BASE;
756 	spin_lock_irqsave(&eiointc->lock, flags);
757 	switch (addr) {
758 	case EIOINTC_VIRT_FEATURES:
759 		ret = -EPERM;
760 		break;
761 	case EIOINTC_VIRT_CONFIG:
762 		/*
763 		 * eiointc features can only be set at disabled status
764 		 */
765 		if ((eiointc->status & BIT(EIOINTC_ENABLE)) && value) {
766 			ret = -EPERM;
767 			break;
768 		}
769 		eiointc->status = value & eiointc->features;
770 		break;
771 	default:
772 		break;
773 	}
774 	spin_unlock_irqrestore(&eiointc->lock, flags);
775 
776 	return ret;
777 }
778 
/* IOCSR callbacks for the eiointc virt-extension window */
static const struct kvm_io_device_ops kvm_eiointc_virt_ops = {
	.read	= kvm_eiointc_virt_read,
	.write	= kvm_eiointc_virt_write,
};
783 
784 static int kvm_eiointc_ctrl_access(struct kvm_device *dev,
785 					struct kvm_device_attr *attr)
786 {
787 	int ret = 0;
788 	unsigned long flags;
789 	unsigned long type = (unsigned long)attr->attr;
790 	u32 i, start_irq;
791 	void __user *data;
792 	struct loongarch_eiointc *s = dev->kvm->arch.eiointc;
793 
794 	data = (void __user *)attr->addr;
795 	spin_lock_irqsave(&s->lock, flags);
796 	switch (type) {
797 	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU:
798 		if (copy_from_user(&s->num_cpu, data, 4))
799 			ret = -EFAULT;
800 		break;
801 	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE:
802 		if (copy_from_user(&s->features, data, 4))
803 			ret = -EFAULT;
804 		if (!(s->features & BIT(EIOINTC_HAS_VIRT_EXTENSION)))
805 			s->status |= BIT(EIOINTC_ENABLE);
806 		break;
807 	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_LOAD_FINISHED:
808 		eiointc_set_sw_coreisr(s);
809 		for (i = 0; i < (EIOINTC_IRQS / 4); i++) {
810 			start_irq = i * 4;
811 			eiointc_update_sw_coremap(s, start_irq,
812 					(void *)&s->coremap.reg_u32[i], sizeof(u32), false);
813 		}
814 		break;
815 	default:
816 		break;
817 	}
818 	spin_unlock_irqrestore(&s->lock, flags);
819 
820 	return ret;
821 }
822 
823 static int kvm_eiointc_regs_access(struct kvm_device *dev,
824 					struct kvm_device_attr *attr,
825 					bool is_write)
826 {
827 	int addr, cpuid, offset, ret = 0;
828 	unsigned long flags;
829 	void *p = NULL;
830 	void __user *data;
831 	struct loongarch_eiointc *s;
832 
833 	s = dev->kvm->arch.eiointc;
834 	addr = attr->attr;
835 	cpuid = addr >> 16;
836 	addr &= 0xffff;
837 	data = (void __user *)attr->addr;
838 	switch (addr) {
839 	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
840 		offset = (addr - EIOINTC_NODETYPE_START) / 4;
841 		p = &s->nodetype.reg_u32[offset];
842 		break;
843 	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
844 		offset = (addr - EIOINTC_IPMAP_START) / 4;
845 		p = &s->ipmap.reg_u32[offset];
846 		break;
847 	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
848 		offset = (addr - EIOINTC_ENABLE_START) / 4;
849 		p = &s->enable.reg_u32[offset];
850 		break;
851 	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
852 		offset = (addr - EIOINTC_BOUNCE_START) / 4;
853 		p = &s->bounce.reg_u32[offset];
854 		break;
855 	case EIOINTC_ISR_START ... EIOINTC_ISR_END:
856 		offset = (addr - EIOINTC_ISR_START) / 4;
857 		p = &s->isr.reg_u32[offset];
858 		break;
859 	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
860 		offset = (addr - EIOINTC_COREISR_START) / 4;
861 		p = &s->coreisr.reg_u32[cpuid][offset];
862 		break;
863 	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
864 		offset = (addr - EIOINTC_COREMAP_START) / 4;
865 		p = &s->coremap.reg_u32[offset];
866 		break;
867 	default:
868 		kvm_err("%s: unknown eiointc register, addr = %d\n", __func__, addr);
869 		return -EINVAL;
870 	}
871 
872 	spin_lock_irqsave(&s->lock, flags);
873 	if (is_write) {
874 		if (copy_from_user(p, data, 4))
875 			ret = -EFAULT;
876 	} else {
877 		if (copy_to_user(data, p, 4))
878 			ret = -EFAULT;
879 	}
880 	spin_unlock_irqrestore(&s->lock, flags);
881 
882 	return ret;
883 }
884 
885 static int kvm_eiointc_sw_status_access(struct kvm_device *dev,
886 					struct kvm_device_attr *attr,
887 					bool is_write)
888 {
889 	int addr, ret = 0;
890 	unsigned long flags;
891 	void *p = NULL;
892 	void __user *data;
893 	struct loongarch_eiointc *s;
894 
895 	s = dev->kvm->arch.eiointc;
896 	addr = attr->attr;
897 	addr &= 0xffff;
898 
899 	data = (void __user *)attr->addr;
900 	switch (addr) {
901 	case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU:
902 		p = &s->num_cpu;
903 		break;
904 	case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_FEATURE:
905 		p = &s->features;
906 		break;
907 	case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_STATE:
908 		p = &s->status;
909 		break;
910 	default:
911 		kvm_err("%s: unknown eiointc register, addr = %d\n", __func__, addr);
912 		return -EINVAL;
913 	}
914 	spin_lock_irqsave(&s->lock, flags);
915 	if (is_write) {
916 		if (copy_from_user(p, data, 4))
917 			ret = -EFAULT;
918 	} else {
919 		if (copy_to_user(data, p, 4))
920 			ret = -EFAULT;
921 	}
922 	spin_unlock_irqrestore(&s->lock, flags);
923 
924 	return ret;
925 }
926 
927 static int kvm_eiointc_get_attr(struct kvm_device *dev,
928 				struct kvm_device_attr *attr)
929 {
930 	switch (attr->group) {
931 	case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS:
932 		return kvm_eiointc_regs_access(dev, attr, false);
933 	case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS:
934 		return kvm_eiointc_sw_status_access(dev, attr, false);
935 	default:
936 		return -EINVAL;
937 	}
938 }
939 
940 static int kvm_eiointc_set_attr(struct kvm_device *dev,
941 				struct kvm_device_attr *attr)
942 {
943 	switch (attr->group) {
944 	case KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL:
945 		return kvm_eiointc_ctrl_access(dev, attr);
946 	case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS:
947 		return kvm_eiointc_regs_access(dev, attr, true);
948 	case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS:
949 		return kvm_eiointc_sw_status_access(dev, attr, true);
950 	default:
951 		return -EINVAL;
952 	}
953 }
954 
955 static int kvm_eiointc_create(struct kvm_device *dev, u32 type)
956 {
957 	int ret;
958 	struct loongarch_eiointc *s;
959 	struct kvm_io_device *device, *device1;
960 	struct kvm *kvm = dev->kvm;
961 
962 	/* eiointc has been created */
963 	if (kvm->arch.eiointc)
964 		return -EINVAL;
965 
966 	s = kzalloc(sizeof(struct loongarch_eiointc), GFP_KERNEL);
967 	if (!s)
968 		return -ENOMEM;
969 
970 	spin_lock_init(&s->lock);
971 	s->kvm = kvm;
972 
973 	/*
974 	 * Initialize IOCSR device
975 	 */
976 	device = &s->device;
977 	kvm_iodevice_init(device, &kvm_eiointc_ops);
978 	mutex_lock(&kvm->slots_lock);
979 	ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS,
980 			EIOINTC_BASE, EIOINTC_SIZE, device);
981 	mutex_unlock(&kvm->slots_lock);
982 	if (ret < 0) {
983 		kfree(s);
984 		return ret;
985 	}
986 
987 	device1 = &s->device_vext;
988 	kvm_iodevice_init(device1, &kvm_eiointc_virt_ops);
989 	ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS,
990 			EIOINTC_VIRT_BASE, EIOINTC_VIRT_SIZE, device1);
991 	if (ret < 0) {
992 		kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &s->device);
993 		kfree(s);
994 		return ret;
995 	}
996 	kvm->arch.eiointc = s;
997 
998 	return 0;
999 }
1000 
1001 static void kvm_eiointc_destroy(struct kvm_device *dev)
1002 {
1003 	struct kvm *kvm;
1004 	struct loongarch_eiointc *eiointc;
1005 
1006 	if (!dev || !dev->kvm || !dev->kvm->arch.eiointc)
1007 		return;
1008 
1009 	kvm = dev->kvm;
1010 	eiointc = kvm->arch.eiointc;
1011 	kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &eiointc->device);
1012 	kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &eiointc->device_vext);
1013 	kfree(eiointc);
1014 }
1015 
/* KVM device-framework callbacks for the eiointc device type */
static struct kvm_device_ops kvm_eiointc_dev_ops = {
	.name = "kvm-loongarch-eiointc",
	.create = kvm_eiointc_create,
	.destroy = kvm_eiointc_destroy,
	.set_attr = kvm_eiointc_set_attr,
	.get_attr = kvm_eiointc_get_attr,
};
1023 
/* Register the eiointc device type with KVM's device framework. */
int kvm_loongarch_register_eiointc_device(void)
{
	return kvm_register_device_ops(&kvm_eiointc_dev_ops, KVM_DEV_TYPE_LOONGARCH_EIOINTC);
}
1028