xref: /linux/arch/loongarch/kvm/intc/eiointc.c (revision 3a3de75a68ff8d52466980c4cfb2c16192d5e4e7)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2024 Loongson Technology Corporation Limited
4  */
5 
6 #include <asm/kvm_eiointc.h>
7 #include <asm/kvm_vcpu.h>
8 #include <linux/count_zeros.h>
9 
/*
 * Rebuild the per-cpu, per-ip-line software coreisr bitmaps from the
 * architectural coreisr registers (used after state is restored).
 *
 * For each irq the target ip line comes from the ipmap byte covering it.
 * When extended int-encode mode is off, the ipmap byte is a bitmask, so
 * it is converted to a line number with count_trailing_zeros() and
 * clamped to the 0-3 range (an all-zero byte falls back to line 0).
 */
static void eiointc_set_sw_coreisr(struct loongarch_eiointc *s)
{
	int ipnum, cpu, cpuid, irq_index, irq_mask, irq;
	struct kvm_vcpu *vcpu;

	for (irq = 0; irq < EIOINTC_IRQS; irq++) {
		/* one ipmap byte routes a group of 32 irqs */
		ipnum = s->ipmap.reg_u8[irq / 32];
		if (!(s->status & BIT(EIOINTC_ENABLE_INT_ENCODE))) {
			ipnum = count_trailing_zeros(ipnum);
			ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0;
		}
		irq_index = irq / 32;
		irq_mask = BIT(irq & 0x1f);

		/* resolve the destination vcpu from the per-irq coremap byte */
		cpuid = s->coremap.reg_u8[irq];
		vcpu = kvm_get_vcpu_by_cpuid(s->kvm, cpuid);
		if (!vcpu)
			continue;

		cpu = vcpu->vcpu_id;
		/* mirror the architectural pending bit into the sw bitmap */
		if (!!(s->coreisr.reg_u32[cpu][irq_index] & irq_mask))
			set_bit(irq, s->sw_coreisr[cpu][ipnum]);
		else
			clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
	}
}
36 
/*
 * Propagate a level change of @irq to the vcpu it is routed to.
 *
 * The parent CPU interrupt line (INT_HWI0 + ipnum) is edge-managed: it is
 * raised only when @irq is the first pending irq on that ip line, and
 * lowered only when @irq was the last pending one.  If other irqs on the
 * same line are already pending, the line state is left untouched.
 *
 * Caller must hold s->lock.
 */
static void eiointc_update_irq(struct loongarch_eiointc *s, int irq, int level)
{
	int ipnum, cpu, found, irq_index, irq_mask;
	struct kvm_vcpu *vcpu;
	struct kvm_interrupt vcpu_irq;

	/* decode the ip line, same rule as eiointc_set_sw_coreisr() */
	ipnum = s->ipmap.reg_u8[irq / 32];
	if (!(s->status & BIT(EIOINTC_ENABLE_INT_ENCODE))) {
		ipnum = count_trailing_zeros(ipnum);
		ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0;
	}

	cpu = s->sw_coremap[irq];
	vcpu = kvm_get_vcpu(s->kvm, cpu);
	irq_index = irq / 32;
	irq_mask = BIT(irq & 0x1f);

	if (level) {
		/* if not enable return false */
		if (((s->enable.reg_u32[irq_index]) & irq_mask) == 0)
			return;
		s->coreisr.reg_u32[cpu][irq_index] |= irq_mask;
		/* probe BEFORE set_bit: was anything already pending on this line? */
		found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS);
		set_bit(irq, s->sw_coreisr[cpu][ipnum]);
	} else {
		s->coreisr.reg_u32[cpu][irq_index] &= ~irq_mask;
		clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
		/* probe AFTER clear_bit: is anything still pending on this line? */
		found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS);
	}

	if (found < EIOINTC_IRQS)
		return; /* other irq is handling, needn't update parent irq */

	/* negative irq number requests deassertion of the parent line */
	vcpu_irq.irq = level ? (INT_HWI0 + ipnum) : -(INT_HWI0 + ipnum);
	kvm_vcpu_ioctl_interrupt(vcpu, &vcpu_irq);
}
73 
/*
 * Refresh the software routing table sw_coremap[] for @len consecutive
 * irqs starting at @irq, from @len coremap bytes packed into @val
 * (lowest byte first).  When @notify is true and an affected irq is
 * currently asserted in isr, the irq is migrated live: lowered at the
 * old vcpu and re-raised at the new one.
 *
 * Caller must hold s->lock.
 */
static inline void eiointc_update_sw_coremap(struct loongarch_eiointc *s,
					int irq, u64 val, u32 len, bool notify)
{
	int i, cpu, cpuid;
	struct kvm_vcpu *vcpu;

	for (i = 0; i < len; i++) {
		/* consume one coremap byte per irq */
		cpuid = val & 0xff;
		val = val >> 8;

		if (!(s->status & BIT(EIOINTC_ENABLE_CPU_ENCODE))) {
			/*
			 * Bitmask mode: convert mask to cpu number.
			 * NOTE(review): ffs(0) - 1 yields -1, which is not
			 * clamped here (only values >= 4 are); this relies on
			 * kvm_get_vcpu_by_cpuid() rejecting it — confirm.
			 */
			cpuid = ffs(cpuid) - 1;
			cpuid = (cpuid >= 4) ? 0 : cpuid;
		}

		vcpu = kvm_get_vcpu_by_cpuid(s->kvm, cpuid);
		if (!vcpu)
			continue;

		cpu = vcpu->vcpu_id;
		/* no change in routing, nothing to do */
		if (s->sw_coremap[irq + i] == cpu)
			continue;

		if (notify && test_bit(irq + i, (unsigned long *)s->isr.reg_u8)) {
			/* lower irq at old cpu and raise irq at new cpu */
			eiointc_update_irq(s, irq + i, 0);
			s->sw_coremap[irq + i] = cpu;
			eiointc_update_irq(s, irq + i, 1);
		} else {
			s->sw_coremap[irq + i] = cpu;
		}
	}
}
107 
eiointc_set_irq(struct loongarch_eiointc * s,int irq,int level)108 void eiointc_set_irq(struct loongarch_eiointc *s, int irq, int level)
109 {
110 	unsigned long flags;
111 	unsigned long *isr = (unsigned long *)s->isr.reg_u8;
112 
113 	level ? set_bit(irq, isr) : clear_bit(irq, isr);
114 	spin_lock_irqsave(&s->lock, flags);
115 	eiointc_update_irq(s, irq, level);
116 	spin_unlock_irqrestore(&s->lock, flags);
117 }
118 
/*
 * Re-evaluate irqs whose enable bit just toggled: every bit set in both
 * @mask and the isr byte at @index is pushed to eiointc_update_irq()
 * with the new @level.
 */
static inline void eiointc_enable_irq(struct kvm_vcpu *vcpu,
		struct loongarch_eiointc *s, int index, u8 mask, int level)
{
	int bit;
	u8 pending;

	pending = mask & s->isr.reg_u8[index];
	while (pending) {
		bit = ffs(pending) - 1;
		/*
		 * enable bit change from 0 to 1,
		 * need to update irq by pending bits
		 */
		eiointc_update_irq(s, index * 8 + bit, level);
		pending &= ~BIT(bit);
	}
}
137 
/*
 * Byte-wide read of an eiointc register; returns 0 on success or
 * -EINVAL for an offset outside the emulated register ranges.
 * Caller holds s->lock.
 */
static int loongarch_eiointc_readb(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
				gpa_t addr, int len, void *val)
{
	int idx, ret = 0;
	u8 result = 0;
	gpa_t reg;

	reg = addr - EIOINTC_BASE;
	switch (reg) {
	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
		idx = reg - EIOINTC_NODETYPE_START;
		result = s->nodetype.reg_u8[idx];
		break;
	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
		idx = reg - EIOINTC_IPMAP_START;
		result = s->ipmap.reg_u8[idx];
		break;
	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
		idx = reg - EIOINTC_ENABLE_START;
		result = s->enable.reg_u8[idx];
		break;
	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
		idx = reg - EIOINTC_BOUNCE_START;
		result = s->bounce.reg_u8[idx];
		break;
	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
		/* coreisr is banked per vcpu; read the caller's bank */
		idx = reg - EIOINTC_COREISR_START;
		result = s->coreisr.reg_u8[vcpu->vcpu_id][idx];
		break;
	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
		idx = reg - EIOINTC_COREMAP_START;
		result = s->coremap.reg_u8[idx];
		break;
	default:
		ret = -EINVAL;
		break;
	}
	*(u8 *)val = result;

	return ret;
}
179 
/*
 * Halfword-wide read of an eiointc register; returns 0 on success or
 * -EINVAL for an offset outside the emulated register ranges.
 * Caller holds s->lock.
 */
static int loongarch_eiointc_readw(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
				gpa_t addr, int len, void *val)
{
	int idx, ret = 0;
	u16 result = 0;
	gpa_t reg;

	reg = addr - EIOINTC_BASE;
	switch (reg) {
	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
		idx = (reg - EIOINTC_NODETYPE_START) >> 1;
		result = s->nodetype.reg_u16[idx];
		break;
	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
		idx = (reg - EIOINTC_IPMAP_START) >> 1;
		result = s->ipmap.reg_u16[idx];
		break;
	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
		idx = (reg - EIOINTC_ENABLE_START) >> 1;
		result = s->enable.reg_u16[idx];
		break;
	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
		idx = (reg - EIOINTC_BOUNCE_START) >> 1;
		result = s->bounce.reg_u16[idx];
		break;
	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
		/* coreisr is banked per vcpu; read the caller's bank */
		idx = (reg - EIOINTC_COREISR_START) >> 1;
		result = s->coreisr.reg_u16[vcpu->vcpu_id][idx];
		break;
	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
		idx = (reg - EIOINTC_COREMAP_START) >> 1;
		result = s->coremap.reg_u16[idx];
		break;
	default:
		ret = -EINVAL;
		break;
	}
	*(u16 *)val = result;

	return ret;
}
221 
/*
 * Word-wide read of an eiointc register; returns 0 on success or
 * -EINVAL for an offset outside the emulated register ranges.
 * Caller holds s->lock.
 */
static int loongarch_eiointc_readl(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
				gpa_t addr, int len, void *val)
{
	int idx, ret = 0;
	u32 result = 0;
	gpa_t reg;

	reg = addr - EIOINTC_BASE;
	switch (reg) {
	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
		idx = (reg - EIOINTC_NODETYPE_START) >> 2;
		result = s->nodetype.reg_u32[idx];
		break;
	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
		idx = (reg - EIOINTC_IPMAP_START) >> 2;
		result = s->ipmap.reg_u32[idx];
		break;
	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
		idx = (reg - EIOINTC_ENABLE_START) >> 2;
		result = s->enable.reg_u32[idx];
		break;
	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
		idx = (reg - EIOINTC_BOUNCE_START) >> 2;
		result = s->bounce.reg_u32[idx];
		break;
	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
		/* coreisr is banked per vcpu; read the caller's bank */
		idx = (reg - EIOINTC_COREISR_START) >> 2;
		result = s->coreisr.reg_u32[vcpu->vcpu_id][idx];
		break;
	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
		idx = (reg - EIOINTC_COREMAP_START) >> 2;
		result = s->coremap.reg_u32[idx];
		break;
	default:
		ret = -EINVAL;
		break;
	}
	*(u32 *)val = result;

	return ret;
}
263 
/*
 * Doubleword-wide read of an eiointc register; returns 0 on success or
 * -EINVAL for an offset outside the emulated register ranges.
 * Caller holds s->lock.
 */
static int loongarch_eiointc_readq(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
				gpa_t addr, int len, void *val)
{
	int idx, ret = 0;
	u64 result = 0;
	gpa_t reg;

	reg = addr - EIOINTC_BASE;
	switch (reg) {
	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
		idx = (reg - EIOINTC_NODETYPE_START) >> 3;
		result = s->nodetype.reg_u64[idx];
		break;
	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
		/* ipmap is a single u64, so no index is needed */
		idx = (reg - EIOINTC_IPMAP_START) >> 3;
		result = s->ipmap.reg_u64;
		break;
	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
		idx = (reg - EIOINTC_ENABLE_START) >> 3;
		result = s->enable.reg_u64[idx];
		break;
	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
		idx = (reg - EIOINTC_BOUNCE_START) >> 3;
		result = s->bounce.reg_u64[idx];
		break;
	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
		/* coreisr is banked per vcpu; read the caller's bank */
		idx = (reg - EIOINTC_COREISR_START) >> 3;
		result = s->coreisr.reg_u64[vcpu->vcpu_id][idx];
		break;
	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
		idx = (reg - EIOINTC_COREMAP_START) >> 3;
		result = s->coremap.reg_u64[idx];
		break;
	default:
		ret = -EINVAL;
		break;
	}
	*(u64 *)val = result;

	return ret;
}
305 
/*
 * IOCSR read dispatcher: validates the access, then forwards to the
 * width-specific handler under the device lock.
 */
static int kvm_eiointc_read(struct kvm_vcpu *vcpu,
			struct kvm_io_device *dev,
			gpa_t addr, int len, void *val)
{
	int ret = -EINVAL;
	unsigned long flags;
	struct loongarch_eiointc *s = vcpu->kvm->arch.eiointc;

	if (!s) {
		kvm_err("%s: eiointc irqchip not valid!\n", __func__);
		return -EINVAL;
	}

	/* reject accesses not naturally aligned to their width */
	if (addr & (len - 1)) {
		kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len);
		return -EINVAL;
	}

	vcpu->kvm->stat.eiointc_read_exits++;
	spin_lock_irqsave(&s->lock, flags);
	switch (len) {
	case 1:
		ret = loongarch_eiointc_readb(vcpu, s, addr, len, val);
		break;
	case 2:
		ret = loongarch_eiointc_readw(vcpu, s, addr, len, val);
		break;
	case 4:
		ret = loongarch_eiointc_readl(vcpu, s, addr, len, val);
		break;
	case 8:
		ret = loongarch_eiointc_readq(vcpu, s, addr, len, val);
		break;
	default:
		WARN_ONCE(1, "%s: Abnormal address access: addr 0x%llx, size %d\n",
						__func__, addr, len);
	}
	spin_unlock_irqrestore(&s->lock, flags);

	return ret;
}
347 
/*
 * Byte-wide write of an eiointc register.  Caller holds s->lock.
 *
 * Side effects beyond storing the value:
 *  - enable: irqs whose enable bit toggled are re-evaluated against isr;
 *  - coreisr: write-1-to-clear, each cleared pending irq is lowered;
 *  - coremap: the software routing table is refreshed (with live migration
 *    of asserted irqs).
 */
static int loongarch_eiointc_writeb(struct kvm_vcpu *vcpu,
				struct loongarch_eiointc *s,
				gpa_t addr, int len, const void *val)
{
	int index, irq, bits, ret = 0;
	u8 cpu;
	u8 data, old_data;
	u8 coreisr, old_coreisr;
	gpa_t offset;

	data = *(u8 *)val;
	offset = addr - EIOINTC_BASE;

	switch (offset) {
	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
		index = (offset - EIOINTC_NODETYPE_START);
		s->nodetype.reg_u8[index] = data;
		break;
	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
		/*
		 * ipmap cannot be set at runtime, can be set only at the beginning
		 * of irqchip driver, need not update upper irq level
		 */
		index = (offset - EIOINTC_IPMAP_START);
		s->ipmap.reg_u8[index] = data;
		break;
	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
		index = (offset - EIOINTC_ENABLE_START);
		old_data = s->enable.reg_u8[index];
		s->enable.reg_u8[index] = data;
		/*
		 * 1: enable irq.
		 * update irq when isr is set.
		 */
		data = s->enable.reg_u8[index] & ~old_data & s->isr.reg_u8[index];
		eiointc_enable_irq(vcpu, s, index, data, 1);
		/*
		 * 0: disable irq.
		 * update irq when isr is set.
		 */
		data = ~s->enable.reg_u8[index] & old_data & s->isr.reg_u8[index];
		eiointc_enable_irq(vcpu, s, index, data, 0);
		break;
	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
		/* do not emulate hw bounced irq routing */
		index = offset - EIOINTC_BOUNCE_START;
		s->bounce.reg_u8[index] = data;
		break;
	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
		index = (offset - EIOINTC_COREISR_START);
		/* use attrs to get current cpu index */
		cpu = vcpu->vcpu_id;
		coreisr = data;
		old_coreisr = s->coreisr.reg_u8[cpu][index];
		/* write 1 to clear interrupt */
		s->coreisr.reg_u8[cpu][index] = old_coreisr & ~coreisr;
		/* keep only bits that were pending AND are being cleared now */
		coreisr &= old_coreisr;
		bits = sizeof(data) * 8;
		/*
		 * NOTE(review): find_first_bit() takes an unsigned long * and
		 * may load a full word from &coreisr, a sub-word stack local;
		 * the result is bounded by 'bits', but confirm this is safe on
		 * all supported configurations.
		 */
		irq = find_first_bit((void *)&coreisr, bits);
		while (irq < bits) {
			/* lower each irq just acknowledged by the guest */
			eiointc_update_irq(s, irq + index * bits, 0);
			bitmap_clear((void *)&coreisr, irq, 1);
			irq = find_first_bit((void *)&coreisr, bits);
		}
		break;
	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
		irq = offset - EIOINTC_COREMAP_START;
		index = irq;
		s->coremap.reg_u8[index] = data;
		eiointc_update_sw_coremap(s, irq, data, sizeof(data), true);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
426 
/*
 * Halfword-wide write of an eiointc register.  Caller holds s->lock.
 * Same side effects as loongarch_eiointc_writeb(), applied per byte of
 * the 16-bit value where byte granularity matters (enable, coremap).
 */
static int loongarch_eiointc_writew(struct kvm_vcpu *vcpu,
				struct loongarch_eiointc *s,
				gpa_t addr, int len, const void *val)
{
	int i, index, irq, bits, ret = 0;
	u8 cpu;
	u16 data, old_data;
	u16 coreisr, old_coreisr;
	gpa_t offset;

	data = *(u16 *)val;
	offset = addr - EIOINTC_BASE;

	switch (offset) {
	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
		index = (offset - EIOINTC_NODETYPE_START) >> 1;
		s->nodetype.reg_u16[index] = data;
		break;
	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
		/*
		 * ipmap cannot be set at runtime, can be set only at the beginning
		 * of irqchip driver, need not update upper irq level
		 */
		index = (offset - EIOINTC_IPMAP_START) >> 1;
		s->ipmap.reg_u16[index] = data;
		break;
	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
		index = (offset - EIOINTC_ENABLE_START) >> 1;
		old_data = s->enable.reg_u16[index];
		s->enable.reg_u16[index] = data;
		/*
		 * 1: enable irq.
		 * update irq when isr is set.
		 */
		data = s->enable.reg_u16[index] & ~old_data & s->isr.reg_u16[index];
		/* eiointc_enable_irq() works per byte; split the u16 */
		for (i = 0; i < sizeof(data); i++) {
			u8 mask = (data >> (i * 8)) & 0xff;
			eiointc_enable_irq(vcpu, s, index * 2 + i, mask, 1);
		}
		/*
		 * 0: disable irq.
		 * update irq when isr is set.
		 */
		data = ~s->enable.reg_u16[index] & old_data & s->isr.reg_u16[index];
		for (i = 0; i < sizeof(data); i++) {
			u8 mask = (data >> (i * 8)) & 0xff;
			eiointc_enable_irq(vcpu, s, index * 2 + i, mask, 0);
		}
		break;
	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
		/* do not emulate hw bounced irq routing */
		index = (offset - EIOINTC_BOUNCE_START) >> 1;
		s->bounce.reg_u16[index] = data;
		break;
	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
		index = (offset - EIOINTC_COREISR_START) >> 1;
		/* use attrs to get current cpu index */
		cpu = vcpu->vcpu_id;
		coreisr = data;
		old_coreisr = s->coreisr.reg_u16[cpu][index];
		/* write 1 to clear interrupt */
		s->coreisr.reg_u16[cpu][index] = old_coreisr & ~coreisr;
		/* keep only bits that were pending AND are being cleared now */
		coreisr &= old_coreisr;
		bits = sizeof(data) * 8;
		/*
		 * NOTE(review): find_first_bit() may load a full word from
		 * &coreisr, a sub-word stack local; bounded by 'bits' but
		 * confirm safety (see writeb).
		 */
		irq = find_first_bit((void *)&coreisr, bits);
		while (irq < bits) {
			/* lower each irq just acknowledged by the guest */
			eiointc_update_irq(s, irq + index * bits, 0);
			bitmap_clear((void *)&coreisr, irq, 1);
			irq = find_first_bit((void *)&coreisr, bits);
		}
		break;
	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
		irq = offset - EIOINTC_COREMAP_START;
		index = irq >> 1;
		s->coremap.reg_u16[index] = data;
		eiointc_update_sw_coremap(s, irq, data, sizeof(data), true);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
511 
/*
 * Word-wide write of an eiointc register.  Caller holds s->lock.
 * Same side effects as loongarch_eiointc_writeb(), applied per byte of
 * the 32-bit value where byte granularity matters (enable, coremap).
 */
static int loongarch_eiointc_writel(struct kvm_vcpu *vcpu,
				struct loongarch_eiointc *s,
				gpa_t addr, int len, const void *val)
{
	int i, index, irq, bits, ret = 0;
	u8 cpu;
	u32 data, old_data;
	u32 coreisr, old_coreisr;
	gpa_t offset;

	data = *(u32 *)val;
	offset = addr - EIOINTC_BASE;

	switch (offset) {
	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
		index = (offset - EIOINTC_NODETYPE_START) >> 2;
		s->nodetype.reg_u32[index] = data;
		break;
	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
		/*
		 * ipmap cannot be set at runtime, can be set only at the beginning
		 * of irqchip driver, need not update upper irq level
		 */
		index = (offset - EIOINTC_IPMAP_START) >> 2;
		s->ipmap.reg_u32[index] = data;
		break;
	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
		index = (offset - EIOINTC_ENABLE_START) >> 2;
		old_data = s->enable.reg_u32[index];
		s->enable.reg_u32[index] = data;
		/*
		 * 1: enable irq.
		 * update irq when isr is set.
		 */
		data = s->enable.reg_u32[index] & ~old_data & s->isr.reg_u32[index];
		/* eiointc_enable_irq() works per byte; split the u32 */
		for (i = 0; i < sizeof(data); i++) {
			u8 mask = (data >> (i * 8)) & 0xff;
			eiointc_enable_irq(vcpu, s, index * 4 + i, mask, 1);
		}
		/*
		 * 0: disable irq.
		 * update irq when isr is set.
		 */
		data = ~s->enable.reg_u32[index] & old_data & s->isr.reg_u32[index];
		for (i = 0; i < sizeof(data); i++) {
			u8 mask = (data >> (i * 8)) & 0xff;
			eiointc_enable_irq(vcpu, s, index * 4 + i, mask, 0);
		}
		break;
	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
		/* do not emulate hw bounced irq routing */
		index = (offset - EIOINTC_BOUNCE_START) >> 2;
		s->bounce.reg_u32[index] = data;
		break;
	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
		index = (offset - EIOINTC_COREISR_START) >> 2;
		/* use attrs to get current cpu index */
		cpu = vcpu->vcpu_id;
		coreisr = data;
		old_coreisr = s->coreisr.reg_u32[cpu][index];
		/* write 1 to clear interrupt */
		s->coreisr.reg_u32[cpu][index] = old_coreisr & ~coreisr;
		/* keep only bits that were pending AND are being cleared now */
		coreisr &= old_coreisr;
		bits = sizeof(data) * 8;
		/*
		 * NOTE(review): find_first_bit() may load a full word from
		 * &coreisr, a sub-word stack local; bounded by 'bits' but
		 * confirm safety (see writeb).
		 */
		irq = find_first_bit((void *)&coreisr, bits);
		while (irq < bits) {
			/* lower each irq just acknowledged by the guest */
			eiointc_update_irq(s, irq + index * bits, 0);
			bitmap_clear((void *)&coreisr, irq, 1);
			irq = find_first_bit((void *)&coreisr, bits);
		}
		break;
	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
		irq = offset - EIOINTC_COREMAP_START;
		index = irq >> 2;
		s->coremap.reg_u32[index] = data;
		eiointc_update_sw_coremap(s, irq, data, sizeof(data), true);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
596 
/*
 * Doubleword-wide write of an eiointc register.  Caller holds s->lock.
 * Same side effects as loongarch_eiointc_writeb(), applied per byte of
 * the 64-bit value where byte granularity matters (enable, coremap).
 */
static int loongarch_eiointc_writeq(struct kvm_vcpu *vcpu,
				struct loongarch_eiointc *s,
				gpa_t addr, int len, const void *val)
{
	int i, index, irq, bits, ret = 0;
	u8 cpu;
	u64 data, old_data;
	u64 coreisr, old_coreisr;
	gpa_t offset;

	data = *(u64 *)val;
	offset = addr - EIOINTC_BASE;

	switch (offset) {
	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
		index = (offset - EIOINTC_NODETYPE_START) >> 3;
		s->nodetype.reg_u64[index] = data;
		break;
	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
		/*
		 * ipmap cannot be set at runtime, can be set only at the beginning
		 * of irqchip driver, need not update upper irq level
		 */
		index = (offset - EIOINTC_IPMAP_START) >> 3;
		s->ipmap.reg_u64 = data;
		break;
	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
		index = (offset - EIOINTC_ENABLE_START) >> 3;
		old_data = s->enable.reg_u64[index];
		s->enable.reg_u64[index] = data;
		/*
		 * 1: enable irq.
		 * update irq when isr is set.
		 */
		data = s->enable.reg_u64[index] & ~old_data & s->isr.reg_u64[index];
		/* eiointc_enable_irq() works per byte; split the u64 */
		for (i = 0; i < sizeof(data); i++) {
			u8 mask = (data >> (i * 8)) & 0xff;
			eiointc_enable_irq(vcpu, s, index * 8 + i, mask, 1);
		}
		/*
		 * 0: disable irq.
		 * update irq when isr is set.
		 */
		data = ~s->enable.reg_u64[index] & old_data & s->isr.reg_u64[index];
		for (i = 0; i < sizeof(data); i++) {
			u8 mask = (data >> (i * 8)) & 0xff;
			eiointc_enable_irq(vcpu, s, index * 8 + i, mask, 0);
		}
		break;
	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
		/* do not emulate hw bounced irq routing */
		index = (offset - EIOINTC_BOUNCE_START) >> 3;
		s->bounce.reg_u64[index] = data;
		break;
	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
		index = (offset - EIOINTC_COREISR_START) >> 3;
		/* use attrs to get current cpu index */
		cpu = vcpu->vcpu_id;
		coreisr = data;
		old_coreisr = s->coreisr.reg_u64[cpu][index];
		/* write 1 to clear interrupt */
		s->coreisr.reg_u64[cpu][index] = old_coreisr & ~coreisr;
		/* keep only bits that were pending AND are being cleared now */
		coreisr &= old_coreisr;
		bits = sizeof(data) * 8;
		irq = find_first_bit((void *)&coreisr, bits);
		while (irq < bits) {
			/* lower each irq just acknowledged by the guest */
			eiointc_update_irq(s, irq + index * bits, 0);
			bitmap_clear((void *)&coreisr, irq, 1);
			irq = find_first_bit((void *)&coreisr, bits);
		}
		break;
	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
		irq = offset - EIOINTC_COREMAP_START;
		index = irq >> 3;
		s->coremap.reg_u64[index] = data;
		eiointc_update_sw_coremap(s, irq, data, sizeof(data), true);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
681 
/*
 * IOCSR write dispatcher: validates the access, then forwards to the
 * width-specific handler under the device lock.
 */
static int kvm_eiointc_write(struct kvm_vcpu *vcpu,
			struct kvm_io_device *dev,
			gpa_t addr, int len, const void *val)
{
	int ret = -EINVAL;
	unsigned long flags;
	struct loongarch_eiointc *s = vcpu->kvm->arch.eiointc;

	if (!s) {
		kvm_err("%s: eiointc irqchip not valid!\n", __func__);
		return -EINVAL;
	}

	/* reject accesses not naturally aligned to their width */
	if (addr & (len - 1)) {
		kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len);
		return -EINVAL;
	}

	vcpu->kvm->stat.eiointc_write_exits++;
	spin_lock_irqsave(&s->lock, flags);
	switch (len) {
	case 1:
		ret = loongarch_eiointc_writeb(vcpu, s, addr, len, val);
		break;
	case 2:
		ret = loongarch_eiointc_writew(vcpu, s, addr, len, val);
		break;
	case 4:
		ret = loongarch_eiointc_writel(vcpu, s, addr, len, val);
		break;
	case 8:
		ret = loongarch_eiointc_writeq(vcpu, s, addr, len, val);
		break;
	default:
		WARN_ONCE(1, "%s: Abnormal address access: addr 0x%llx, size %d\n",
						__func__, addr, len);
	}
	spin_unlock_irqrestore(&s->lock, flags);

	return ret;
}
723 
/* IOCSR accessors for the main eiointc register block */
static const struct kvm_io_device_ops kvm_eiointc_ops = {
	.read	= kvm_eiointc_read,
	.write	= kvm_eiointc_write,
};
728 
/*
 * Read handler for the virt-extension register window (features and
 * config).  Unknown offsets read back as 0; the call always succeeds
 * once the irqchip exists.
 */
static int kvm_eiointc_virt_read(struct kvm_vcpu *vcpu,
				struct kvm_io_device *dev,
				gpa_t addr, int len, void *val)
{
	unsigned long flags;
	u32 *out = val;
	struct loongarch_eiointc *s = vcpu->kvm->arch.eiointc;

	if (!s) {
		kvm_err("%s: eiointc irqchip not valid!\n", __func__);
		return -EINVAL;
	}

	addr -= EIOINTC_VIRT_BASE;
	spin_lock_irqsave(&s->lock, flags);
	if (addr == EIOINTC_VIRT_FEATURES)
		*out = s->features;
	else if (addr == EIOINTC_VIRT_CONFIG)
		*out = s->status;
	spin_unlock_irqrestore(&s->lock, flags);

	return 0;
}
758 
/*
 * Write handler for the virt-extension register window.  The feature
 * register is read-only; the config register may only be set to a
 * non-zero value while the controller is disabled, and unknown feature
 * bits are masked off.
 */
static int kvm_eiointc_virt_write(struct kvm_vcpu *vcpu,
				struct kvm_io_device *dev,
				gpa_t addr, int len, const void *val)
{
	int ret = 0;
	unsigned long flags;
	u32 value = *(u32 *)val;
	struct loongarch_eiointc *s = vcpu->kvm->arch.eiointc;

	if (!s) {
		kvm_err("%s: eiointc irqchip not valid!\n", __func__);
		return -EINVAL;
	}

	addr -= EIOINTC_VIRT_BASE;
	spin_lock_irqsave(&s->lock, flags);
	switch (addr) {
	case EIOINTC_VIRT_FEATURES:
		/* features are fixed by the host; guest writes are rejected */
		ret = -EPERM;
		break;
	case EIOINTC_VIRT_CONFIG:
		/*
		 * eiointc features can only be set at disabled status
		 */
		if ((s->status & BIT(EIOINTC_ENABLE)) && value) {
			ret = -EPERM;
			break;
		}
		s->status = value & s->features;
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(&s->lock, flags);

	return ret;
}
796 
/* IOCSR accessors for the virt-extension register window */
static const struct kvm_io_device_ops kvm_eiointc_virt_ops = {
	.read	= kvm_eiointc_virt_read,
	.write	= kvm_eiointc_virt_write,
};
801 
kvm_eiointc_ctrl_access(struct kvm_device * dev,struct kvm_device_attr * attr)802 static int kvm_eiointc_ctrl_access(struct kvm_device *dev,
803 					struct kvm_device_attr *attr)
804 {
805 	int ret = 0;
806 	unsigned long flags;
807 	unsigned long type = (unsigned long)attr->attr;
808 	u32 i, start_irq, val;
809 	void __user *data;
810 	struct loongarch_eiointc *s = dev->kvm->arch.eiointc;
811 
812 	data = (void __user *)attr->addr;
813 	spin_lock_irqsave(&s->lock, flags);
814 	switch (type) {
815 	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU:
816 		if (copy_from_user(&val, data, 4))
817 			ret = -EFAULT;
818 		else {
819 			if (val >= EIOINTC_ROUTE_MAX_VCPUS)
820 				ret = -EINVAL;
821 			else
822 				s->num_cpu = val;
823 		}
824 		break;
825 	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE:
826 		if (copy_from_user(&s->features, data, 4))
827 			ret = -EFAULT;
828 		if (!(s->features & BIT(EIOINTC_HAS_VIRT_EXTENSION)))
829 			s->status |= BIT(EIOINTC_ENABLE);
830 		break;
831 	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_LOAD_FINISHED:
832 		eiointc_set_sw_coreisr(s);
833 		for (i = 0; i < (EIOINTC_IRQS / 4); i++) {
834 			start_irq = i * 4;
835 			eiointc_update_sw_coremap(s, start_irq,
836 					s->coremap.reg_u32[i], sizeof(u32), false);
837 		}
838 		break;
839 	default:
840 		break;
841 	}
842 	spin_unlock_irqrestore(&s->lock, flags);
843 
844 	return ret;
845 }
846 
/*
 * Userspace get/set of one 32-bit eiointc register, addressed by
 * attr->attr (cpu index in bits 31:16, register offset in bits 15:0).
 *
 * Fix: copy_{from,to}_user() may fault and sleep, so they must not run
 * under the irq-disabled spinlock.  Stage the value in a local and only
 * hold the lock for the memcpy against device state.
 */
static int kvm_eiointc_regs_access(struct kvm_device *dev,
					struct kvm_device_attr *attr,
					bool is_write)
{
	int addr, cpu, offset, ret = 0;
	unsigned long flags;
	u32 value;
	void *p = NULL;
	void __user *data;
	struct loongarch_eiointc *s;

	s = dev->kvm->arch.eiointc;
	addr = attr->attr;
	cpu = addr >> 16;
	addr &= 0xffff;
	data = (void __user *)attr->addr;
	switch (addr) {
	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
		offset = (addr - EIOINTC_NODETYPE_START) / 4;
		p = &s->nodetype.reg_u32[offset];
		break;
	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
		offset = (addr - EIOINTC_IPMAP_START) / 4;
		p = &s->ipmap.reg_u32[offset];
		break;
	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
		offset = (addr - EIOINTC_ENABLE_START) / 4;
		p = &s->enable.reg_u32[offset];
		break;
	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
		offset = (addr - EIOINTC_BOUNCE_START) / 4;
		p = &s->bounce.reg_u32[offset];
		break;
	case EIOINTC_ISR_START ... EIOINTC_ISR_END:
		offset = (addr - EIOINTC_ISR_START) / 4;
		p = &s->isr.reg_u32[offset];
		break;
	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
		/* coreisr is banked per cpu; validate the cpu index */
		if (cpu >= s->num_cpu)
			return -EINVAL;

		offset = (addr - EIOINTC_COREISR_START) / 4;
		p = &s->coreisr.reg_u32[cpu][offset];
		break;
	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
		offset = (addr - EIOINTC_COREMAP_START) / 4;
		p = &s->coremap.reg_u32[offset];
		break;
	default:
		kvm_err("%s: unknown eiointc register, addr = %d\n", __func__, addr);
		return -EINVAL;
	}

	/* user access outside the lock: it may fault and sleep */
	if (is_write) {
		if (copy_from_user(&value, data, 4))
			return -EFAULT;
	}

	spin_lock_irqsave(&s->lock, flags);
	if (is_write)
		memcpy(p, &value, 4);
	else
		memcpy(&value, p, 4);
	spin_unlock_irqrestore(&s->lock, flags);

	if (!is_write) {
		if (copy_to_user(data, &value, 4))
			return -EFAULT;
	}

	return ret;
}
911 
/*
 * Userspace get/set of eiointc software state (num_cpu, features,
 * status).  num_cpu and features are read-only through this interface.
 *
 * Fix: copy_{from,to}_user() may fault and sleep, so they must not run
 * under the irq-disabled spinlock.  Stage the value in a local and only
 * hold the lock for the memcpy against device state.
 */
static int kvm_eiointc_sw_status_access(struct kvm_device *dev,
					struct kvm_device_attr *attr,
					bool is_write)
{
	int addr, ret = 0;
	unsigned long flags;
	u32 value;
	void *p = NULL;
	void __user *data;
	struct loongarch_eiointc *s;

	s = dev->kvm->arch.eiointc;
	addr = attr->attr;
	addr &= 0xffff;

	data = (void __user *)attr->addr;
	switch (addr) {
	case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU:
		/* writes are silently ignored, matching existing ABI */
		if (is_write)
			return ret;

		p = &s->num_cpu;
		break;
	case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_FEATURE:
		if (is_write)
			return ret;

		p = &s->features;
		break;
	case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_STATE:
		p = &s->status;
		break;
	default:
		kvm_err("%s: unknown eiointc register, addr = %d\n", __func__, addr);
		return -EINVAL;
	}

	/* user access outside the lock: it may fault and sleep */
	if (is_write) {
		if (copy_from_user(&value, data, 4))
			return -EFAULT;
	}

	spin_lock_irqsave(&s->lock, flags);
	if (is_write)
		memcpy(p, &value, 4);
	else
		memcpy(&value, p, 4);
	spin_unlock_irqrestore(&s->lock, flags);

	if (!is_write) {
		if (copy_to_user(data, &value, 4))
			return -EFAULT;
	}

	return ret;
}
959 
kvm_eiointc_get_attr(struct kvm_device * dev,struct kvm_device_attr * attr)960 static int kvm_eiointc_get_attr(struct kvm_device *dev,
961 				struct kvm_device_attr *attr)
962 {
963 	switch (attr->group) {
964 	case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS:
965 		return kvm_eiointc_regs_access(dev, attr, false);
966 	case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS:
967 		return kvm_eiointc_sw_status_access(dev, attr, false);
968 	default:
969 		return -EINVAL;
970 	}
971 }
972 
kvm_eiointc_set_attr(struct kvm_device * dev,struct kvm_device_attr * attr)973 static int kvm_eiointc_set_attr(struct kvm_device *dev,
974 				struct kvm_device_attr *attr)
975 {
976 	switch (attr->group) {
977 	case KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL:
978 		return kvm_eiointc_ctrl_access(dev, attr);
979 	case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS:
980 		return kvm_eiointc_regs_access(dev, attr, true);
981 	case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS:
982 		return kvm_eiointc_sw_status_access(dev, attr, true);
983 	default:
984 		return -EINVAL;
985 	}
986 }
987 
/*
 * Create the eiointc device: allocate state and register the two IOCSR
 * io-devices (main register block and virt-extension window).
 *
 * Fix: kvm_io_bus_register_dev()/kvm_io_bus_unregister_dev() require
 * kvm->slots_lock to be held; the original released the lock before
 * registering the virt-extension device and before the error-path
 * unregister.  Hold the lock across both registrations instead.
 */
static int kvm_eiointc_create(struct kvm_device *dev, u32 type)
{
	int ret;
	struct loongarch_eiointc *s;
	struct kvm_io_device *device, *device1;
	struct kvm *kvm = dev->kvm;

	/* eiointc has been created */
	if (kvm->arch.eiointc)
		return -EINVAL;

	s = kzalloc(sizeof(struct loongarch_eiointc), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	spin_lock_init(&s->lock);
	s->kvm = kvm;

	/*
	 * Initialize IOCSR device
	 */
	device = &s->device;
	kvm_iodevice_init(device, &kvm_eiointc_ops);
	device1 = &s->device_vext;
	kvm_iodevice_init(device1, &kvm_eiointc_virt_ops);

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS,
			EIOINTC_BASE, EIOINTC_SIZE, device);
	if (ret < 0)
		goto err_unlock;

	ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS,
			EIOINTC_VIRT_BASE, EIOINTC_VIRT_SIZE, device1);
	if (ret < 0)
		goto err_unregister;
	mutex_unlock(&kvm->slots_lock);

	kvm->arch.eiointc = s;

	return 0;

err_unregister:
	kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &s->device);
err_unlock:
	mutex_unlock(&kvm->slots_lock);
	kfree(s);
	return ret;
}
1033 
/*
 * Tear down the eiointc device: unregister both io-devices and free the
 * state.  Tolerates being called when the device was never created.
 *
 * NOTE(review): kvm_io_bus_unregister_dev() is called here without
 * kvm->slots_lock; confirm the destroy path guarantees exclusive access
 * (e.g. runs during VM teardown) or take the lock as in create.
 */
static void kvm_eiointc_destroy(struct kvm_device *dev)
{
	struct kvm *kvm;
	struct loongarch_eiointc *eiointc;

	if (!dev || !dev->kvm || !dev->kvm->arch.eiointc)
		return;

	kvm = dev->kvm;
	eiointc = kvm->arch.eiointc;
	kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &eiointc->device);
	kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &eiointc->device_vext);
	kfree(eiointc);
}
1048 
/* KVM device model hooks for the LoongArch eiointc irqchip */
static struct kvm_device_ops kvm_eiointc_dev_ops = {
	.name = "kvm-loongarch-eiointc",
	.create = kvm_eiointc_create,
	.destroy = kvm_eiointc_destroy,
	.set_attr = kvm_eiointc_set_attr,
	.get_attr = kvm_eiointc_get_attr,
};
1056 
/*
 * Register the eiointc device type with KVM so userspace can create it
 * via KVM_CREATE_DEVICE.  Returns 0 on success or a negative errno.
 */
int kvm_loongarch_register_eiointc_device(void)
{
	return kvm_register_device_ops(&kvm_eiointc_dev_ops, KVM_DEV_TYPE_LOONGARCH_EIOINTC);
}
1061