// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2024 Loongson Technology Corporation Limited
 */

#include <asm/kvm_eiointc.h>
#include <asm/kvm_vcpu.h>
#include <linux/count_zeros.h>

/*
 * Rebuild the per-cpu, per-ip software pending bitmap (sw_coreisr)
 * from the architectural coreisr registers.
 *
 * For every irq line the routed ip index is taken from ipmap and the
 * target vcpu from coremap; sw_coreisr[cpu][ipnum] is then set or
 * cleared to mirror the matching coreisr bit.  Used when restoring
 * register state (see KVM_DEV_LOONGARCH_EXTIOI_CTRL_LOAD_FINISHED).
 */
static void eiointc_set_sw_coreisr(struct loongarch_eiointc *s)
{
	int ipnum, cpu, cpuid, irq;
	struct kvm_vcpu *vcpu;

	for (irq = 0; irq < EIOINTC_IRQS; irq++) {
		/* one ipmap byte describes the ip routing of 32 irq lines */
		ipnum = s->ipmap.reg_u8[irq / 32];
		if (!(s->status & BIT(EIOINTC_ENABLE_INT_ENCODE))) {
			/* bitmap mode: lowest set bit selects the ip, out of range -> ip0 */
			ipnum = count_trailing_zeros(ipnum);
			ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0;
		}

		cpuid = s->coremap.reg_u8[irq];
		vcpu = kvm_get_vcpu_by_cpuid(s->kvm, cpuid);
		if (!vcpu)
			continue;

		cpu = vcpu->vcpu_id;
		if (test_bit(irq, (unsigned long *)s->coreisr.reg_u32[cpu]))
			__set_bit(irq, s->sw_coreisr[cpu][ipnum]);
		else
			__clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
	}
}
34
/*
 * Propagate a level change on @irq to the vcpu it is routed to.
 *
 * Maintains sw_coreisr[cpu][ipnum] and signals the parent CPU
 * interrupt (INT_HWI0 + ipnum) only on the first-pending / last-cleared
 * transition, so the vcpu is not re-notified while other irqs in the
 * same ip group are still pending.  Caller must hold s->lock.
 */
static void eiointc_update_irq(struct loongarch_eiointc *s, int irq, int level)
{
	int ipnum, cpu, found;
	struct kvm_vcpu *vcpu;
	struct kvm_interrupt vcpu_irq;

	/* one ipmap byte describes the ip routing of 32 irq lines */
	ipnum = s->ipmap.reg_u8[irq / 32];
	if (!(s->status & BIT(EIOINTC_ENABLE_INT_ENCODE))) {
		/* bitmap mode: lowest set bit selects the ip, out of range -> ip0 */
		ipnum = count_trailing_zeros(ipnum);
		ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0;
	}

	cpu = s->sw_coremap[irq];
	vcpu = kvm_get_vcpu_by_id(s->kvm, cpu);
	if (unlikely(vcpu == NULL)) {
		kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
		return;
	}

	if (level) {
		/* if not enable return false */
		if (!test_bit(irq, (unsigned long *)s->enable.reg_u32))
			return;
		__set_bit(irq, (unsigned long *)s->coreisr.reg_u32[cpu]);
		/* sample pending state BEFORE adding this irq's bit */
		found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS);
		__set_bit(irq, s->sw_coreisr[cpu][ipnum]);
	} else {
		__clear_bit(irq, (unsigned long *)s->coreisr.reg_u32[cpu]);
		__clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
		/* sample pending state AFTER removing this irq's bit */
		found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS);
	}

	if (found < EIOINTC_IRQS)
		return; /* other irq is handling, needn't update parent irq */

	/* a negative irq number requests deassertion of the parent line */
	vcpu_irq.irq = level ? (INT_HWI0 + ipnum) : -(INT_HWI0 + ipnum);
	kvm_vcpu_ioctl_interrupt(vcpu, &vcpu_irq);
}
73
/*
 * Refresh the software routing table (sw_coremap) from a guest write
 * of @len coremap bytes starting at line @irq (one byte per line,
 * value = destination cpu).  When @notify is set and a re-routed irq
 * is currently pending in isr, it is migrated: lowered on the old
 * vcpu and raised on the new one.  Caller must hold s->lock.
 */
static inline void eiointc_update_sw_coremap(struct loongarch_eiointc *s,
					int irq, u64 val, u32 len, bool notify)
{
	int i, cpu, cpuid;
	struct kvm_vcpu *vcpu;

	for (i = 0; i < len; i++) {
		/* consume one routing byte per irq line */
		cpuid = val & 0xff;
		val = val >> 8;

		if (!(s->status & BIT(EIOINTC_ENABLE_CPU_ENCODE))) {
			/* bitmap mode: lowest set bit selects the cpu, out of range -> cpu0 */
			cpuid = ffs(cpuid) - 1;
			cpuid = (cpuid >= 4) ? 0 : cpuid;
		}

		vcpu = kvm_get_vcpu_by_cpuid(s->kvm, cpuid);
		if (!vcpu)
			continue;

		cpu = vcpu->vcpu_id;
		if (s->sw_coremap[irq + i] == cpu)
			continue;

		if (notify && test_bit(irq + i, (unsigned long *)s->isr.reg_u8)) {
			/* lower irq at old cpu and raise irq at new cpu */
			eiointc_update_irq(s, irq + i, 0);
			s->sw_coremap[irq + i] = cpu;
			eiointc_update_irq(s, irq + i, 1);
		} else {
			s->sw_coremap[irq + i] = cpu;
		}
	}
}
107
eiointc_set_irq(struct loongarch_eiointc * s,int irq,int level)108 void eiointc_set_irq(struct loongarch_eiointc *s, int irq, int level)
109 {
110 unsigned long flags;
111 unsigned long *isr = (unsigned long *)s->isr.reg_u8;
112
113 spin_lock_irqsave(&s->lock, flags);
114 level ? __set_bit(irq, isr) : __clear_bit(irq, isr);
115 eiointc_update_irq(s, irq, level);
116 spin_unlock_irqrestore(&s->lock, flags);
117 }
118
/*
 * Fetch the aligned 64-bit eiointc register containing @addr into
 * @val.  Caller must hold s->lock.  Returns -EINVAL (with *val == 0)
 * for addresses outside the emulated register map.
 */
static int loongarch_eiointc_read(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
				gpa_t addr, unsigned long *val)
{
	gpa_t offset = addr - EIOINTC_BASE;
	u64 regval;
	int idx;

	switch (offset) {
	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
		idx = (offset - EIOINTC_NODETYPE_START) >> 3;
		regval = s->nodetype.reg_u64[idx];
		break;
	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
		/* ipmap is a single u64 register */
		regval = s->ipmap.reg_u64;
		break;
	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
		idx = (offset - EIOINTC_ENABLE_START) >> 3;
		regval = s->enable.reg_u64[idx];
		break;
	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
		idx = (offset - EIOINTC_BOUNCE_START) >> 3;
		regval = s->bounce.reg_u64[idx];
		break;
	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
		/* coreisr is banked per vcpu; read the caller's bank */
		idx = (offset - EIOINTC_COREISR_START) >> 3;
		regval = s->coreisr.reg_u64[vcpu->vcpu_id][idx];
		break;
	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
		idx = (offset - EIOINTC_COREMAP_START) >> 3;
		regval = s->coremap.reg_u64[idx];
		break;
	default:
		*val = 0;
		return -EINVAL;
	}

	*val = regval;
	return 0;
}
160
/*
 * IOCSR read handler for the main eiointc register window.
 *
 * Reads the containing aligned 64-bit register under the device lock,
 * then shifts out and sign-extends the @len bytes the guest asked for
 * into @val.
 */
static int kvm_eiointc_read(struct kvm_vcpu *vcpu,
			struct kvm_io_device *dev,
			gpa_t addr, int len, void *val)
{
	int ret = -EINVAL;
	unsigned long flags, data, offset;
	struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;

	if (!eiointc) {
		kvm_err("%s: eiointc irqchip not valid!\n", __func__);
		return -EINVAL;
	}

	/* accesses must be naturally aligned */
	if (addr & (len - 1)) {
		kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len);
		return -EINVAL;
	}

	/* round the address down to the containing u64 register */
	offset = addr & 0x7;
	addr -= offset;
	vcpu->stat.eiointc_read_exits++;
	spin_lock_irqsave(&eiointc->lock, flags);
	ret = loongarch_eiointc_read(vcpu, eiointc, addr, &data);
	spin_unlock_irqrestore(&eiointc->lock, flags);
	if (ret)
		return ret;

	/* extract the addressed bytes and sign-extend to register width */
	data = data >> (offset * 8);
	switch (len) {
	case 1:
		*(long *)val = (s8)data;
		break;
	case 2:
		*(long *)val = (s16)data;
		break;
	case 4:
		*(long *)val = (s32)data;
		break;
	default:
		*(long *)val = (long)data;
		break;
	}

	return 0;
}
206
/*
 * Emulate a guest write to the eiointc register space.
 *
 * @addr may be unaligned within a 64-bit register: @field_mask (0xFF,
 * USHRT_MAX, UINT_MAX or ULONG_MAX) describes the access width and is
 * shifted to the byte offset to form a read-modify-write mask on the
 * containing u64.  Side effects (irq raise/lower, routing updates) are
 * applied for enable, coreisr and coremap writes.  Caller must hold
 * s->lock.
 */
static int loongarch_eiointc_write(struct kvm_vcpu *vcpu,
				struct loongarch_eiointc *s,
				gpa_t addr, u64 value, u64 field_mask)
{
	int index, irq, ret = 0;
	u8 cpu;
	u64 data, old, mask;
	gpa_t offset;

	/* position the written field inside the containing u64 */
	offset = addr & 7;
	mask = field_mask << (offset * 8);
	data = (value & field_mask) << (offset * 8);

	addr -= offset;
	offset = addr - EIOINTC_BASE;

	switch (offset) {
	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
		index = (offset - EIOINTC_NODETYPE_START) >> 3;
		old = s->nodetype.reg_u64[index];
		s->nodetype.reg_u64[index] = (old & ~mask) | data;
		break;
	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
		/*
		 * ipmap cannot be set at runtime, can be set only at the beginning
		 * of irqchip driver, need not update upper irq level
		 */
		old = s->ipmap.reg_u64;
		s->ipmap.reg_u64 = (old & ~mask) | data;
		break;
	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
		index = (offset - EIOINTC_ENABLE_START) >> 3;
		old = s->enable.reg_u64[index];
		s->enable.reg_u64[index] = (old & ~mask) | data;
		/*
		 * 1: enable irq.
		 * update irq when isr is set.
		 */
		data = s->enable.reg_u64[index] & ~old & s->isr.reg_u64[index];
		while (data) {
			irq = __ffs(data);
			eiointc_update_irq(s, irq + index * 64, 1);
			data &= ~BIT_ULL(irq);
		}
		/*
		 * 0: disable irq.
		 * update irq when isr is set.
		 */
		data = ~s->enable.reg_u64[index] & old & s->isr.reg_u64[index];
		while (data) {
			irq = __ffs(data);
			eiointc_update_irq(s, irq + index * 64, 0);
			data &= ~BIT_ULL(irq);
		}
		break;
	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
		/* do not emulate hw bounced irq routing */
		index = (offset - EIOINTC_BOUNCE_START) >> 3;
		old = s->bounce.reg_u64[index];
		s->bounce.reg_u64[index] = (old & ~mask) | data;
		break;
	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
		index = (offset - EIOINTC_COREISR_START) >> 3;
		/* use attrs to get current cpu index */
		cpu = vcpu->vcpu_id;
		old = s->coreisr.reg_u64[cpu][index];
		/* write 1 to clear interrupt */
		s->coreisr.reg_u64[cpu][index] = old & ~data;
		/* lower the parent line for each irq actually cleared */
		data &= old;
		while (data) {
			irq = __ffs(data);
			eiointc_update_irq(s, irq + index * 64, 0);
			data &= ~BIT_ULL(irq);
		}
		break;
	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
		index = (offset - EIOINTC_COREMAP_START) >> 3;
		old = s->coremap.reg_u64[index];
		s->coremap.reg_u64[index] = (old & ~mask) | data;
		data = s->coremap.reg_u64[index];
		/* re-route the 8 irq lines covered by this u64; migrate pending irqs */
		eiointc_update_sw_coremap(s, index * 8, data, sizeof(data), true);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
296
/*
 * IOCSR write handler for the main eiointc register window: widen the
 * guest value according to @len, pick the matching field mask, and
 * apply the read-modify-write under the device lock.
 */
static int kvm_eiointc_write(struct kvm_vcpu *vcpu,
			struct kvm_io_device *dev,
			gpa_t addr, int len, const void *val)
{
	struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
	unsigned long flags, value;
	u64 mask;
	int ret;

	if (!eiointc) {
		kvm_err("%s: eiointc irqchip not valid!\n", __func__);
		return -EINVAL;
	}

	/* accesses must be naturally aligned */
	if (addr & (len - 1)) {
		kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len);
		return -EINVAL;
	}

	switch (len) {
	case 1:
		value = *(unsigned char *)val;
		mask = 0xFF;
		break;
	case 2:
		value = *(unsigned short *)val;
		mask = USHRT_MAX;
		break;
	case 4:
		value = *(unsigned int *)val;
		mask = UINT_MAX;
		break;
	default:
		value = *(unsigned long *)val;
		mask = ULONG_MAX;
		break;
	}

	vcpu->stat.eiointc_write_exits++;
	spin_lock_irqsave(&eiointc->lock, flags);
	ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, mask);
	spin_unlock_irqrestore(&eiointc->lock, flags);

	return ret;
}
339
/* IOCSR ops for the main eiointc register window */
static const struct kvm_io_device_ops kvm_eiointc_ops = {
	.read = kvm_eiointc_read,
	.write = kvm_eiointc_write,
};
344
/*
 * IOCSR read handler for the eiointc virt-extension window: exposes
 * the feature and config words.  Unknown offsets read back whatever
 * the caller's buffer already held (no error is reported).
 */
static int kvm_eiointc_virt_read(struct kvm_vcpu *vcpu,
				struct kvm_io_device *dev,
				gpa_t addr, int len, void *val)
{
	struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
	u32 *reg = val;
	unsigned long flags;

	if (!eiointc) {
		kvm_err("%s: eiointc irqchip not valid!\n", __func__);
		return -EINVAL;
	}

	addr -= EIOINTC_VIRT_BASE;
	spin_lock_irqsave(&eiointc->lock, flags);
	if (addr == EIOINTC_VIRT_FEATURES)
		*reg = eiointc->features;
	else if (addr == EIOINTC_VIRT_CONFIG)
		*reg = eiointc->status;
	spin_unlock_irqrestore(&eiointc->lock, flags);

	return 0;
}
374
/*
 * IOCSR write handler for the eiointc virt-extension window.  The
 * feature word is read-only (-EPERM); the config word may only be set
 * to a non-zero value while the chip is still disabled, and is always
 * clamped to the advertised feature bits.
 */
static int kvm_eiointc_virt_write(struct kvm_vcpu *vcpu,
				struct kvm_io_device *dev,
				gpa_t addr, int len, const void *val)
{
	struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
	u32 value = *(u32 *)val;
	unsigned long flags;
	int ret = 0;

	if (!eiointc) {
		kvm_err("%s: eiointc irqchip not valid!\n", __func__);
		return -EINVAL;
	}

	addr -= EIOINTC_VIRT_BASE;
	spin_lock_irqsave(&eiointc->lock, flags);
	if (addr == EIOINTC_VIRT_FEATURES) {
		ret = -EPERM;
	} else if (addr == EIOINTC_VIRT_CONFIG) {
		/*
		 * eiointc features can only be set at disabled status
		 */
		if ((eiointc->status & BIT(EIOINTC_ENABLE)) && value)
			ret = -EPERM;
		else
			eiointc->status = value & eiointc->features;
	}
	spin_unlock_irqrestore(&eiointc->lock, flags);

	return ret;
}
412
/* IOCSR ops for the eiointc virt-extension register window */
static const struct kvm_io_device_ops kvm_eiointc_virt_ops = {
	.read = kvm_eiointc_virt_read,
	.write = kvm_eiointc_virt_write,
};
417
/*
 * Handle KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL attributes from userspace:
 * set the number of routable vcpus, set the feature word, or finish a
 * restore by rebuilding the software routing state from the already
 * loaded registers.
 */
static int kvm_eiointc_ctrl_access(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	int ret = 0;
	unsigned long flags;
	unsigned long type = (unsigned long)attr->attr;
	u32 i, start_irq, val;
	void __user *data;
	struct loongarch_eiointc *s = dev->kvm->arch.eiointc;

	data = (void __user *)attr->addr;
	/* fetch the user value outside the spinlock; copy_from_user may fault */
	switch (type) {
	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU:
	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE:
		if (copy_from_user(&val, data, 4))
			return -EFAULT;
		break;
	default:
		break;
	}

	spin_lock_irqsave(&s->lock, flags);
	switch (type) {
	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU:
		if (val >= EIOINTC_ROUTE_MAX_VCPUS)
			ret = -EINVAL;
		else
			s->num_cpu = val;
		break;
	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE:
		s->features = val;
		/* without the virt extension the chip is unconditionally enabled */
		if (!(s->features & BIT(EIOINTC_HAS_VIRT_EXTENSION)))
			s->status |= BIT(EIOINTC_ENABLE);
		break;
	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_LOAD_FINISHED:
		/* restore complete: rebuild sw_coreisr and sw_coremap from registers */
		eiointc_set_sw_coreisr(s);
		for (i = 0; i < (EIOINTC_IRQS / 4); i++) {
			start_irq = i * 4;
			eiointc_update_sw_coremap(s, start_irq,
					s->coremap.reg_u32[i], sizeof(u32), false);
		}
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(&s->lock, flags);

	return ret;
}
467
/*
 * Userspace (KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS) access to one 32-bit
 * eiointc register for save/restore.  attr->attr encodes the cpu index
 * in bits 31..16 (used only for the per-cpu coreisr bank) and the
 * register address in bits 15..0; @data is the 4-byte kernel copy of
 * the user value.
 */
static int kvm_eiointc_regs_access(struct kvm_device *dev,
				struct kvm_device_attr *attr,
				bool is_write, int *data)
{
	int addr, cpu, offset, ret = 0;
	unsigned long flags;
	void *p = NULL;
	struct loongarch_eiointc *s;

	s = dev->kvm->arch.eiointc;
	addr = attr->attr;
	cpu = addr >> 16;
	addr &= 0xffff;
	/* resolve the address to a pointer into the register file */
	switch (addr) {
	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
		offset = (addr - EIOINTC_NODETYPE_START) / 4;
		p = &s->nodetype.reg_u32[offset];
		break;
	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
		offset = (addr - EIOINTC_IPMAP_START) / 4;
		p = &s->ipmap.reg_u32[offset];
		break;
	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
		offset = (addr - EIOINTC_ENABLE_START) / 4;
		p = &s->enable.reg_u32[offset];
		break;
	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
		offset = (addr - EIOINTC_BOUNCE_START) / 4;
		p = &s->bounce.reg_u32[offset];
		break;
	case EIOINTC_ISR_START ... EIOINTC_ISR_END:
		offset = (addr - EIOINTC_ISR_START) / 4;
		p = &s->isr.reg_u32[offset];
		break;
	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
		/* coreisr is banked per cpu; reject out-of-range cpu indices */
		if (cpu >= s->num_cpu)
			return -EINVAL;

		offset = (addr - EIOINTC_COREISR_START) / 4;
		p = &s->coreisr.reg_u32[cpu][offset];
		break;
	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
		offset = (addr - EIOINTC_COREMAP_START) / 4;
		p = &s->coremap.reg_u32[offset];
		break;
	default:
		kvm_err("%s: unknown eiointc register, addr = %d\n", __func__, addr);
		return -EINVAL;
	}

	/* copy the 4-byte register under the device lock */
	spin_lock_irqsave(&s->lock, flags);
	if (is_write)
		memcpy(p, data, 4);
	else
		memcpy(data, p, 4);
	spin_unlock_irqrestore(&s->lock, flags);

	return ret;
}
527
/*
 * Userspace (KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS) access to the
 * software state words for save/restore.  num_cpu and features are
 * set only through the CTRL group, so writes to them return success
 * here without touching anything; only the status word is writable.
 */
static int kvm_eiointc_sw_status_access(struct kvm_device *dev,
				struct kvm_device_attr *attr,
				bool is_write, int *data)
{
	int addr, ret = 0;
	unsigned long flags;
	void *p = NULL;
	struct loongarch_eiointc *s;

	s = dev->kvm->arch.eiointc;
	addr = attr->attr;
	addr &= 0xffff;

	switch (addr) {
	case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU:
		/* read-only here; configured via the CTRL group */
		if (is_write)
			return ret;

		p = &s->num_cpu;
		break;
	case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_FEATURE:
		/* read-only here; configured via the CTRL group */
		if (is_write)
			return ret;

		p = &s->features;
		break;
	case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_STATE:
		p = &s->status;
		break;
	default:
		kvm_err("%s: unknown eiointc register, addr = %d\n", __func__, addr);
		return -EINVAL;
	}
	/* copy the 4-byte word under the device lock */
	spin_lock_irqsave(&s->lock, flags);
	if (is_write)
		memcpy(p, data, 4);
	else
		memcpy(data, p, 4);
	spin_unlock_irqrestore(&s->lock, flags);

	return ret;
}
570
kvm_eiointc_get_attr(struct kvm_device * dev,struct kvm_device_attr * attr)571 static int kvm_eiointc_get_attr(struct kvm_device *dev,
572 struct kvm_device_attr *attr)
573 {
574 int ret, data;
575
576 switch (attr->group) {
577 case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS:
578 ret = kvm_eiointc_regs_access(dev, attr, false, &data);
579 if (ret)
580 return ret;
581
582 if (copy_to_user((void __user *)attr->addr, &data, 4))
583 ret = -EFAULT;
584
585 return ret;
586 case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS:
587 ret = kvm_eiointc_sw_status_access(dev, attr, false, &data);
588 if (ret)
589 return ret;
590
591 if (copy_to_user((void __user *)attr->addr, &data, 4))
592 ret = -EFAULT;
593
594 return ret;
595 default:
596 return -EINVAL;
597 }
598 }
599
kvm_eiointc_set_attr(struct kvm_device * dev,struct kvm_device_attr * attr)600 static int kvm_eiointc_set_attr(struct kvm_device *dev,
601 struct kvm_device_attr *attr)
602 {
603 int data;
604
605 switch (attr->group) {
606 case KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL:
607 return kvm_eiointc_ctrl_access(dev, attr);
608 case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS:
609 if (copy_from_user(&data, (void __user *)attr->addr, 4))
610 return -EFAULT;
611
612 return kvm_eiointc_regs_access(dev, attr, true, &data);
613 case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS:
614 if (copy_from_user(&data, (void __user *)attr->addr, 4))
615 return -EFAULT;
616
617 return kvm_eiointc_sw_status_access(dev, attr, true, &data);
618 default:
619 return -EINVAL;
620 }
621 }
622
/*
 * Create the eiointc device: allocate the state, then register the
 * main and virt-extension IOCSR windows on the KVM IOCSR bus.
 *
 * Fix: kvm_io_bus_register_dev() and kvm_io_bus_unregister_dev()
 * require the caller to hold kvm->slots_lock; the original code only
 * locked around the first registration and called the second
 * registration and the error-path unregistration unlocked.  Hold the
 * lock across both registrations instead.
 */
static int kvm_eiointc_create(struct kvm_device *dev, u32 type)
{
	int ret;
	struct loongarch_eiointc *s;
	struct kvm *kvm = dev->kvm;

	/* eiointc has been created */
	if (kvm->arch.eiointc)
		return -EINVAL;

	s = kzalloc(sizeof(struct loongarch_eiointc), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	spin_lock_init(&s->lock);
	s->kvm = kvm;

	/*
	 * Initialize IOCSR device
	 */
	kvm_iodevice_init(&s->device, &kvm_eiointc_ops);
	kvm_iodevice_init(&s->device_vext, &kvm_eiointc_virt_ops);

	/* io bus (un)registration must run under kvm->slots_lock */
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS,
			EIOINTC_BASE, EIOINTC_SIZE, &s->device);
	if (ret < 0)
		goto err_unlock;

	ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS,
			EIOINTC_VIRT_BASE, EIOINTC_VIRT_SIZE, &s->device_vext);
	if (ret < 0) {
		kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &s->device);
		goto err_unlock;
	}
	mutex_unlock(&kvm->slots_lock);

	kvm->arch.eiointc = s;

	return 0;

err_unlock:
	mutex_unlock(&kvm->slots_lock);
	kfree(s);
	return ret;
}
668
kvm_eiointc_destroy(struct kvm_device * dev)669 static void kvm_eiointc_destroy(struct kvm_device *dev)
670 {
671 struct kvm *kvm;
672 struct loongarch_eiointc *eiointc;
673
674 if (!dev || !dev->kvm || !dev->kvm->arch.eiointc)
675 return;
676
677 kvm = dev->kvm;
678 eiointc = kvm->arch.eiointc;
679 kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &eiointc->device);
680 kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &eiointc->device_vext);
681 kfree(eiointc);
682 }
683
/* Ops table for the KVM_DEV_TYPE_LOONGARCH_EIOINTC device */
static struct kvm_device_ops kvm_eiointc_dev_ops = {
	.name = "kvm-loongarch-eiointc",
	.create = kvm_eiointc_create,
	.destroy = kvm_eiointc_destroy,
	.set_attr = kvm_eiointc_set_attr,
	.get_attr = kvm_eiointc_get_attr,
};
691
/*
 * Register the eiointc device type with the generic KVM device
 * framework so userspace can create it via KVM_CREATE_DEVICE.
 */
int kvm_loongarch_register_eiointc_device(void)
{
	return kvm_register_device_ops(&kvm_eiointc_dev_ops, KVM_DEV_TYPE_LOONGARCH_EIOINTC);
}
696