// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */
9
10 #include <linux/bits.h>
11 #include <linux/irqchip/riscv-imsic.h>
12 #include <linux/kvm_host.h>
13 #include <linux/uaccess.h>
14 #include <linux/cpufeature.h>
15
/*
 * KVM_CREATE_DEVICE callback: create the in-kernel AIA irqchip for a VM.
 *
 * Fails with -EEXIST if an in-kernel irqchip already exists, -ENODEV if
 * the host lacks the Ssaia extension, and -EBUSY if the VCPUs cannot all
 * be locked or any VCPU has already run.
 */
static int aia_create(struct kvm_device *dev, u32 type)
{
	struct kvm *kvm = dev->kvm;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	int ret = -EBUSY;

	/* Only one in-kernel irqchip per VM */
	if (irqchip_in_kernel(kvm))
		return -EEXIST;

	/* Host must implement the Ssaia ISA extension */
	if (!riscv_isa_extension_available(NULL, SSAIA))
		return -ENODEV;

	if (kvm_trylock_all_vcpus(kvm))
		return -EBUSY;

	/*
	 * The irqchip must be created before any VCPU runs, otherwise
	 * already-running VCPUs would be inconsistent with it.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.ran_atleast_once)
			goto out_unlock;
	}

	kvm->arch.aia.in_kernel = true;
	ret = 0;

out_unlock:
	kvm_unlock_all_vcpus(kvm);
	return ret;
}
45
/* Destroy callback: free the device struct allocated by the KVM core. */
static void aia_destroy(struct kvm_device *dev)
{
	kfree(dev);
}
50
/*
 * Read or write one scalar parameter of the AIA configuration
 * (KVM_DEV_RISCV_AIA_GRP_CONFIG group).
 *
 * @kvm:   VM whose AIA configuration is accessed
 * @type:  KVM_DEV_RISCV_AIA_CONFIG_* attribute selecting the parameter
 * @nr:    in/out value (value to store on write, result on read)
 * @write: true to update the parameter, false to fetch it
 *
 * Returns 0 on success, -EBUSY for writes after the irqchip has been
 * initialized, -EINVAL for out-of-range values, or -ENXIO for an
 * unknown @type.
 */
static int aia_config(struct kvm *kvm, unsigned long type,
		      u32 *nr, bool write)
{
	struct kvm_aia *aia = &kvm->arch.aia;

	/* Writes can only be done before irqchip is initialized */
	if (write && kvm_riscv_aia_initialized(kvm))
		return -EBUSY;

	switch (type) {
	case KVM_DEV_RISCV_AIA_CONFIG_MODE:
		if (write) {
			switch (*nr) {
			case KVM_DEV_RISCV_AIA_MODE_EMUL:
				break;
			case KVM_DEV_RISCV_AIA_MODE_HWACCEL:
			case KVM_DEV_RISCV_AIA_MODE_AUTO:
				/*
				 * HW Acceleration and Auto modes only
				 * supported on host with non-zero guest
				 * external interrupts (i.e. non-zero
				 * VS-level IMSIC pages).
				 */
				if (!kvm_riscv_aia_nr_hgei)
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
			aia->mode = *nr;
		} else
			*nr = aia->mode;
		break;
	case KVM_DEV_RISCV_AIA_CONFIG_IDS:
		if (write) {
			/*
			 * Number of IDs must be within [MIN, MAX), have all
			 * bits of KVM_DEV_RISCV_AIA_IDS_MIN set, and not
			 * exceed what the host IMSIC supports.
			 */
			if ((*nr < KVM_DEV_RISCV_AIA_IDS_MIN) ||
			    (*nr >= KVM_DEV_RISCV_AIA_IDS_MAX) ||
			    ((*nr & KVM_DEV_RISCV_AIA_IDS_MIN) !=
			     KVM_DEV_RISCV_AIA_IDS_MIN) ||
			    (kvm_riscv_aia_max_ids <= *nr))
				return -EINVAL;
			aia->nr_ids = *nr;
		} else
			*nr = aia->nr_ids;
		break;
	case KVM_DEV_RISCV_AIA_CONFIG_SRCS:
		if (write) {
			/* Wired sources are bounded by both limits */
			if ((*nr >= KVM_DEV_RISCV_AIA_SRCS_MAX) ||
			    (*nr >= kvm_riscv_aia_max_ids))
				return -EINVAL;
			aia->nr_sources = *nr;
		} else
			*nr = aia->nr_sources;
		break;
	case KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS:
		if (write) {
			if (*nr >= KVM_DEV_RISCV_AIA_GROUP_BITS_MAX)
				return -EINVAL;
			aia->nr_group_bits = *nr;
		} else
			*nr = aia->nr_group_bits;
		break;
	case KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT:
		if (write) {
			if ((*nr < KVM_DEV_RISCV_AIA_GROUP_SHIFT_MIN) ||
			    (*nr >= KVM_DEV_RISCV_AIA_GROUP_SHIFT_MAX))
				return -EINVAL;
			aia->nr_group_shift = *nr;
		} else
			*nr = aia->nr_group_shift;
		break;
	case KVM_DEV_RISCV_AIA_CONFIG_HART_BITS:
		if (write) {
			if (*nr >= KVM_DEV_RISCV_AIA_HART_BITS_MAX)
				return -EINVAL;
			aia->nr_hart_bits = *nr;
		} else
			*nr = aia->nr_hart_bits;
		break;
	case KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS:
		if (write) {
			if (*nr >= KVM_DEV_RISCV_AIA_GUEST_BITS_MAX)
				return -EINVAL;
			aia->nr_guest_bits = *nr;
		} else
			*nr = aia->nr_guest_bits;
		break;
	default:
		return -ENXIO;
	}

	return 0;
}
144
/*
 * Read or write the guest-physical base address of the emulated APLIC.
 * Writes are only allowed before irqchip initialization and must be
 * aligned to KVM_DEV_RISCV_APLIC_ALIGN.
 */
static int aia_aplic_addr(struct kvm *kvm, u64 *addr, bool write)
{
	struct kvm_aia *aia = &kvm->arch.aia;

	if (!write) {
		*addr = aia->aplic_addr;
		return 0;
	}

	/* Writes can only be done before irqchip is initialized */
	if (kvm_riscv_aia_initialized(kvm))
		return -EBUSY;

	if (*addr & (KVM_DEV_RISCV_APLIC_ALIGN - 1))
		return -EINVAL;

	aia->aplic_addr = *addr;
	return 0;
}
163
/*
 * Read or write the guest-physical IMSIC base address of VCPU @vcpu_idx.
 * Writes are only allowed before irqchip initialization and must be
 * aligned to KVM_DEV_RISCV_IMSIC_ALIGN. The per-VCPU mutex serializes
 * the access against the VCPU itself.
 */
static int aia_imsic_addr(struct kvm *kvm, u64 *addr,
			  unsigned long vcpu_idx, bool write)
{
	struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, vcpu_idx);
	struct kvm_vcpu_aia *vaia;

	if (!vcpu)
		return -EINVAL;
	vaia = &vcpu->arch.aia_context;

	if (write) {
		/* Writes can only be done before irqchip is initialized */
		if (kvm_riscv_aia_initialized(kvm))
			return -EBUSY;

		if (*addr & (KVM_DEV_RISCV_IMSIC_ALIGN - 1))
			return -EINVAL;
	}

	mutex_lock(&vcpu->mutex);
	if (write)
		vaia->imsic_addr = *addr;
	else
		*addr = vaia->imsic_addr;
	mutex_unlock(&vcpu->mutex);

	return 0;
}
193
/*
 * Compute the common base PPN of an IMSIC address by masking out the
 * per-VCPU index bits (group, hart, guest, page offset). All VCPU IMSIC
 * addresses must yield the same base PPN.
 */
static gpa_t aia_imsic_ppn(struct kvm_aia *aia, gpa_t addr)
{
	gpa_t index_mask;
	u32 hi, lo;

	/* Bits covering hart index, guest index and page offset */
	hi = aia->nr_hart_bits + aia->nr_guest_bits +
	     IMSIC_MMIO_PAGE_SHIFT - 1;
	index_mask = GENMASK_ULL(hi, 0);

	/* Bits covering the group index, when groups are configured */
	if (aia->nr_group_bits) {
		hi = aia->nr_group_bits + aia->nr_group_shift - 1;
		lo = aia->nr_group_shift;
		index_mask |= GENMASK_ULL(hi, lo);
	}

	return (addr & ~index_mask) >> IMSIC_MMIO_PAGE_SHIFT;
}
211
aia_imsic_hart_index(struct kvm_aia * aia,gpa_t addr)212 static u32 aia_imsic_hart_index(struct kvm_aia *aia, gpa_t addr)
213 {
214 u32 hart = 0, group = 0;
215
216 if (aia->nr_hart_bits)
217 hart = (addr >> (aia->nr_guest_bits + IMSIC_MMIO_PAGE_SHIFT)) &
218 GENMASK_ULL(aia->nr_hart_bits - 1, 0);
219 if (aia->nr_group_bits)
220 group = (addr >> aia->nr_group_shift) &
221 GENMASK_ULL(aia->nr_group_bits - 1, 0);
222
223 return (group << aia->nr_hart_bits) | hart;
224 }
225
/*
 * One-time initialization of the in-kernel AIA irqchip
 * (KVM_DEV_RISCV_AIA_CTRL_INIT); called with kvm->lock held.
 *
 * Validates the configuration, initializes the APLIC, then initializes
 * the IMSIC of every VCPU. On failure, all partially initialized state
 * is torn down again before returning the error.
 */
static int aia_init(struct kvm *kvm)
{
	int ret, i;
	unsigned long idx;
	struct kvm_vcpu *vcpu;
	struct kvm_vcpu_aia *vaia;
	struct kvm_aia *aia = &kvm->arch.aia;
	gpa_t base_ppn = KVM_RISCV_AIA_UNDEF_ADDR;

	/* Irqchip can be initialized only once */
	if (kvm_riscv_aia_initialized(kvm))
		return -EBUSY;

	/* We might be in the middle of creating a VCPU? */
	if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus))
		return -EBUSY;

	/* Number of sources should be less than or equals number of IDs */
	if (aia->nr_ids < aia->nr_sources)
		return -EINVAL;

	/* APLIC base is required for non-zero number of sources */
	if (aia->nr_sources && aia->aplic_addr == KVM_RISCV_AIA_UNDEF_ADDR)
		return -EINVAL;

	/* Initialize APLIC */
	ret = kvm_riscv_aia_aplic_init(kvm);
	if (ret)
		return ret;

	/* Iterate over each VCPU */
	kvm_for_each_vcpu(idx, vcpu, kvm) {
		vaia = &vcpu->arch.aia_context;

		/* IMSIC base is required */
		if (vaia->imsic_addr == KVM_RISCV_AIA_UNDEF_ADDR) {
			ret = -EINVAL;
			goto fail_cleanup_imsics;
		}

		/* All IMSICs should have matching base PPN */
		if (base_ppn == KVM_RISCV_AIA_UNDEF_ADDR)
			base_ppn = aia_imsic_ppn(aia, vaia->imsic_addr);
		if (base_ppn != aia_imsic_ppn(aia, vaia->imsic_addr)) {
			ret = -EINVAL;
			goto fail_cleanup_imsics;
		}

		/* Update HART index of the IMSIC based on IMSIC base */
		vaia->hart_index = aia_imsic_hart_index(aia,
							vaia->imsic_addr);

		/* Initialize IMSIC for this VCPU */
		ret = kvm_riscv_vcpu_aia_imsic_init(vcpu);
		if (ret)
			goto fail_cleanup_imsics;
	}

	/* Set the initialized flag */
	kvm->arch.aia.initialized = true;

	return 0;

fail_cleanup_imsics:
	/* Undo IMSIC init for every VCPU processed before the failure */
	for (i = idx - 1; i >= 0; i--) {
		vcpu = kvm_get_vcpu(kvm, i);
		if (!vcpu)
			continue;
		kvm_riscv_vcpu_aia_imsic_cleanup(vcpu);
	}
	kvm_riscv_aia_aplic_cleanup(kvm);
	return ret;
}
299
/*
 * KVM_SET_DEVICE_ATTR handler for the AIA device.
 *
 * Copies the attribute payload from user-space, then dispatches on the
 * attribute group under kvm->lock. Returns -ENXIO for unknown groups or
 * attributes, -EFAULT on user-copy failure, else the group handler's
 * result.
 */
static int aia_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	u32 nr;
	u64 addr;
	int nr_vcpus, r = -ENXIO;
	unsigned long v, type = (unsigned long)attr->attr;
	void __user *uaddr = (void __user *)(long)attr->addr;

	switch (attr->group) {
	case KVM_DEV_RISCV_AIA_GRP_CONFIG:
		/* 32-bit scalar configuration parameter */
		if (copy_from_user(&nr, uaddr, sizeof(nr)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = aia_config(dev->kvm, type, &nr, true);
		mutex_unlock(&dev->kvm->lock);

		break;

	case KVM_DEV_RISCV_AIA_GRP_ADDR:
		/* 64-bit guest-physical base address */
		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		/* APLIC base, or one IMSIC base per online VCPU */
		nr_vcpus = atomic_read(&dev->kvm->online_vcpus);
		mutex_lock(&dev->kvm->lock);
		if (type == KVM_DEV_RISCV_AIA_ADDR_APLIC)
			r = aia_aplic_addr(dev->kvm, &addr, true);
		else if (type < KVM_DEV_RISCV_AIA_ADDR_IMSIC(nr_vcpus))
			r = aia_imsic_addr(dev->kvm, &addr,
			       type - KVM_DEV_RISCV_AIA_ADDR_IMSIC(0), true);
		mutex_unlock(&dev->kvm->lock);

		break;

	case KVM_DEV_RISCV_AIA_GRP_CTRL:
		switch (type) {
		case KVM_DEV_RISCV_AIA_CTRL_INIT:
			/* Finalize the irqchip; see aia_init() */
			mutex_lock(&dev->kvm->lock);
			r = aia_init(dev->kvm);
			mutex_unlock(&dev->kvm->lock);
			break;
		}

		break;
	case KVM_DEV_RISCV_AIA_GRP_APLIC:
		/* 32-bit APLIC register value */
		if (copy_from_user(&nr, uaddr, sizeof(nr)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = kvm_riscv_aia_aplic_set_attr(dev->kvm, type, nr);
		mutex_unlock(&dev->kvm->lock);

		break;
	case KVM_DEV_RISCV_AIA_GRP_IMSIC:
		/* XLEN-sized IMSIC register value */
		if (copy_from_user(&v, uaddr, sizeof(v)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = kvm_riscv_aia_imsic_rw_attr(dev->kvm, type, true, &v);
		mutex_unlock(&dev->kvm->lock);

		break;
	}

	return r;
}
366
/*
 * KVM_GET_DEVICE_ATTR handler for the AIA device.
 *
 * Mirrors aia_set_attr(): the payload is first copied in (GRP_ADDR and
 * GRP_IMSIC pass it through to the handler), the group handler runs
 * under kvm->lock, and the result is copied back to user-space.
 * Returns -ENXIO for unknown groups/attributes, -EFAULT on user-copy
 * failure, else the group handler's result.
 */
static int aia_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	u32 nr;
	u64 addr;
	int nr_vcpus, r = -ENXIO;
	void __user *uaddr = (void __user *)(long)attr->addr;
	unsigned long v, type = (unsigned long)attr->attr;

	switch (attr->group) {
	case KVM_DEV_RISCV_AIA_GRP_CONFIG:
		/* 32-bit scalar configuration parameter */
		if (copy_from_user(&nr, uaddr, sizeof(nr)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = aia_config(dev->kvm, type, &nr, false);
		mutex_unlock(&dev->kvm->lock);
		if (r)
			return r;

		if (copy_to_user(uaddr, &nr, sizeof(nr)))
			return -EFAULT;

		break;
	case KVM_DEV_RISCV_AIA_GRP_ADDR:
		/* 64-bit guest-physical base address */
		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		/* APLIC base, or one IMSIC base per online VCPU */
		nr_vcpus = atomic_read(&dev->kvm->online_vcpus);
		mutex_lock(&dev->kvm->lock);
		if (type == KVM_DEV_RISCV_AIA_ADDR_APLIC)
			r = aia_aplic_addr(dev->kvm, &addr, false);
		else if (type < KVM_DEV_RISCV_AIA_ADDR_IMSIC(nr_vcpus))
			r = aia_imsic_addr(dev->kvm, &addr,
			       type - KVM_DEV_RISCV_AIA_ADDR_IMSIC(0), false);
		mutex_unlock(&dev->kvm->lock);
		if (r)
			return r;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;

		break;
	case KVM_DEV_RISCV_AIA_GRP_APLIC:
		/* 32-bit APLIC register value */
		if (copy_from_user(&nr, uaddr, sizeof(nr)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = kvm_riscv_aia_aplic_get_attr(dev->kvm, type, &nr);
		mutex_unlock(&dev->kvm->lock);
		if (r)
			return r;

		if (copy_to_user(uaddr, &nr, sizeof(nr)))
			return -EFAULT;

		break;
	case KVM_DEV_RISCV_AIA_GRP_IMSIC:
		/* XLEN-sized IMSIC register value */
		if (copy_from_user(&v, uaddr, sizeof(v)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = kvm_riscv_aia_imsic_rw_attr(dev->kvm, type, false, &v);
		mutex_unlock(&dev->kvm->lock);
		if (r)
			return r;

		if (copy_to_user(uaddr, &v, sizeof(v)))
			return -EFAULT;

		break;
	}

	return r;
}
441
aia_has_attr(struct kvm_device * dev,struct kvm_device_attr * attr)442 static int aia_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
443 {
444 int nr_vcpus, r = -ENXIO;
445
446 switch (attr->group) {
447 case KVM_DEV_RISCV_AIA_GRP_CONFIG:
448 switch (attr->attr) {
449 case KVM_DEV_RISCV_AIA_CONFIG_MODE:
450 case KVM_DEV_RISCV_AIA_CONFIG_IDS:
451 case KVM_DEV_RISCV_AIA_CONFIG_SRCS:
452 case KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS:
453 case KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT:
454 case KVM_DEV_RISCV_AIA_CONFIG_HART_BITS:
455 case KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS:
456 return 0;
457 }
458 break;
459 case KVM_DEV_RISCV_AIA_GRP_ADDR:
460 nr_vcpus = atomic_read(&dev->kvm->online_vcpus);
461 if (attr->attr == KVM_DEV_RISCV_AIA_ADDR_APLIC)
462 return 0;
463 else if (attr->attr < KVM_DEV_RISCV_AIA_ADDR_IMSIC(nr_vcpus))
464 return 0;
465 break;
466 case KVM_DEV_RISCV_AIA_GRP_CTRL:
467 switch (attr->attr) {
468 case KVM_DEV_RISCV_AIA_CTRL_INIT:
469 return 0;
470 }
471 break;
472 case KVM_DEV_RISCV_AIA_GRP_APLIC:
473 mutex_lock(&dev->kvm->lock);
474 r = kvm_riscv_aia_aplic_has_attr(dev->kvm, attr->attr);
475 mutex_unlock(&dev->kvm->lock);
476 break;
477 case KVM_DEV_RISCV_AIA_GRP_IMSIC:
478 mutex_lock(&dev->kvm->lock);
479 r = kvm_riscv_aia_imsic_has_attr(dev->kvm, attr->attr);
480 mutex_unlock(&dev->kvm->lock);
481 break;
482 }
483
484 return r;
485 }
486
/* KVM device ops for the in-kernel RISC-V AIA irqchip ("kvm-riscv-aia"). */
struct kvm_device_ops kvm_riscv_aia_device_ops = {
	.name = "kvm-riscv-aia",
	.create = aia_create,
	.destroy = aia_destroy,
	.set_attr = aia_set_attr,
	.get_attr = aia_get_attr,
	.has_attr = aia_has_attr,
};
495
kvm_riscv_vcpu_aia_update(struct kvm_vcpu * vcpu)496 int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu)
497 {
498 /* Proceed only if AIA was initialized successfully */
499 if (!kvm_riscv_aia_initialized(vcpu->kvm))
500 return 1;
501
502 /* Update the IMSIC HW state before entering guest mode */
503 return kvm_riscv_vcpu_aia_imsic_update(vcpu);
504 }
505
kvm_riscv_vcpu_aia_reset(struct kvm_vcpu * vcpu)506 void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu)
507 {
508 struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
509
510 if (!kvm_riscv_aia_available())
511 return;
512 memset(csr, 0, sizeof(*csr));
513
514 /* Proceed only if AIA was initialized successfully */
515 if (!kvm_riscv_aia_initialized(vcpu->kvm))
516 return;
517
518 /* Reset the IMSIC context */
519 kvm_riscv_vcpu_aia_imsic_reset(vcpu);
520 }
521
kvm_riscv_vcpu_aia_init(struct kvm_vcpu * vcpu)522 void kvm_riscv_vcpu_aia_init(struct kvm_vcpu *vcpu)
523 {
524 struct kvm_vcpu_aia *vaia = &vcpu->arch.aia_context;
525
526 if (!kvm_riscv_aia_available())
527 return;
528
529 /*
530 * We don't do any memory allocations over here because these
531 * will be done after AIA device is initialized by the user-space.
532 *
533 * Refer, aia_init() implementation for more details.
534 */
535
536 /* Initialize default values in AIA vcpu context */
537 vaia->imsic_addr = KVM_RISCV_AIA_UNDEF_ADDR;
538 vaia->hart_index = vcpu->vcpu_idx;
539 }
540
kvm_riscv_vcpu_aia_deinit(struct kvm_vcpu * vcpu)541 void kvm_riscv_vcpu_aia_deinit(struct kvm_vcpu *vcpu)
542 {
543 /* Proceed only if AIA was initialized successfully */
544 if (!kvm_riscv_aia_initialized(vcpu->kvm))
545 return;
546
547 /* Cleanup IMSIC context */
548 kvm_riscv_vcpu_aia_imsic_cleanup(vcpu);
549 }
550
/*
 * Inject MSI @iid into the IMSIC guest file @guest_index of the VCPU
 * whose hart index is @hart_index. Returns 0 when no VCPU matches,
 * -EBUSY when AIA is not initialized, else the injection result.
 */
int kvm_riscv_aia_inject_msi_by_id(struct kvm *kvm, u32 hart_index,
				   u32 guest_index, u32 iid)
{
	struct kvm_vcpu *vcpu;
	unsigned long idx;

	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(kvm))
		return -EBUSY;

	/* Deliver to the VCPU whose IMSIC owns @hart_index */
	kvm_for_each_vcpu(idx, vcpu, kvm) {
		if (vcpu->arch.aia_context.hart_index != hart_index)
			continue;
		return kvm_riscv_vcpu_aia_imsic_inject(vcpu, guest_index,
						       0, iid);
	}

	return 0;
}
571
/*
 * Inject an MSI by decoding the target guest-physical address from a
 * struct kvm_msi (user-space or irqfd) and routing it to the VCPU whose
 * IMSIC page matches.
 *
 * Returns 0 (also when no VCPU matched), -EBUSY when AIA has not been
 * initialized, else the result of the IMSIC injection.
 */
int kvm_riscv_aia_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
{
	gpa_t tppn, ippn;
	unsigned long idx;
	struct kvm_vcpu *vcpu;
	u32 g, toff, iid = msi->data;
	struct kvm_aia *aia = &kvm->arch.aia;
	gpa_t target = (((gpa_t)msi->address_hi) << 32) | msi->address_lo;

	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(kvm))
		return -EBUSY;

	/* Convert target address to target PPN */
	tppn = target >> IMSIC_MMIO_PAGE_SHIFT;

	/* Extract and clear Guest ID from target PPN */
	g = tppn & (BIT(aia->nr_guest_bits) - 1);
	tppn &= ~((gpa_t)(BIT(aia->nr_guest_bits) - 1));

	/* Inject MSI to matching VCPU */
	kvm_for_each_vcpu(idx, vcpu, kvm) {
		ippn = vcpu->arch.aia_context.imsic_addr >>
		       IMSIC_MMIO_PAGE_SHIFT;
		if (ippn == tppn) {
			/* Offset within the IMSIC page selects the register */
			toff = target & (IMSIC_MMIO_PAGE_SZ - 1);
			return kvm_riscv_vcpu_aia_imsic_inject(vcpu, g,
							       toff, iid);
		}
	}

	return 0;
}
605
/*
 * Inject a wired-interrupt level change. Returns -EBUSY when AIA is
 * not initialized, else the APLIC injection result.
 */
int kvm_riscv_aia_inject_irq(struct kvm *kvm, unsigned int irq, bool level)
{
	if (!kvm_riscv_aia_initialized(kvm))
		return -EBUSY;

	/* Forward the level change to the emulated APLIC */
	return kvm_riscv_aia_aplic_inject(kvm, irq, level);
}
615
kvm_riscv_aia_init_vm(struct kvm * kvm)616 void kvm_riscv_aia_init_vm(struct kvm *kvm)
617 {
618 struct kvm_aia *aia = &kvm->arch.aia;
619
620 if (!kvm_riscv_aia_available())
621 return;
622
623 /*
624 * We don't do any memory allocations over here because these
625 * will be done after AIA device is initialized by the user-space.
626 *
627 * Refer, aia_init() implementation for more details.
628 */
629
630 /* Initialize default values in AIA global context */
631 aia->mode = (kvm_riscv_aia_nr_hgei) ?
632 KVM_DEV_RISCV_AIA_MODE_AUTO : KVM_DEV_RISCV_AIA_MODE_EMUL;
633 aia->nr_ids = kvm_riscv_aia_max_ids - 1;
634 aia->nr_sources = 0;
635 aia->nr_group_bits = 0;
636 aia->nr_group_shift = KVM_DEV_RISCV_AIA_GROUP_SHIFT_MIN;
637 aia->nr_hart_bits = 0;
638 aia->nr_guest_bits = 0;
639 aia->aplic_addr = KVM_RISCV_AIA_UNDEF_ADDR;
640 }
641
/* Release VM-wide AIA state (APLIC context) at VM destruction time. */
void kvm_riscv_aia_destroy_vm(struct kvm *kvm)
{
	/* APLIC context exists only for an initialized AIA */
	if (kvm_riscv_aia_initialized(kvm))
		kvm_riscv_aia_aplic_cleanup(kvm);
}
651