// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/irqchip/riscv-imsic.h>
#include <linux/irqdomain.h>
#include <linux/kvm_host.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <asm/cpufeature.h>

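/*
 * Per-CPU book-keeping for guest external interrupt (HGEI) lines:
 * a bitmap of free lines plus the vCPU owning each allocated line.
 * Line 0 is never handed out since HGEIE/HGEIP bit 0 is reserved.
 */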
struct aia_hgei_control {
	raw_spinlock_t lock;
	unsigned long free_bitmap;
	struct kvm_vcpu *owners[BITS_PER_LONG];
};
static DEFINE_PER_CPU(struct aia_hgei_control, aia_hgei);
static int hgei_parent_irq;

unsigned int kvm_riscv_aia_nr_hgei;
unsigned int kvm_riscv_aia_max_ids;
DEFINE_STATIC_KEY_FALSE(kvm_riscv_aia_available);

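/* Find the HGEI line owned by @owner on the current CPU, or -1 if none. */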
static int aia_find_hgei(struct kvm_vcpu *owner)
{
	int i, hgei;
	unsigned long flags;
	struct aia_hgei_control *hgctrl = get_cpu_ptr(&aia_hgei);

	raw_spin_lock_irqsave(&hgctrl->lock, flags);

	hgei = -1;
	for (i = 1; i <= kvm_riscv_aia_nr_hgei; i++) {
		if (hgctrl->owners[i] == owner) {
			hgei = i;
			break;
		}
	}

	raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

	put_cpu_ptr(&aia_hgei);
	return hgei;
}

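/* Program HVICTL to reflect whether a guest external interrupt is pending. */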
static void aia_set_hvictl(bool ext_irq_pending)
{
	unsigned long hvictl;

	/*
	 * HVICTL.IID == 9 and HVICTL.IPRIO == 0 represents
	 * no interrupt in HVICTL.
	 */
	hvictl = (IRQ_S_EXT << HVICTL_IID_SHIFT) & HVICTL_IID;
	hvictl |= ext_irq_pending;
	csr_write(CSR_HVICTL, hvictl);
}

#ifdef CONFIG_32BIT
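/*
 * On 32-bit hosts, the upper 32 interrupt bits live in separate *H CSRs.
 * Fold any pending high-word updates into the shadow HVIPH value.
 */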
void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
	unsigned long mask, val;

	if (!kvm_riscv_aia_available())
		return;

	if (READ_ONCE(vcpu->arch.irqs_pending_mask[1])) {
		mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[1], 0);
		val = READ_ONCE(vcpu->arch.irqs_pending[1]) & mask;

		csr->hviph &= ~mask;
		csr->hviph |= val;
	}
}

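/* Snapshot the upper half of the guest interrupt-enable state (VSIEH). */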
void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;

	if (kvm_riscv_aia_available())
		csr->vsieh = csr_read(CSR_VSIEH);
}
#endif

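/*
 * Check whether the vCPU has an AIA interrupt that is both pending and
 * enabled. For external interrupts, consult the HGEIP bit of the HGEI
 * line backing this vCPU.
 */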
bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
{
	int hgei;
	unsigned long seip;

	if (!kvm_riscv_aia_available())
		return false;

#ifdef CONFIG_32BIT
	if (READ_ONCE(vcpu->arch.irqs_pending[1]) &
	    (vcpu->arch.aia_context.guest_csr.vsieh & upper_32_bits(mask)))
		return true;
#endif

	seip = vcpu->arch.guest_csr.vsie;
	seip &= (unsigned long)mask;
	seip &= BIT(IRQ_S_EXT);

	if (!kvm_riscv_aia_initialized(vcpu->kvm) || !seip)
		return false;

	hgei = aia_find_hgei(vcpu);
	if (hgei > 0)
		return !!(csr_read(CSR_HGEIP) & BIT(hgei));

	return false;
}

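/* Propagate the vCPU's pending-interrupt state into HVIPH and HVICTL. */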
void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (!kvm_riscv_aia_available())
		return;

#ifdef CONFIG_32BIT
	csr_write(CSR_HVIPH, vcpu->arch.aia_context.guest_csr.hviph);
#endif
	aia_set_hvictl(!!(csr->hvip & BIT(IRQ_VS_EXT)));
}

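/* Restore the vCPU's AIA CSR state on the host CPU it is loaded on. */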
void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;

	if (!kvm_riscv_aia_available())
		return;

	csr_write(CSR_VSISELECT, csr->vsiselect);
	csr_write(CSR_HVIPRIO1, csr->hviprio1);
	csr_write(CSR_HVIPRIO2, csr->hviprio2);
#ifdef CONFIG_32BIT
	csr_write(CSR_VSIEH, csr->vsieh);
	csr_write(CSR_HVIPH, csr->hviph);
	csr_write(CSR_HVIPRIO1H, csr->hviprio1h);
	csr_write(CSR_HVIPRIO2H, csr->hviprio2h);
#endif
}

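/* Save the vCPU's AIA CSR state when the vCPU is scheduled out. */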
void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;

	if (!kvm_riscv_aia_available())
		return;

	csr->vsiselect = csr_read(CSR_VSISELECT);
	csr->hviprio1 = csr_read(CSR_HVIPRIO1);
	csr->hviprio2 = csr_read(CSR_HVIPRIO2);
#ifdef CONFIG_32BIT
	csr->vsieh = csr_read(CSR_VSIEH);
	csr->hviph = csr_read(CSR_HVIPH);
	csr->hviprio1h = csr_read(CSR_HVIPRIO1H);
	csr->hviprio2h = csr_read(CSR_HVIPRIO2H);
#endif
}

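/* Get one AIA CSR from the vCPU context (ONE_REG interface). */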
int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu,
			       unsigned long reg_num,
			       unsigned long *out_val)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
		return -ENOENT;

	*out_val = 0;
	if (kvm_riscv_aia_available())
		*out_val = ((unsigned long *)csr)[reg_num];

	return 0;
}

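/* Set one AIA CSR in the vCPU context (ONE_REG interface). */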
int kvm_riscv_vcpu_aia_set_csr(struct kvm_vcpu *vcpu,
			       unsigned long reg_num,
			       unsigned long val)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (kvm_riscv_aia_available()) {
		((unsigned long *)csr)[reg_num] = val;

#ifdef CONFIG_32BIT
		if (reg_num == KVM_REG_RISCV_CSR_AIA_REG(siph))
			WRITE_ONCE(vcpu->arch.irqs_pending_mask[1], 0);
#endif
	}

	return 0;
}

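/* Emulate read-modify-write access to the guest's IMSIC TOPEI register. */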
int kvm_riscv_vcpu_aia_rmw_topei(struct kvm_vcpu *vcpu,
				 unsigned int csr_num,
				 unsigned long *val,
				 unsigned long new_val,
				 unsigned long wr_mask)
{
	/* If AIA not available then redirect trap */
	if (!kvm_riscv_aia_available())
		return KVM_INSN_ILLEGAL_TRAP;

	/* If AIA not initialized then forward to user space */
	if (!kvm_riscv_aia_initialized(vcpu->kvm))
		return KVM_INSN_EXIT_TO_USER_SPACE;

	return kvm_riscv_vcpu_aia_imsic_rmw(vcpu, KVM_RISCV_AIA_IMSIC_TOPEI,
					    val, new_val, wr_mask);
}

/*
 * External IRQ priority is always read-only zero. This means the
 * default priority order is always preferred for external IRQs
 * unless HVICTL.IID == 9 and HVICTL.IPRIO != 0.
 */
static int aia_irq2bitpos[] = {
	0,     8,   -1,   -1,  16,  24,  -1,  -1, /* 0 - 7 */
	32,   -1,   -1,   -1,  -1,  40,  48,  56, /* 8 - 15 */
	64,   72,   80,   88,  96, 104, 112, 120, /* 16 - 23 */
	-1,   -1,   -1,   -1,  -1,  -1,  -1,  -1, /* 24 - 31 */
	-1,   -1,   -1,   -1,  -1,  -1,  -1,  -1, /* 32 - 39 */
	-1,   -1,   -1,   -1,  -1,  -1,  -1,  -1, /* 40 - 47 */
	-1,   -1,   -1,   -1,  -1,  -1,  -1,  -1, /* 48 - 55 */
	-1,   -1,   -1,   -1,  -1,  -1,  -1,  -1, /* 56 - 63 */
};

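/*
 * Read the 8-bit priority of @irq from the appropriate HVIPRIO* CSR,
 * e.g. IRQ 1 lives at bits 15:8 of HVIPRIO1.
 */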
static u8 aia_get_iprio8(struct kvm_vcpu *vcpu, unsigned int irq)
{
	unsigned long hviprio;
	int bitpos = aia_irq2bitpos[irq];

	if (bitpos < 0)
		return 0;

	switch (bitpos / BITS_PER_LONG) {
	case 0:
		hviprio = csr_read(CSR_HVIPRIO1);
		break;
	case 1:
#ifndef CONFIG_32BIT
		hviprio = csr_read(CSR_HVIPRIO2);
		break;
#else
		hviprio = csr_read(CSR_HVIPRIO1H);
		break;
	case 2:
		hviprio = csr_read(CSR_HVIPRIO2);
		break;
	case 3:
		hviprio = csr_read(CSR_HVIPRIO2H);
		break;
#endif
	default:
		return 0;
	}

	return (hviprio >> (bitpos % BITS_PER_LONG)) & TOPI_IPRIO_MASK;
}

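/* Write the 8-bit priority of @irq into the appropriate HVIPRIO* CSR. */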
static void aia_set_iprio8(struct kvm_vcpu *vcpu, unsigned int irq, u8 prio)
{
	unsigned long hviprio;
	int bitpos = aia_irq2bitpos[irq];

	if (bitpos < 0)
		return;

	switch (bitpos / BITS_PER_LONG) {
	case 0:
		hviprio = csr_read(CSR_HVIPRIO1);
		break;
	case 1:
#ifndef CONFIG_32BIT
		hviprio = csr_read(CSR_HVIPRIO2);
		break;
#else
		hviprio = csr_read(CSR_HVIPRIO1H);
		break;
	case 2:
		hviprio = csr_read(CSR_HVIPRIO2);
		break;
	case 3:
		hviprio = csr_read(CSR_HVIPRIO2H);
		break;
#endif
	default:
		return;
	}

	hviprio &= ~(TOPI_IPRIO_MASK << (bitpos % BITS_PER_LONG));
	hviprio |= (unsigned long)prio << (bitpos % BITS_PER_LONG);

	switch (bitpos / BITS_PER_LONG) {
	case 0:
		csr_write(CSR_HVIPRIO1, hviprio);
		break;
	case 1:
#ifndef CONFIG_32BIT
		csr_write(CSR_HVIPRIO2, hviprio);
		break;
#else
		csr_write(CSR_HVIPRIO1H, hviprio);
		break;
	case 2:
		csr_write(CSR_HVIPRIO2, hviprio);
		break;
	case 3:
		csr_write(CSR_HVIPRIO2H, hviprio);
		break;
#endif
	default:
		return;
	}
}

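/*
 * Emulate read-modify-write of one iprio array register, which packs
 * four (RV32) or eight (RV64) 8-bit IRQ priorities per XLEN-bit word.
 */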
static int aia_rmw_iprio(struct kvm_vcpu *vcpu, unsigned int isel,
			 unsigned long *val, unsigned long new_val,
			 unsigned long wr_mask)
{
	int i, first_irq, nirqs;
	unsigned long old_val;
	u8 prio;

#ifndef CONFIG_32BIT
	if (isel & 0x1)
		return KVM_INSN_ILLEGAL_TRAP;
#endif

	nirqs = 4 * (BITS_PER_LONG / 32);
	first_irq = (isel - ISELECT_IPRIO0) * 4;

	old_val = 0;
	for (i = 0; i < nirqs; i++) {
		prio = aia_get_iprio8(vcpu, first_irq + i);
		old_val |= (unsigned long)prio << (TOPI_IPRIO_BITS * i);
	}

	if (val)
		*val = old_val;

	if (wr_mask) {
		new_val = (old_val & ~wr_mask) | (new_val & wr_mask);
		for (i = 0; i < nirqs; i++) {
			prio = (new_val >> (TOPI_IPRIO_BITS * i)) &
				TOPI_IPRIO_MASK;
			aia_set_iprio8(vcpu, first_irq + i, prio);
		}
	}

	return KVM_INSN_CONTINUE_NEXT_SEPC;
}

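/*
 * Emulate an indirectly accessed AIA register selected by VSISELECT:
 * iprio array registers are handled in-kernel, IMSIC registers go to
 * the in-kernel IMSIC emulation, everything else exits to user space.
 */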
int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
				unsigned long *val, unsigned long new_val,
				unsigned long wr_mask)
{
	unsigned int isel;

	/* If AIA not available then redirect trap */
	if (!kvm_riscv_aia_available())
		return KVM_INSN_ILLEGAL_TRAP;

	/* First try to emulate in kernel space */
	isel = csr_read(CSR_VSISELECT) & ISELECT_MASK;
	if (isel >= ISELECT_IPRIO0 && isel <= ISELECT_IPRIO15)
		return aia_rmw_iprio(vcpu, isel, val, new_val, wr_mask);
	else if (isel >= IMSIC_FIRST && isel <= IMSIC_LAST &&
		 kvm_riscv_aia_initialized(vcpu->kvm))
		return kvm_riscv_vcpu_aia_imsic_rmw(vcpu, isel, val, new_val,
						    wr_mask);

	/* We can't handle it here so redirect to user space */
	return KVM_INSN_EXIT_TO_USER_SPACE;
}

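/*
 * Allocate a free HGEI line on @cpu for @owner and, when an IMSIC is
 * present, report the virtual/physical address of the backing guest
 * MSI page.
 */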
int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner,
			     void __iomem **hgei_va, phys_addr_t *hgei_pa)
{
	int ret = -ENOENT;
	unsigned long flags;
	const struct imsic_global_config *gc;
	const struct imsic_local_config *lc;
	struct aia_hgei_control *hgctrl = per_cpu_ptr(&aia_hgei, cpu);

	if (!kvm_riscv_aia_available() || !hgctrl)
		return -ENODEV;

	raw_spin_lock_irqsave(&hgctrl->lock, flags);

	if (hgctrl->free_bitmap) {
		ret = __ffs(hgctrl->free_bitmap);
		hgctrl->free_bitmap &= ~BIT(ret);
		hgctrl->owners[ret] = owner;
	}

	raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

	gc = imsic_get_global_config();
	lc = (gc) ? per_cpu_ptr(gc->local, cpu) : NULL;
	if (lc && ret > 0) {
		if (hgei_va)
			*hgei_va = lc->msi_va + (ret * IMSIC_MMIO_PAGE_SZ);
		if (hgei_pa)
			*hgei_pa = lc->msi_pa + (ret * IMSIC_MMIO_PAGE_SZ);
	}

	return ret;
}

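/* Return a previously allocated HGEI line on @cpu to the free pool. */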
void kvm_riscv_aia_free_hgei(int cpu, int hgei)
{
	unsigned long flags;
	struct aia_hgei_control *hgctrl = per_cpu_ptr(&aia_hgei, cpu);

	if (!kvm_riscv_aia_available() || !hgctrl)
		return;

	raw_spin_lock_irqsave(&hgctrl->lock, flags);

	if (hgei > 0 && hgei <= kvm_riscv_aia_nr_hgei) {
		if (!(hgctrl->free_bitmap & BIT(hgei))) {
			hgctrl->free_bitmap |= BIT(hgei);
			hgctrl->owners[hgei] = NULL;
		}
	}

	raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
}

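/*
 * Enable or disable wakeup of @owner via its HGEI line by setting or
 * clearing the corresponding HGEIE bit on the current CPU.
 */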
void kvm_riscv_aia_wakeon_hgei(struct kvm_vcpu *owner, bool enable)
{
	int hgei;

	if (!kvm_riscv_aia_available())
		return;

	hgei = aia_find_hgei(owner);
	if (hgei > 0) {
		if (enable)
			csr_set(CSR_HGEIE, BIT(hgei));
		else
			csr_clear(CSR_HGEIE, BIT(hgei));
	}
}

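/*
 * SGEI handler: mask all raised HGEI lines and kick each vCPU owning
 * one so that it notices the pending guest external interrupt.
 */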
static irqreturn_t hgei_interrupt(int irq, void *dev_id)
{
	int i;
	unsigned long hgei_mask, flags;
	struct aia_hgei_control *hgctrl = get_cpu_ptr(&aia_hgei);

	hgei_mask = csr_read(CSR_HGEIP) & csr_read(CSR_HGEIE);
	csr_clear(CSR_HGEIE, hgei_mask);

	raw_spin_lock_irqsave(&hgctrl->lock, flags);

	for_each_set_bit(i, &hgei_mask, BITS_PER_LONG) {
		if (hgctrl->owners[i])
			kvm_vcpu_kick(hgctrl->owners[i]);
	}

	raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

	put_cpu_ptr(&aia_hgei);
	return IRQ_HANDLED;
}

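/*
 * Set up HGEI line management: initialize the per-CPU free bitmaps
 * and request the per-CPU SGEI parent interrupt from the INTC domain.
 */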
static int aia_hgei_init(void)
{
	int cpu, rc;
	struct irq_domain *domain;
	struct aia_hgei_control *hgctrl;

	/* Initialize per-CPU guest external interrupt line management */
	for_each_possible_cpu(cpu) {
		hgctrl = per_cpu_ptr(&aia_hgei, cpu);
		raw_spin_lock_init(&hgctrl->lock);
		if (kvm_riscv_aia_nr_hgei) {
			hgctrl->free_bitmap =
				BIT(kvm_riscv_aia_nr_hgei + 1) - 1;
			hgctrl->free_bitmap &= ~BIT(0);
		} else
			hgctrl->free_bitmap = 0;
	}

	/* Find INTC irq domain */
	domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
					  DOMAIN_BUS_ANY);
	if (!domain) {
		kvm_err("unable to find INTC domain\n");
		return -ENOENT;
	}

	/* Map per-CPU SGEI interrupt from INTC domain */
	hgei_parent_irq = irq_create_mapping(domain, IRQ_S_GEXT);
	if (!hgei_parent_irq) {
		kvm_err("unable to map SGEI IRQ\n");
		return -ENOMEM;
	}

	/* Request per-CPU SGEI interrupt */
	rc = request_percpu_irq(hgei_parent_irq, hgei_interrupt,
				"riscv-kvm", &aia_hgei);
	if (rc) {
		kvm_err("failed to request SGEI IRQ\n");
		return rc;
	}

	return 0;
}

static void aia_hgei_exit(void)
{
	/* Free per-CPU SGEI interrupt */
	free_percpu_irq(hgei_parent_irq, &aia_hgei);
}

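/* Enable AIA support (CSRs and SGEI interrupt) on the calling host CPU. */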
void kvm_riscv_aia_enable(void)
{
	if (!kvm_riscv_aia_available())
		return;

	aia_set_hvictl(false);
	csr_write(CSR_HVIPRIO1, 0x0);
	csr_write(CSR_HVIPRIO2, 0x0);
#ifdef CONFIG_32BIT
	csr_write(CSR_HVIPH, 0x0);
	csr_write(CSR_HIDELEGH, 0x0);
	csr_write(CSR_HVIPRIO1H, 0x0);
	csr_write(CSR_HVIPRIO2H, 0x0);
#endif

	/* Enable per-CPU SGEI interrupt */
	enable_percpu_irq(hgei_parent_irq,
			  irq_get_trigger_type(hgei_parent_irq));
	csr_set(CSR_HIE, BIT(IRQ_S_GEXT));
	/* Enable IRQ filtering for overflow interrupt only if sscofpmf is present */
	if (__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSCOFPMF))
		csr_write(CSR_HVIEN, BIT(IRQ_PMU_OVF));
}

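/*
 * Disable AIA support on the calling host CPU, releasing the IMSIC
 * state of, and waking up, any vCPU owning an HGEI line on this CPU.
 */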
void kvm_riscv_aia_disable(void)
{
	int i;
	unsigned long flags;
	struct kvm_vcpu *vcpu;
	struct aia_hgei_control *hgctrl;

	if (!kvm_riscv_aia_available())
		return;
	hgctrl = get_cpu_ptr(&aia_hgei);

	if (__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSCOFPMF))
		csr_clear(CSR_HVIEN, BIT(IRQ_PMU_OVF));
	/* Disable per-CPU SGEI interrupt */
	csr_clear(CSR_HIE, BIT(IRQ_S_GEXT));
	disable_percpu_irq(hgei_parent_irq);

	aia_set_hvictl(false);

	raw_spin_lock_irqsave(&hgctrl->lock, flags);

	for (i = 0; i <= kvm_riscv_aia_nr_hgei; i++) {
		vcpu = hgctrl->owners[i];
		if (!vcpu)
			continue;

		/*
		 * We release hgctrl->lock before notifying IMSIC
		 * so that we don't have lock ordering issues.
		 */
		raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

		/* Notify IMSIC */
		kvm_riscv_vcpu_aia_imsic_release(vcpu);

		/*
		 * Wakeup VCPU if it was blocked so that it can
		 * run on other HARTs
		 */
		if (csr_read(CSR_HGEIE) & BIT(i)) {
			csr_clear(CSR_HGEIE, BIT(i));
			kvm_vcpu_kick(vcpu);
		}

		raw_spin_lock_irqsave(&hgctrl->lock, flags);
	}

	raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

	put_cpu_ptr(&aia_hgei);
}

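/*
 * One-time AIA initialization: probe the number of usable HGEI lines
 * and guest MSI IDs, set up HGEI line management, and register the
 * AIA device operations.
 */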
int kvm_riscv_aia_init(void)
{
	int rc;
	const struct imsic_global_config *gc;

	if (!riscv_isa_extension_available(NULL, SxAIA))
		return -ENODEV;
	gc = imsic_get_global_config();

	/* Figure-out number of bits in HGEIE */
	csr_write(CSR_HGEIE, -1UL);
	kvm_riscv_aia_nr_hgei = fls_long(csr_read(CSR_HGEIE));
	csr_write(CSR_HGEIE, 0);
	if (kvm_riscv_aia_nr_hgei)
		kvm_riscv_aia_nr_hgei--;

	/*
	 * Number of usable HGEI lines should be minimum of per-HART
	 * IMSIC guest files and number of bits in HGEIE
	 */
	if (gc)
		kvm_riscv_aia_nr_hgei = min((ulong)kvm_riscv_aia_nr_hgei,
					    BIT(gc->guest_index_bits) - 1);
	else
		kvm_riscv_aia_nr_hgei = 0;

	/* Find number of guest MSI IDs */
	kvm_riscv_aia_max_ids = IMSIC_MAX_ID;
	if (gc && kvm_riscv_aia_nr_hgei)
		kvm_riscv_aia_max_ids = gc->nr_guest_ids + 1;

	/* Initialize guest external interrupt line management */
	rc = aia_hgei_init();
	if (rc)
		return rc;

	/* Register device operations */
	rc = kvm_register_device_ops(&kvm_riscv_aia_device_ops,
				     KVM_DEV_TYPE_RISCV_AIA);
	if (rc) {
		aia_hgei_exit();
		return rc;
	}

	/* Enable KVM AIA support */
	static_branch_enable(&kvm_riscv_aia_available);

	return 0;
}

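/* Tear down AIA support: unregister device operations and free HGEI state. */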
void kvm_riscv_aia_exit(void)
{
	if (!kvm_riscv_aia_available())
		return;

	/* Unregister device operations */
	kvm_unregister_device_ops(KVM_DEV_TYPE_RISCV_AIA);

	/* Cleanup the HGEI state */
	aia_hgei_exit();
}