1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
4 * Copyright (C) 2022 Ventana Micro Systems Inc.
5 *
6 * Authors:
7 * Anup Patel <apatel@ventanamicro.com>
8 */
9
10 #ifndef __KVM_RISCV_AIA_H
11 #define __KVM_RISCV_AIA_H
12
13 #include <linux/jump_label.h>
14 #include <linux/kvm_types.h>
15 #include <asm/csr.h>
16
/* Per-VM AIA (Advanced Interrupt Architecture) state. */
struct kvm_aia {
	/* In-kernel irqchip created */
	bool in_kernel;

	/* In-kernel irqchip initialized */
	bool initialized;

	/* Virtualization mode (Emulation, HW Accelerated, or Auto) */
	u32 mode;

	/* Number of MSIs */
	u32 nr_ids;

	/* Number of wired IRQs */
	u32 nr_sources;

	/* Number of group bits in IMSIC address */
	u32 nr_group_bits;

	/* Position of group bits in IMSIC address */
	u32 nr_group_shift;

	/* Number of hart bits in IMSIC address */
	u32 nr_hart_bits;

	/* Number of guest bits in IMSIC address */
	u32 nr_guest_bits;

	/* Guest physical address of APLIC */
	gpa_t aplic_addr;

	/* Internal state of APLIC (opaque to users of this header) */
	void *aplic_state;
};
51
/*
 * AIA CSR context of a Guest VCPU. Field names mirror the AIA CSR
 * names; the *h fields are presumably the RV32 high-word counterparts
 * (cf. the CONFIG_32BIT-only flush/sync helpers below).
 */
struct kvm_vcpu_aia_csr {
	unsigned long vsiselect;
	unsigned long hviprio1;
	unsigned long hviprio2;
	unsigned long vsieh;
	unsigned long hviph;
	unsigned long hviprio1h;
	unsigned long hviprio2h;
};
61
/* Per-VCPU AIA state. */
struct kvm_vcpu_aia {
	/* CPU AIA CSR context of Guest VCPU */
	struct kvm_vcpu_aia_csr guest_csr;

	/* Guest physical address of IMSIC for this VCPU */
	gpa_t imsic_addr;

	/* HART index of IMSIC extracted from guest physical address */
	u32 hart_index;

	/* Internal state of IMSIC for this VCPU (opaque to users of this header) */
	void *imsic_state;
};
75
/* Sentinel for an unset AIA guest physical address (aplic_addr/imsic_addr) */
#define KVM_RISCV_AIA_UNDEF_ADDR	(-1)

/* True once the in-kernel AIA irqchip of VM "k" has been initialized */
#define kvm_riscv_aia_initialized(k)	((k)->arch.aia.initialized)

/* True if VM "k" has an in-kernel AIA irqchip */
#define irqchip_in_kernel(k)		((k)->arch.aia.in_kernel)

extern unsigned int kvm_riscv_aia_nr_hgei;
extern unsigned int kvm_riscv_aia_max_ids;
DECLARE_STATIC_KEY_FALSE(kvm_riscv_aia_available);
/* True if host AIA support was detected (static-key fast path) */
#define kvm_riscv_aia_available() \
	static_branch_unlikely(&kvm_riscv_aia_available)

extern struct kvm_device_ops kvm_riscv_aia_device_ops;
89
/* Per-VCPU IMSIC emulation interface */
void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu);

/* Pseudo iselect value used to access the guest TOPEI register */
#define KVM_RISCV_AIA_IMSIC_TOPEI	(ISELECT_MASK + 1)
int kvm_riscv_vcpu_aia_imsic_rmw(struct kvm_vcpu *vcpu, unsigned long isel,
				 unsigned long *val, unsigned long new_val,
				 unsigned long wr_mask);
int kvm_riscv_aia_imsic_rw_attr(struct kvm *kvm, unsigned long type,
				bool write, unsigned long *val);
int kvm_riscv_aia_imsic_has_attr(struct kvm *kvm, unsigned long type);
void kvm_riscv_vcpu_aia_imsic_reset(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_aia_imsic_inject(struct kvm_vcpu *vcpu,
				    u32 guest_index, u32 offset, u32 iid);
int kvm_riscv_vcpu_aia_imsic_init(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_aia_imsic_cleanup(struct kvm_vcpu *vcpu);

/* Per-VM APLIC emulation interface */
int kvm_riscv_aia_aplic_set_attr(struct kvm *kvm, unsigned long type, u32 v);
int kvm_riscv_aia_aplic_get_attr(struct kvm *kvm, unsigned long type, u32 *v);
int kvm_riscv_aia_aplic_has_attr(struct kvm *kvm, unsigned long type);
int kvm_riscv_aia_aplic_inject(struct kvm *kvm, u32 source, bool level);
int kvm_riscv_aia_aplic_init(struct kvm *kvm);
void kvm_riscv_aia_aplic_cleanup(struct kvm *kvm);
112
#ifdef CONFIG_32BIT
void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu);
#else
/* Flush/sync are only meaningful on 32-bit hosts; no-op stubs otherwise */
static inline void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu)
{
}
static inline void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
{
}
#endif
bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask);

/* VCPU load/put and AIA CSR access interface */
void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu,
			       unsigned long reg_num,
			       unsigned long *out_val);
int kvm_riscv_vcpu_aia_set_csr(struct kvm_vcpu *vcpu,
			       unsigned long reg_num,
			       unsigned long val);

/* Read-modify-write handlers for the STOPEI and SIREG CSRs */
int kvm_riscv_vcpu_aia_rmw_topei(struct kvm_vcpu *vcpu,
				 unsigned int csr_num,
				 unsigned long *val,
				 unsigned long new_val,
				 unsigned long wr_mask);
int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
				unsigned long *val, unsigned long new_val,
				unsigned long wr_mask);
/* CSR emulation table entries wiring SIREG/STOPEI to the handlers above */
#define KVM_RISCV_VCPU_AIA_CSR_FUNCS \
{ .base = CSR_SIREG,      .count = 1, .func = kvm_riscv_vcpu_aia_rmw_ireg }, \
{ .base = CSR_STOPEI,     .count = 1, .func = kvm_riscv_vcpu_aia_rmw_topei },
147
/* VCPU-lifecycle AIA interface */
int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_aia_init(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_aia_deinit(struct kvm_vcpu *vcpu);

/* Interrupt injection (MSI by hart index, MSI by address, wired IRQ) */
int kvm_riscv_aia_inject_msi_by_id(struct kvm *kvm, u32 hart_index,
				   u32 guest_index, u32 iid);
int kvm_riscv_aia_inject_msi(struct kvm *kvm, struct kvm_msi *msi);
int kvm_riscv_aia_inject_irq(struct kvm *kvm, unsigned int irq, bool level);

/* VM-lifecycle AIA interface */
void kvm_riscv_aia_init_vm(struct kvm *kvm);
void kvm_riscv_aia_destroy_vm(struct kvm *kvm);

/* Guest-external-interrupt (HGEI) line management for a host CPU */
int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner,
			     void __iomem **hgei_va, phys_addr_t *hgei_pa);
void kvm_riscv_aia_free_hgei(int cpu, int hgei);
void kvm_riscv_aia_wakeon_hgei(struct kvm_vcpu *owner, bool enable);

/* Host-wide AIA enable/disable and module init/exit */
void kvm_riscv_aia_enable(void);
void kvm_riscv_aia_disable(void);
int kvm_riscv_aia_init(void);
void kvm_riscv_aia_exit(void);
170
171 #endif
172