1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
4 */
5
6 #include <linux/err.h>
7 #include <linux/errno.h>
8 #include <asm/kvm_csr.h>
9 #include <asm/kvm_vcpu.h>
10 #include <asm/kvm_dmsintc.h>
11
/*
 * Map an interrupt priority (index into the pending/clear bitmaps, one
 * entry per EXCCODE_INT_NUM interrupt source) to the CPU_* interrupt
 * bit written into guest ESTAT or GINTC when the interrupt is
 * delivered or cleared.
 */
static unsigned int priority_to_irq[EXCCODE_INT_NUM] = {
	[INT_TI] = CPU_TIMER,
	[INT_IPI] = CPU_IPI,
	[INT_SWI0] = CPU_SIP0,
	[INT_SWI1] = CPU_SIP1,
	[INT_HWI0] = CPU_IP0,
	[INT_HWI1] = CPU_IP1,
	[INT_HWI2] = CPU_IP2,
	[INT_HWI3] = CPU_IP3,
	[INT_HWI4] = CPU_IP4,
	[INT_HWI5] = CPU_IP5,
	[INT_HWI6] = CPU_IP6,
	[INT_HWI7] = CPU_IP7,
	[INT_AVEC] = CPU_AVEC,
};
27
/*
 * Deliver one pending interrupt of @priority to the guest vCPU.
 *
 * Clears the priority bit from vcpu->arch.irq_pending, then raises the
 * corresponding interrupt bit in the guest: ESTAT for timer/IPI/soft/AVEC
 * interrupts, GINTC for hardware interrupts HWI0-HWI7.
 *
 * Always returns 1.
 */
static int kvm_irq_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
{
	unsigned int irq = 0;
	unsigned long old, new;

	clear_bit(priority, &vcpu->arch.irq_pending);
	/* Out-of-range priorities fall through with irq == 0 (no bit set) */
	if (priority < EXCCODE_INT_NUM)
		irq = priority_to_irq[priority];

	switch (priority) {
	case INT_AVEC:
		/* AVEC is only meaningful when the guest has message interrupts */
		if (!kvm_guest_has_msgint(&vcpu->arch))
			break;
		dmsintc_inject_irq(vcpu);
		fallthrough;
	case INT_TI:
	case INT_IPI:
	case INT_SWI0:
	case INT_SWI1:
		/*
		 * Sample guest TVAL before and after updating ESTAT; if the
		 * value grew, the (down-counting) timer presumably wrapped
		 * while we were writing ESTAT, so raise TI here as well —
		 * NOTE(review): confirm TVAL wrap semantics against the CSR
		 * manual.
		 */
		old = kvm_read_hw_gcsr(LOONGARCH_CSR_TVAL);
		set_gcsr_estat(irq);
		new = kvm_read_hw_gcsr(LOONGARCH_CSR_TVAL);

		/* Inject TI if TVAL inverted */
		if (new > old)
			set_gcsr_estat(CPU_TIMER);
		break;

	case INT_HWI0 ... INT_HWI7:
		/* Hardware interrupts are injected via GINTC, not ESTAT */
		set_csr_gintc(irq);
		break;

	default:
		break;
	}

	return 1;
}
66
/*
 * Retract one interrupt of @priority from the guest vCPU.
 *
 * Mirror of kvm_irq_deliver(): clears the priority bit from
 * vcpu->arch.irq_clear, then drops the corresponding interrupt bit in
 * guest ESTAT (timer/IPI/soft/AVEC) or GINTC (HWI0-HWI7).
 *
 * Always returns 1.
 */
static int kvm_irq_clear(struct kvm_vcpu *vcpu, unsigned int priority)
{
	unsigned int irq = 0;
	unsigned long old, new;

	clear_bit(priority, &vcpu->arch.irq_clear);
	/* Out-of-range priorities fall through with irq == 0 (no bit cleared) */
	if (priority < EXCCODE_INT_NUM)
		irq = priority_to_irq[priority];

	switch (priority) {
	case INT_AVEC:
		/*
		 * No dmsintc call on the clear path (unlike delivery);
		 * only the ESTAT bit is dropped below.
		 */
		if (!kvm_guest_has_msgint(&vcpu->arch))
			break;
		fallthrough;
	case INT_TI:
	case INT_IPI:
	case INT_SWI0:
	case INT_SWI1:
		/*
		 * Sample guest TVAL around the ESTAT update; if it grew,
		 * the timer presumably wrapped meanwhile, so TI must still
		 * be raised even though we are clearing @irq — NOTE(review):
		 * confirm TVAL wrap semantics against the CSR manual.
		 */
		old = kvm_read_hw_gcsr(LOONGARCH_CSR_TVAL);
		clear_gcsr_estat(irq);
		new = kvm_read_hw_gcsr(LOONGARCH_CSR_TVAL);

		/* Inject TI if TVAL inverted */
		if (new > old)
			set_gcsr_estat(CPU_TIMER);
		break;

	case INT_HWI0 ... INT_HWI7:
		clear_csr_gintc(irq);
		break;

	default:
		break;
	}

	return 1;
}
104
kvm_deliver_intr(struct kvm_vcpu * vcpu)105 void kvm_deliver_intr(struct kvm_vcpu *vcpu)
106 {
107 unsigned int priority;
108 unsigned long *pending = &vcpu->arch.irq_pending;
109 unsigned long *pending_clr = &vcpu->arch.irq_clear;
110
111 for_each_set_bit(priority, pending_clr, EXCCODE_INT_NUM)
112 kvm_irq_clear(vcpu, priority);
113
114 for_each_set_bit(priority, pending, EXCCODE_INT_NUM)
115 kvm_irq_deliver(vcpu, priority);
116 }
117
kvm_pending_timer(struct kvm_vcpu * vcpu)118 int kvm_pending_timer(struct kvm_vcpu *vcpu)
119 {
120 return test_bit(INT_TI, &vcpu->arch.irq_pending);
121 }
122
123 /*
124 * Only support illegal instruction or illegal Address Error exception,
125 * Other exceptions are injected by hardware in kvm mode
126 */
/*
 * Only support illegal instruction or illegal Address Error exception,
 * Other exceptions are injected by hardware in kvm mode
 */
/*
 * Emulate the guest's exception entry in software: update BADV/BADI,
 * save CRMD into PRMD, disable IRQs and drop to PLV0, record ERA, set
 * the exception code in ESTAT, and redirect guest PC to the vectored
 * exception entry derived from ECFG.VS and EENTRY.
 */
static void _kvm_deliver_exception(struct kvm_vcpu *vcpu,
				unsigned int code, unsigned int subcode)
{
	unsigned long val, vec_size;

	/*
	 * BADV is added for EXCCODE_ADE exception
	 * Use PC register (GVA address) if it is instruction exception
	 * Else use BADV from host side (GPA address) for data exception
	 */
	if (code == EXCCODE_ADE) {
		if (subcode == EXSUBCODE_ADEF)
			val = vcpu->arch.pc;
		else
			val = vcpu->arch.badv;
		kvm_write_hw_gcsr(LOONGARCH_CSR_BADV, val);
	}

	/* Set exception instruction */
	kvm_write_hw_gcsr(LOONGARCH_CSR_BADI, vcpu->arch.badi);

	/*
	 * Save CRMD in PRMD
	 * Set IRQ disabled and PLV0 with CRMD
	 */
	val = kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD);
	kvm_write_hw_gcsr(LOONGARCH_CSR_PRMD, val);
	val = val & ~(CSR_CRMD_PLV | CSR_CRMD_IE);
	kvm_write_hw_gcsr(LOONGARCH_CSR_CRMD, val);

	/* Set exception PC address */
	kvm_write_hw_gcsr(LOONGARCH_CSR_ERA, vcpu->arch.pc);

	/*
	 * Set exception code
	 * Exception and interrupt can be inject at the same time
	 * Hardware will handle exception first and then extern interrupt
	 * Exception code is Ecode in ESTAT[16:21]
	 * Interrupt code in ESTAT[0:12]
	 */
	val = kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT);
	val = (val & ~CSR_ESTAT_EXC) | code;
	kvm_write_hw_gcsr(LOONGARCH_CSR_ESTAT, val);

	/*
	 * Calculate exception entry address:
	 * ECFG.VS == 0 means all exceptions share one entry (spacing 0);
	 * otherwise each vector is (1 << VS) * 4 bytes apart from EENTRY.
	 */
	val = kvm_read_hw_gcsr(LOONGARCH_CSR_ECFG);
	vec_size = (val & CSR_ECFG_VS) >> CSR_ECFG_VS_SHIFT;
	if (vec_size)
		vec_size = (1 << vec_size) * 4;
	val = kvm_read_hw_gcsr(LOONGARCH_CSR_EENTRY);
	vcpu->arch.pc = val + code * vec_size;
}
179
kvm_deliver_exception(struct kvm_vcpu * vcpu)180 void kvm_deliver_exception(struct kvm_vcpu *vcpu)
181 {
182 unsigned int code;
183 unsigned long *pending = &vcpu->arch.exception_pending;
184
185 if (*pending) {
186 code = __ffs(*pending);
187 _kvm_deliver_exception(vcpu, code, vcpu->arch.esubcode);
188 *pending = 0;
189 vcpu->arch.esubcode = 0;
190 }
191 }
192