// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */

#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/irqchip/riscv-imsic.h>
#include <linux/kvm_host.h>
#include <linux/math.h>
#include <linux/spinlock.h>
#include <linux/swab.h>
#include <kvm/iodev.h>
#include <asm/csr.h>
#include <asm/kvm_mmu.h>

#define IMSIC_MAX_EIX	(IMSIC_MAX_ID / BITS_PER_TYPE(u64))

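/*
 * An MRIF (memory resident interrupt file) mirrors the register state
 * of one guest interrupt file in ordinary memory: each imsic_mrif_eix
 * entry tracks a block of 64 interrupt identities via a 64-bit pending
 * (eip) and a 64-bit enable (eie) bitmap, while eidelivery and
 * eithreshold mirror the corresponding IMSIC indirect registers.
 */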
struct imsic_mrif_eix {
	unsigned long eip[BITS_PER_TYPE(u64) / BITS_PER_LONG];
	unsigned long eie[BITS_PER_TYPE(u64) / BITS_PER_LONG];
};

struct imsic_mrif {
	struct imsic_mrif_eix eix[IMSIC_MAX_EIX];
	unsigned long eithreshold;
	unsigned long eidelivery;
};

struct imsic {
	struct kvm_io_device iodev;

	u32 nr_msis;
	u32 nr_eix;
	u32 nr_hw_eix;

	/*
	 * At any point in time, the register state is in
	 * one of the following places:
	 *
	 * 1) Hardware: IMSIC VS-file (vsfile_cpu >= 0)
	 * 2) Software: IMSIC SW-file (vsfile_cpu < 0)
	 */

	/* IMSIC VS-file */
	rwlock_t vsfile_lock;
	int vsfile_cpu;
	int vsfile_hgei;
	void __iomem *vsfile_va;
	phys_addr_t vsfile_pa;

	/* IMSIC SW-file */
	struct imsic_mrif *swfile;
	phys_addr_t swfile_pa;
	raw_spinlock_t swfile_extirq_lock;
};

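/*
 * The IMSIC exposes its interrupt-file registers indirectly: software
 * writes the register number to the *siselect CSR and then accesses
 * the selected register through *sireg. For a guest interrupt file,
 * the pair is CSR_VSISELECT/CSR_VSIREG, which target the guest file
 * chosen by HSTATUS.VGEIN. The helpers below wrap this two-step
 * access.
 */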
#define imsic_vs_csr_read(__c)			\
({						\
	unsigned long __r;			\
	csr_write(CSR_VSISELECT, __c);		\
	__r = csr_read(CSR_VSIREG);		\
	__r;					\
})

#define imsic_read_switchcase(__ireg)		\
	case __ireg:				\
		return imsic_vs_csr_read(__ireg);
#define imsic_read_switchcase_2(__ireg)		\
	imsic_read_switchcase(__ireg + 0)	\
	imsic_read_switchcase(__ireg + 1)
#define imsic_read_switchcase_4(__ireg)		\
	imsic_read_switchcase_2(__ireg + 0)	\
	imsic_read_switchcase_2(__ireg + 2)
#define imsic_read_switchcase_8(__ireg)		\
	imsic_read_switchcase_4(__ireg + 0)	\
	imsic_read_switchcase_4(__ireg + 4)
#define imsic_read_switchcase_16(__ireg)	\
	imsic_read_switchcase_8(__ireg + 0)	\
	imsic_read_switchcase_8(__ireg + 8)
#define imsic_read_switchcase_32(__ireg)	\
	imsic_read_switchcase_16(__ireg + 0)	\
	imsic_read_switchcase_16(__ireg + 16)
#define imsic_read_switchcase_64(__ireg)	\
	imsic_read_switchcase_32(__ireg + 0)	\
	imsic_read_switchcase_32(__ireg + 32)

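/*
 * The switchcase macros above unroll into 64 literal "case" labels, so
 * imsic_eix_read() below expands to a switch covering exactly the
 * architecturally defined EIP0..EIP63 and EIE0..EIE63 register numbers.
 * Any other ireg value falls through and reads as zero without ever
 * touching CSR_VSISELECT. For illustration,
 * imsic_read_switchcase_2(IMSIC_EIP0) expands to:
 *
 *	case IMSIC_EIP0 + 0:
 *		return imsic_vs_csr_read(IMSIC_EIP0 + 0);
 *	case IMSIC_EIP0 + 1:
 *		return imsic_vs_csr_read(IMSIC_EIP0 + 1);
 */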
static unsigned long imsic_eix_read(int ireg)
{
	switch (ireg) {
	imsic_read_switchcase_64(IMSIC_EIP0)
	imsic_read_switchcase_64(IMSIC_EIE0)
	}

	return 0;
}

#define imsic_vs_csr_swap(__c, __v)		\
({						\
	unsigned long __r;			\
	csr_write(CSR_VSISELECT, __c);		\
	__r = csr_swap(CSR_VSIREG, __v);	\
	__r;					\
})

#define imsic_swap_switchcase(__ireg, __v)	\
	case __ireg:				\
		return imsic_vs_csr_swap(__ireg, __v);
#define imsic_swap_switchcase_2(__ireg, __v)	\
	imsic_swap_switchcase(__ireg + 0, __v)	\
	imsic_swap_switchcase(__ireg + 1, __v)
#define imsic_swap_switchcase_4(__ireg, __v)	\
	imsic_swap_switchcase_2(__ireg + 0, __v)	\
	imsic_swap_switchcase_2(__ireg + 2, __v)
#define imsic_swap_switchcase_8(__ireg, __v)	\
	imsic_swap_switchcase_4(__ireg + 0, __v)	\
	imsic_swap_switchcase_4(__ireg + 4, __v)
#define imsic_swap_switchcase_16(__ireg, __v)	\
	imsic_swap_switchcase_8(__ireg + 0, __v)	\
	imsic_swap_switchcase_8(__ireg + 8, __v)
#define imsic_swap_switchcase_32(__ireg, __v)	\
	imsic_swap_switchcase_16(__ireg + 0, __v)	\
	imsic_swap_switchcase_16(__ireg + 16, __v)
#define imsic_swap_switchcase_64(__ireg, __v)	\
	imsic_swap_switchcase_32(__ireg + 0, __v)	\
	imsic_swap_switchcase_32(__ireg + 32, __v)

static unsigned long imsic_eix_swap(int ireg, unsigned long val)
{
	switch (ireg) {
	imsic_swap_switchcase_64(IMSIC_EIP0, val)
	imsic_swap_switchcase_64(IMSIC_EIE0, val)
	}

	return 0;
}

#define imsic_vs_csr_write(__c, __v)		\
do {						\
	csr_write(CSR_VSISELECT, __c);		\
	csr_write(CSR_VSIREG, __v);		\
} while (0)

#define imsic_write_switchcase(__ireg, __v)	\
	case __ireg:				\
		imsic_vs_csr_write(__ireg, __v);	\
		break;
#define imsic_write_switchcase_2(__ireg, __v)	\
	imsic_write_switchcase(__ireg + 0, __v)	\
	imsic_write_switchcase(__ireg + 1, __v)
#define imsic_write_switchcase_4(__ireg, __v)	\
	imsic_write_switchcase_2(__ireg + 0, __v)	\
	imsic_write_switchcase_2(__ireg + 2, __v)
#define imsic_write_switchcase_8(__ireg, __v)	\
	imsic_write_switchcase_4(__ireg + 0, __v)	\
	imsic_write_switchcase_4(__ireg + 4, __v)
#define imsic_write_switchcase_16(__ireg, __v)	\
	imsic_write_switchcase_8(__ireg + 0, __v)	\
	imsic_write_switchcase_8(__ireg + 8, __v)
#define imsic_write_switchcase_32(__ireg, __v)	\
	imsic_write_switchcase_16(__ireg + 0, __v)	\
	imsic_write_switchcase_16(__ireg + 16, __v)
#define imsic_write_switchcase_64(__ireg, __v)	\
	imsic_write_switchcase_32(__ireg + 0, __v)	\
	imsic_write_switchcase_32(__ireg + 32, __v)

static void imsic_eix_write(int ireg, unsigned long val)
{
	switch (ireg) {
	imsic_write_switchcase_64(IMSIC_EIP0, val)
	imsic_write_switchcase_64(IMSIC_EIE0, val)
	}
}

#define imsic_vs_csr_set(__c, __v)		\
do {						\
	csr_write(CSR_VSISELECT, __c);		\
	csr_set(CSR_VSIREG, __v);		\
} while (0)

#define imsic_set_switchcase(__ireg, __v)	\
	case __ireg:				\
		imsic_vs_csr_set(__ireg, __v);	\
		break;
#define imsic_set_switchcase_2(__ireg, __v)	\
	imsic_set_switchcase(__ireg + 0, __v)	\
	imsic_set_switchcase(__ireg + 1, __v)
#define imsic_set_switchcase_4(__ireg, __v)	\
	imsic_set_switchcase_2(__ireg + 0, __v)	\
	imsic_set_switchcase_2(__ireg + 2, __v)
#define imsic_set_switchcase_8(__ireg, __v)	\
	imsic_set_switchcase_4(__ireg + 0, __v)	\
	imsic_set_switchcase_4(__ireg + 4, __v)
#define imsic_set_switchcase_16(__ireg, __v)	\
	imsic_set_switchcase_8(__ireg + 0, __v)	\
	imsic_set_switchcase_8(__ireg + 8, __v)
#define imsic_set_switchcase_32(__ireg, __v)	\
	imsic_set_switchcase_16(__ireg + 0, __v)	\
	imsic_set_switchcase_16(__ireg + 16, __v)
#define imsic_set_switchcase_64(__ireg, __v)	\
	imsic_set_switchcase_32(__ireg + 0, __v)	\
	imsic_set_switchcase_32(__ireg + 32, __v)

static void imsic_eix_set(int ireg, unsigned long val)
{
	switch (ireg) {
	imsic_set_switchcase_64(IMSIC_EIP0, val)
	imsic_set_switchcase_64(IMSIC_EIE0, val)
	}
}

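/*
 * imsic_mrif_atomic_rmw() below implements, in a single LR/SC retry
 * loop, the masked read-modify-write that the rest of this file builds
 * on. A non-atomic sketch of the same operation:
 *
 *	old_val = *ptr;
 *	*ptr = (old_val & ~wr_mask) | (new_val & wr_mask);
 *	return old_val;
 *
 * The .aq/.rl annotations order the update with surrounding accesses
 * (acquire on the load-reserved, release on the store-conditional).
 */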
static unsigned long imsic_mrif_atomic_rmw(struct imsic_mrif *mrif,
					   unsigned long *ptr,
					   unsigned long new_val,
					   unsigned long wr_mask)
{
	unsigned long old_val = 0, tmp = 0;

	__asm__ __volatile__ (
		"0:	lr.w.aq   %1, %0\n"
		"	and       %2, %1, %3\n"
		"	or        %2, %2, %4\n"
		"	sc.w.rl   %2, %2, %0\n"
		"	bnez      %2, 0b"
		: "+A" (*ptr), "+r" (old_val), "+r" (tmp)
		: "r" (~wr_mask), "r" (new_val & wr_mask)
		: "memory");

	return old_val;
}

static unsigned long imsic_mrif_atomic_or(struct imsic_mrif *mrif,
					  unsigned long *ptr,
					  unsigned long val)
{
	return atomic_long_fetch_or(val, (atomic_long_t *)ptr);
}

#define imsic_mrif_atomic_write(__mrif, __ptr, __new_val)	\
	imsic_mrif_atomic_rmw(__mrif, __ptr, __new_val, -1UL)
#define imsic_mrif_atomic_read(__mrif, __ptr)			\
	imsic_mrif_atomic_or(__mrif, __ptr, 0)

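/*
 * imsic_mrif_topei() below returns the highest-priority (i.e.
 * lowest-numbered) interrupt identity that is both pending and enabled
 * and lies below the eithreshold cut-off. Because an IMSIC interrupt's
 * priority is its identity number, the result repeats the identity in
 * both the ID and priority fields: (i << TOPEI_ID_SHIFT) | i.
 * Identity 0 is never reported since it is not a valid MSI identity.
 */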
static u32 imsic_mrif_topei(struct imsic_mrif *mrif, u32 nr_eix, u32 nr_msis)
{
	struct imsic_mrif_eix *eix;
	u32 i, imin, imax, ei, max_msi;
	unsigned long eipend[BITS_PER_TYPE(u64) / BITS_PER_LONG];
	unsigned long eithreshold = imsic_mrif_atomic_read(mrif,
							   &mrif->eithreshold);

	max_msi = (eithreshold && (eithreshold <= nr_msis)) ?
		   eithreshold : nr_msis;
	for (ei = 0; ei < nr_eix; ei++) {
		eix = &mrif->eix[ei];
		eipend[0] = imsic_mrif_atomic_read(mrif, &eix->eie[0]) &
			    imsic_mrif_atomic_read(mrif, &eix->eip[0]);
#ifdef CONFIG_32BIT
		eipend[1] = imsic_mrif_atomic_read(mrif, &eix->eie[1]) &
			    imsic_mrif_atomic_read(mrif, &eix->eip[1]);
		if (!eipend[0] && !eipend[1])
#else
		if (!eipend[0])
#endif
			continue;

		imin = ei * BITS_PER_TYPE(u64);
		imax = ((imin + BITS_PER_TYPE(u64)) < max_msi) ?
			imin + BITS_PER_TYPE(u64) : max_msi;
		for (i = (!imin) ? 1 : imin; i < imax; i++) {
			if (test_bit(i - imin, eipend))
				return (i << TOPEI_ID_SHIFT) | i;
		}
	}

	return 0;
}

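/*
 * Validate a guest iselect value against this IMSIC's geometry. Note
 * that on RV64 the EIP/EIE indirect registers are 64 bits wide and
 * only the even-numbered register indices exist (the odd-numbered ones
 * are reserved per the AIA specification), hence the (num & 0x1)
 * rejection below; on RV32 each 64-bit pair is exposed as two
 * consecutive 32-bit registers.
 */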
static int imsic_mrif_isel_check(u32 nr_eix, unsigned long isel)
{
	u32 num = 0;

	switch (isel) {
	case IMSIC_EIDELIVERY:
	case IMSIC_EITHRESHOLD:
		break;
	case IMSIC_EIP0 ... IMSIC_EIP63:
		num = isel - IMSIC_EIP0;
		break;
	case IMSIC_EIE0 ... IMSIC_EIE63:
		num = isel - IMSIC_EIE0;
		break;
	default:
		return -ENOENT;
	}
#ifndef CONFIG_32BIT
	if (num & 0x1)
		return -EINVAL;
#endif
	if ((num / 2) >= nr_eix)
		return -EINVAL;

	return 0;
}

static int imsic_mrif_rmw(struct imsic_mrif *mrif, u32 nr_eix,
			  unsigned long isel, unsigned long *val,
			  unsigned long new_val, unsigned long wr_mask)
{
	bool pend;
	struct imsic_mrif_eix *eix;
	unsigned long *ei, num, old_val = 0;

	switch (isel) {
	case IMSIC_EIDELIVERY:
		old_val = imsic_mrif_atomic_rmw(mrif, &mrif->eidelivery,
						new_val, wr_mask & 0x1);
		break;
	case IMSIC_EITHRESHOLD:
		old_val = imsic_mrif_atomic_rmw(mrif, &mrif->eithreshold,
						new_val, wr_mask & (IMSIC_MAX_ID - 1));
		break;
	case IMSIC_EIP0 ... IMSIC_EIP63:
	case IMSIC_EIE0 ... IMSIC_EIE63:
		if (isel >= IMSIC_EIP0 && isel <= IMSIC_EIP63) {
			pend = true;
			num = isel - IMSIC_EIP0;
		} else {
			pend = false;
			num = isel - IMSIC_EIE0;
		}

		if ((num / 2) >= nr_eix)
			return -EINVAL;
		eix = &mrif->eix[num / 2];

#ifndef CONFIG_32BIT
		if (num & 0x1)
			return -EINVAL;
		ei = (pend) ? &eix->eip[0] : &eix->eie[0];
#else
		ei = (pend) ? &eix->eip[num & 0x1] : &eix->eie[num & 0x1];
#endif

		/* Bit0 of EIP0 or EIE0 is read-only */
		if (!num)
			wr_mask &= ~BIT(0);

		old_val = imsic_mrif_atomic_rmw(mrif, ei, new_val, wr_mask);
		break;
	default:
		return -ENOENT;
	}

	if (val)
		*val = old_val;

	return 0;
}

struct imsic_vsfile_read_data {
	int hgei;
	u32 nr_eix;
	bool clear;
	struct imsic_mrif *mrif;
};

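/*
 * This runs via on_each_cpu_mask() on the CPU that hosts the VS-file:
 * it temporarily points HSTATUS.VGEIN at the target guest interrupt
 * file so that the CSR_VSISELECT/CSR_VSIREG accesses operate on that
 * file, and restores both HSTATUS and VSISELECT before returning.
 */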
static void imsic_vsfile_local_read(void *data)
{
	u32 i;
	struct imsic_mrif_eix *eix;
	struct imsic_vsfile_read_data *idata = data;
	struct imsic_mrif *mrif = idata->mrif;
	unsigned long new_hstatus, old_hstatus, old_vsiselect;

	old_vsiselect = csr_read(CSR_VSISELECT);
	old_hstatus = csr_read(CSR_HSTATUS);
	new_hstatus = old_hstatus & ~HSTATUS_VGEIN;
	new_hstatus |= ((unsigned long)idata->hgei) << HSTATUS_VGEIN_SHIFT;
	csr_write(CSR_HSTATUS, new_hstatus);

	/*
	 * We don't use imsic_mrif_atomic_xyz() functions to store
	 * values in the MRIF because imsic_vsfile_read() is always
	 * called with a pointer to a temporary MRIF on the stack.
	 */

	if (idata->clear) {
		mrif->eidelivery = imsic_vs_csr_swap(IMSIC_EIDELIVERY, 0);
		mrif->eithreshold = imsic_vs_csr_swap(IMSIC_EITHRESHOLD, 0);
		for (i = 0; i < idata->nr_eix; i++) {
			eix = &mrif->eix[i];
			eix->eip[0] = imsic_eix_swap(IMSIC_EIP0 + i * 2, 0);
			eix->eie[0] = imsic_eix_swap(IMSIC_EIE0 + i * 2, 0);
#ifdef CONFIG_32BIT
			eix->eip[1] = imsic_eix_swap(IMSIC_EIP0 + i * 2 + 1, 0);
			eix->eie[1] = imsic_eix_swap(IMSIC_EIE0 + i * 2 + 1, 0);
#endif
		}
	} else {
		mrif->eidelivery = imsic_vs_csr_read(IMSIC_EIDELIVERY);
		mrif->eithreshold = imsic_vs_csr_read(IMSIC_EITHRESHOLD);
		for (i = 0; i < idata->nr_eix; i++) {
			eix = &mrif->eix[i];
			eix->eip[0] = imsic_eix_read(IMSIC_EIP0 + i * 2);
			eix->eie[0] = imsic_eix_read(IMSIC_EIE0 + i * 2);
#ifdef CONFIG_32BIT
			eix->eip[1] = imsic_eix_read(IMSIC_EIP0 + i * 2 + 1);
			eix->eie[1] = imsic_eix_read(IMSIC_EIE0 + i * 2 + 1);
#endif
		}
	}

	csr_write(CSR_HSTATUS, old_hstatus);
	csr_write(CSR_VSISELECT, old_vsiselect);
}

static void imsic_vsfile_read(int vsfile_hgei, int vsfile_cpu, u32 nr_eix,
			      bool clear, struct imsic_mrif *mrif)
{
	struct imsic_vsfile_read_data idata;

	/* We can only read and clear if we have an IMSIC VS-file */
	if (vsfile_cpu < 0 || vsfile_hgei <= 0)
		return;

	/* The VS-file can only be read and cleared on its local CPU */
	idata.hgei = vsfile_hgei;
	idata.nr_eix = nr_eix;
	idata.clear = clear;
	idata.mrif = mrif;
	on_each_cpu_mask(cpumask_of(vsfile_cpu),
			 imsic_vsfile_local_read, &idata, 1);
}

struct imsic_vsfile_rw_data {
	int hgei;
	int isel;
	bool write;
	unsigned long val;
};

static void imsic_vsfile_local_rw(void *data)
{
	struct imsic_vsfile_rw_data *idata = data;
	unsigned long new_hstatus, old_hstatus, old_vsiselect;

	old_vsiselect = csr_read(CSR_VSISELECT);
	old_hstatus = csr_read(CSR_HSTATUS);
	new_hstatus = old_hstatus & ~HSTATUS_VGEIN;
	new_hstatus |= ((unsigned long)idata->hgei) << HSTATUS_VGEIN_SHIFT;
	csr_write(CSR_HSTATUS, new_hstatus);

	switch (idata->isel) {
	case IMSIC_EIDELIVERY:
		if (idata->write)
			imsic_vs_csr_write(IMSIC_EIDELIVERY, idata->val);
		else
			idata->val = imsic_vs_csr_read(IMSIC_EIDELIVERY);
		break;
	case IMSIC_EITHRESHOLD:
		if (idata->write)
			imsic_vs_csr_write(IMSIC_EITHRESHOLD, idata->val);
		else
			idata->val = imsic_vs_csr_read(IMSIC_EITHRESHOLD);
		break;
	case IMSIC_EIP0 ... IMSIC_EIP63:
	case IMSIC_EIE0 ... IMSIC_EIE63:
#ifndef CONFIG_32BIT
		if (idata->isel & 0x1)
			break;
#endif
		if (idata->write)
			imsic_eix_write(idata->isel, idata->val);
		else
			idata->val = imsic_eix_read(idata->isel);
		break;
	default:
		break;
	}

	csr_write(CSR_HSTATUS, old_hstatus);
	csr_write(CSR_VSISELECT, old_vsiselect);
}

static int imsic_vsfile_rw(int vsfile_hgei, int vsfile_cpu, u32 nr_eix,
			   unsigned long isel, bool write,
			   unsigned long *val)
{
	int rc;
	struct imsic_vsfile_rw_data rdata;

	/* We can only access registers if we have an IMSIC VS-file */
	if (vsfile_cpu < 0 || vsfile_hgei <= 0)
		return -EINVAL;

	/* Check the IMSIC register iselect */
	rc = imsic_mrif_isel_check(nr_eix, isel);
	if (rc)
		return rc;

	/* The register can only be accessed on the VS-file's local CPU */
	rdata.hgei = vsfile_hgei;
	rdata.isel = isel;
	rdata.write = write;
	rdata.val = (write) ? *val : 0;
	on_each_cpu_mask(cpumask_of(vsfile_cpu),
			 imsic_vsfile_local_rw, &rdata, 1);

	if (!write)
		*val = rdata.val;

	return 0;
}

static void imsic_vsfile_local_clear(int vsfile_hgei, u32 nr_eix)
{
	u32 i;
	unsigned long new_hstatus, old_hstatus, old_vsiselect;

	/* We can only zero-out if we have an IMSIC VS-file */
	if (vsfile_hgei <= 0)
		return;

	old_vsiselect = csr_read(CSR_VSISELECT);
	old_hstatus = csr_read(CSR_HSTATUS);
	new_hstatus = old_hstatus & ~HSTATUS_VGEIN;
	new_hstatus |= ((unsigned long)vsfile_hgei) << HSTATUS_VGEIN_SHIFT;
	csr_write(CSR_HSTATUS, new_hstatus);

	imsic_vs_csr_write(IMSIC_EIDELIVERY, 0);
	imsic_vs_csr_write(IMSIC_EITHRESHOLD, 0);
	for (i = 0; i < nr_eix; i++) {
		imsic_eix_write(IMSIC_EIP0 + i * 2, 0);
		imsic_eix_write(IMSIC_EIE0 + i * 2, 0);
#ifdef CONFIG_32BIT
		imsic_eix_write(IMSIC_EIP0 + i * 2 + 1, 0);
		imsic_eix_write(IMSIC_EIE0 + i * 2 + 1, 0);
#endif
	}

	csr_write(CSR_HSTATUS, old_hstatus);
	csr_write(CSR_VSISELECT, old_vsiselect);
}

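/*
 * imsic_vsfile_local_update() below ORs the MRIF contents into the
 * VS-file using the "set" accessors (csr_set() underneath) rather
 * than plain writes, so pending bits that hardware latched into the
 * file while the update was being prepared are preserved rather than
 * overwritten.
 */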
static void imsic_vsfile_local_update(int vsfile_hgei, u32 nr_eix,
				      struct imsic_mrif *mrif)
{
	u32 i;
	struct imsic_mrif_eix *eix;
	unsigned long new_hstatus, old_hstatus, old_vsiselect;

	/* We can only update if we have a HW IMSIC context */
	if (vsfile_hgei <= 0)
		return;

	/*
	 * We don't use imsic_mrif_atomic_xyz() functions to read values
	 * from the MRIF in this function because it is always called
	 * with a pointer to a temporary MRIF on the stack.
	 */

	old_vsiselect = csr_read(CSR_VSISELECT);
	old_hstatus = csr_read(CSR_HSTATUS);
	new_hstatus = old_hstatus & ~HSTATUS_VGEIN;
	new_hstatus |= ((unsigned long)vsfile_hgei) << HSTATUS_VGEIN_SHIFT;
	csr_write(CSR_HSTATUS, new_hstatus);

	for (i = 0; i < nr_eix; i++) {
		eix = &mrif->eix[i];
		imsic_eix_set(IMSIC_EIP0 + i * 2, eix->eip[0]);
		imsic_eix_set(IMSIC_EIE0 + i * 2, eix->eie[0]);
#ifdef CONFIG_32BIT
		imsic_eix_set(IMSIC_EIP0 + i * 2 + 1, eix->eip[1]);
		imsic_eix_set(IMSIC_EIE0 + i * 2 + 1, eix->eie[1]);
#endif
	}
	imsic_vs_csr_write(IMSIC_EITHRESHOLD, mrif->eithreshold);
	imsic_vs_csr_write(IMSIC_EIDELIVERY, mrif->eidelivery);

	csr_write(CSR_HSTATUS, old_hstatus);
	csr_write(CSR_VSISELECT, old_vsiselect);
}

static void imsic_vsfile_cleanup(struct imsic *imsic)
{
	int old_vsfile_hgei, old_vsfile_cpu;
	unsigned long flags;

	/*
	 * We don't use imsic_mrif_atomic_xyz() functions to clear the
	 * SW-file in this function because it is always called when the
	 * VCPU is being destroyed.
	 */

	write_lock_irqsave(&imsic->vsfile_lock, flags);
	old_vsfile_hgei = imsic->vsfile_hgei;
	old_vsfile_cpu = imsic->vsfile_cpu;
	imsic->vsfile_cpu = imsic->vsfile_hgei = -1;
	imsic->vsfile_va = NULL;
	imsic->vsfile_pa = 0;
	write_unlock_irqrestore(&imsic->vsfile_lock, flags);

	memset(imsic->swfile, 0, sizeof(*imsic->swfile));

	if (old_vsfile_cpu >= 0)
		kvm_riscv_aia_free_hgei(old_vsfile_cpu, old_vsfile_hgei);
}

static void imsic_swfile_extirq_update(struct kvm_vcpu *vcpu)
{
	struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
	struct imsic_mrif *mrif = imsic->swfile;
	unsigned long flags;

	/*
	 * The critical section is necessary during external interrupt
	 * updates to avoid the risk of losing interrupts due to potential
	 * interruptions between reading topei and updating pending status.
	 */

	raw_spin_lock_irqsave(&imsic->swfile_extirq_lock, flags);

	if (imsic_mrif_atomic_read(mrif, &mrif->eidelivery) &&
	    imsic_mrif_topei(mrif, imsic->nr_eix, imsic->nr_msis))
		kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
	else
		kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);

	raw_spin_unlock_irqrestore(&imsic->swfile_extirq_lock, flags);
}

static void imsic_swfile_read(struct kvm_vcpu *vcpu, bool clear,
			      struct imsic_mrif *mrif)
{
	struct imsic *imsic = vcpu->arch.aia_context.imsic_state;

	/*
	 * We don't use imsic_mrif_atomic_xyz() functions to read and
	 * write the SW-file and MRIF in this function because it is
	 * always called when the VCPU is not using the SW-file and
	 * the MRIF points to a temporary MRIF on the stack.
	 */

	memcpy(mrif, imsic->swfile, sizeof(*mrif));
	if (clear) {
		memset(imsic->swfile, 0, sizeof(*imsic->swfile));
		kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
	}
}

static void imsic_swfile_update(struct kvm_vcpu *vcpu,
				struct imsic_mrif *mrif)
{
	u32 i;
	struct imsic_mrif_eix *seix, *eix;
	struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
	struct imsic_mrif *smrif = imsic->swfile;

	imsic_mrif_atomic_write(smrif, &smrif->eidelivery, mrif->eidelivery);
	imsic_mrif_atomic_write(smrif, &smrif->eithreshold, mrif->eithreshold);
	for (i = 0; i < imsic->nr_eix; i++) {
		seix = &smrif->eix[i];
		eix = &mrif->eix[i];
		imsic_mrif_atomic_or(smrif, &seix->eip[0], eix->eip[0]);
		imsic_mrif_atomic_or(smrif, &seix->eie[0], eix->eie[0]);
#ifdef CONFIG_32BIT
		imsic_mrif_atomic_or(smrif, &seix->eip[1], eix->eip[1]);
		imsic_mrif_atomic_or(smrif, &seix->eie[1], eix->eie[1]);
#endif
	}

	imsic_swfile_extirq_update(vcpu);
}

bool kvm_riscv_vcpu_aia_imsic_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
	unsigned long flags;
	bool ret = false;

	/*
	 * The IMSIC SW-file directly injects interrupts via hvip, so
	 * we only check for an interrupt when the IMSIC VS-file is
	 * being used.
	 */

	read_lock_irqsave(&imsic->vsfile_lock, flags);
	if (imsic->vsfile_cpu > -1) {
		/*
		 * This function is typically called from kvm_vcpu_block() via
		 * kvm_arch_vcpu_runnable() upon a WFI trap. kvm_vcpu_block()
		 * can be preempted and the blocking VCPU might resume on a
		 * different CPU. This means it is possible that the current
		 * CPU does not match imsic->vsfile_cpu, hence this function
		 * must check imsic->vsfile_cpu before accessing the HGEIP CSR.
		 */
		if (imsic->vsfile_cpu != vcpu->cpu)
			ret = true;
		else
			ret = !!(csr_read(CSR_HGEIP) & BIT(imsic->vsfile_hgei));
	}
	read_unlock_irqrestore(&imsic->vsfile_lock, flags);

	return ret;
}

void kvm_riscv_vcpu_aia_imsic_load(struct kvm_vcpu *vcpu, int cpu)
{
	/*
	 * No need to explicitly clear HGEIE CSR bits because the
	 * hgei interrupt handler (aka hgei_interrupt()) will always
	 * clear them for us.
	 */
}

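/*
 * kvm_riscv_vcpu_aia_imsic_put() below enables the HGEIE bit for this
 * vCPU's VS-file only when the vCPU is blocking (e.g. in WFI), so that
 * a guest external interrupt arriving at the hardware file raises the
 * SGEI handled by hgei_interrupt() and wakes the blocked vCPU.
 */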
void kvm_riscv_vcpu_aia_imsic_put(struct kvm_vcpu *vcpu)
{
	struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
	unsigned long flags;

	if (!kvm_vcpu_is_blocking(vcpu))
		return;

	read_lock_irqsave(&imsic->vsfile_lock, flags);
	if (imsic->vsfile_cpu > -1)
		csr_set(CSR_HGEIE, BIT(imsic->vsfile_hgei));
	read_unlock_irqrestore(&imsic->vsfile_lock, flags);
}

void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	struct imsic_mrif tmrif;
	int old_vsfile_hgei, old_vsfile_cpu;
	struct imsic *imsic = vcpu->arch.aia_context.imsic_state;

	/* Read and clear IMSIC VS-file details */
	write_lock_irqsave(&imsic->vsfile_lock, flags);
	old_vsfile_hgei = imsic->vsfile_hgei;
	old_vsfile_cpu = imsic->vsfile_cpu;
	imsic->vsfile_cpu = imsic->vsfile_hgei = -1;
	imsic->vsfile_va = NULL;
	imsic->vsfile_pa = 0;
	write_unlock_irqrestore(&imsic->vsfile_lock, flags);

	/* Do nothing if there is no IMSIC VS-file to release */
	if (old_vsfile_cpu < 0)
		return;

	/*
	 * At this point, all interrupt producers are still using
	 * the old IMSIC VS-file, so we first re-direct all interrupt
	 * producers.
	 */

	/* Purge the G-stage mapping */
	kvm_riscv_mmu_iounmap(vcpu->kvm, vcpu->arch.aia_context.imsic_addr,
			      IMSIC_MMIO_PAGE_SZ);

	/* TODO: Purge the IOMMU mapping ??? */

	/*
	 * At this point, all interrupt producers have been re-directed
	 * to somewhere else, so we move register state from the old
	 * IMSIC VS-file to the IMSIC SW-file.
	 */

	/* Read and clear register state from the old IMSIC VS-file */
	memset(&tmrif, 0, sizeof(tmrif));
	imsic_vsfile_read(old_vsfile_hgei, old_vsfile_cpu, imsic->nr_hw_eix,
			  true, &tmrif);

	/* Update register state in the IMSIC SW-file */
	imsic_swfile_update(vcpu, &tmrif);

	/* Free-up the old IMSIC VS-file */
	kvm_riscv_aia_free_hgei(old_vsfile_cpu, old_vsfile_hgei);
}

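/*
 * Called on every VCPU load when the hardware guest interrupt file may
 * need to move. A rough sketch of the flow when the vCPU has migrated
 * to a new host CPU:
 *
 *   1. Allocate a VS-file (HGEI line) on the new CPU.
 *   2. Zero the new file and remap the guest's IMSIC MMIO page onto
 *      it, which re-directs interrupt producers.
 *   3. Publish the new file in the IMSIC context.
 *   4. Drain the old VS-file (or the SW-file) into the new file.
 *
 * Returns 1 to continue the run-loop, 0 to exit to user space, and a
 * negative error code on failure.
 */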
int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	phys_addr_t new_vsfile_pa;
	struct imsic_mrif tmrif;
	void __iomem *new_vsfile_va;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_run *run = vcpu->run;
	struct kvm_vcpu_aia *vaia = &vcpu->arch.aia_context;
	struct imsic *imsic = vaia->imsic_state;
	int ret = 0, new_vsfile_hgei = -1, old_vsfile_hgei, old_vsfile_cpu;

	/* Do nothing for emulation mode */
	if (kvm->arch.aia.mode == KVM_DEV_RISCV_AIA_MODE_EMUL)
		return 1;

	/* The IMSIC vCPU state may not be initialized yet */
	if (!imsic)
		return 1;

	/* Read old IMSIC VS-file details */
	read_lock_irqsave(&imsic->vsfile_lock, flags);
	old_vsfile_hgei = imsic->vsfile_hgei;
	old_vsfile_cpu = imsic->vsfile_cpu;
	read_unlock_irqrestore(&imsic->vsfile_lock, flags);

	/* Do nothing if we are continuing on the same CPU */
	if (old_vsfile_cpu == vcpu->cpu)
		return 1;

	/* Allocate a new IMSIC VS-file */
	ret = kvm_riscv_aia_alloc_hgei(vcpu->cpu, vcpu,
				       &new_vsfile_va, &new_vsfile_pa);
	if (ret <= 0) {
		/* For HW acceleration mode, we can't continue */
		if (kvm->arch.aia.mode == KVM_DEV_RISCV_AIA_MODE_HWACCEL) {
			run->fail_entry.hardware_entry_failure_reason =
							KVM_EXIT_FAIL_ENTRY_NO_VSFILE;
			run->fail_entry.cpu = vcpu->cpu;
			run->exit_reason = KVM_EXIT_FAIL_ENTRY;
			return 0;
		}

		/* Release the old IMSIC VS-file */
		if (old_vsfile_cpu >= 0)
			kvm_riscv_vcpu_aia_imsic_release(vcpu);

		/* For automatic mode, we continue */
		goto done;
	}
	new_vsfile_hgei = ret;

	/*
	 * At this point, all interrupt producers are still using
	 * the old IMSIC VS-file, so we first move all interrupt
	 * producers to the new IMSIC VS-file.
	 */

	/* Ensure the HGEIE CSR bit is zero before using the new IMSIC VS-file */
	csr_clear(CSR_HGEIE, BIT(new_vsfile_hgei));

	/* Zero-out the new IMSIC VS-file */
	imsic_vsfile_local_clear(new_vsfile_hgei, imsic->nr_hw_eix);

	/* Update the G-stage mapping for the new IMSIC VS-file */
	ret = kvm_riscv_mmu_ioremap(kvm, vcpu->arch.aia_context.imsic_addr,
				    new_vsfile_pa, IMSIC_MMIO_PAGE_SZ,
				    true, true);
	if (ret)
		goto fail_free_vsfile_hgei;

	/* TODO: Update the IOMMU mapping ??? */

	/* Update the new IMSIC VS-file details in the IMSIC context */
	write_lock_irqsave(&imsic->vsfile_lock, flags);
	imsic->vsfile_hgei = new_vsfile_hgei;
	imsic->vsfile_cpu = vcpu->cpu;
	imsic->vsfile_va = new_vsfile_va;
	imsic->vsfile_pa = new_vsfile_pa;
	write_unlock_irqrestore(&imsic->vsfile_lock, flags);

	/*
	 * At this point, all interrupt producers have been moved
	 * to the new IMSIC VS-file, so we move register state from
	 * the old IMSIC VS/SW-file to the new IMSIC VS-file.
	 */

	memset(&tmrif, 0, sizeof(tmrif));
	if (old_vsfile_cpu >= 0) {
		/* Read and clear register state from the old IMSIC VS-file */
		imsic_vsfile_read(old_vsfile_hgei, old_vsfile_cpu,
				  imsic->nr_hw_eix, true, &tmrif);

		/* Free-up the old IMSIC VS-file */
		kvm_riscv_aia_free_hgei(old_vsfile_cpu, old_vsfile_hgei);
	} else {
		/* Read and clear register state from the IMSIC SW-file */
		imsic_swfile_read(vcpu, true, &tmrif);
	}

	/* Restore register state in the new IMSIC VS-file */
	imsic_vsfile_local_update(new_vsfile_hgei, imsic->nr_hw_eix, &tmrif);

done:
	/* Set VCPU HSTATUS.VGEIN to the new IMSIC VS-file */
	vcpu->arch.guest_context.hstatus &= ~HSTATUS_VGEIN;
	if (new_vsfile_hgei > 0)
		vcpu->arch.guest_context.hstatus |=
			((unsigned long)new_vsfile_hgei) << HSTATUS_VGEIN_SHIFT;

	/* Continue the run-loop */
	return 1;

fail_free_vsfile_hgei:
	kvm_riscv_aia_free_hgei(vcpu->cpu, new_vsfile_hgei);
	return ret;
}

int kvm_riscv_vcpu_aia_imsic_rmw(struct kvm_vcpu *vcpu, unsigned long isel,
				 unsigned long *val, unsigned long new_val,
				 unsigned long wr_mask)
{
	u32 topei;
	struct imsic_mrif_eix *eix;
	int r, rc = KVM_INSN_CONTINUE_NEXT_SEPC;
	struct imsic *imsic = vcpu->arch.aia_context.imsic_state;

	/* If the IMSIC vCPU state is not initialized then forward to user space */
	if (!imsic)
		return KVM_INSN_EXIT_TO_USER_SPACE;

	if (isel == KVM_RISCV_AIA_IMSIC_TOPEI) {
		/* Read the pending and enabled interrupt with the highest priority */
		topei = imsic_mrif_topei(imsic->swfile, imsic->nr_eix,
					 imsic->nr_msis);
		if (val)
			*val = topei;

		/* Writes ignore the value and clear the top pending interrupt */
		if (topei && wr_mask) {
			topei >>= TOPEI_ID_SHIFT;
			if (topei) {
				eix = &imsic->swfile->eix[topei /
							  BITS_PER_TYPE(u64)];
				clear_bit(topei & (BITS_PER_TYPE(u64) - 1),
					  eix->eip);
			}
		}
	} else {
		r = imsic_mrif_rmw(imsic->swfile, imsic->nr_eix, isel,
				   val, new_val, wr_mask);
		/* Forward unknown IMSIC registers to user space */
		if (r)
			rc = (r == -ENOENT) ? 0 : KVM_INSN_ILLEGAL_TRAP;
	}

	if (wr_mask)
		imsic_swfile_extirq_update(vcpu);

	return rc;
}

int kvm_riscv_aia_imsic_rw_attr(struct kvm *kvm, unsigned long type,
				bool write, unsigned long *val)
{
	u32 isel, vcpu_id;
	unsigned long flags;
	struct imsic *imsic;
	struct kvm_vcpu *vcpu;
	int rc, vsfile_hgei, vsfile_cpu;

	if (!kvm_riscv_aia_initialized(kvm))
		return -ENODEV;

	vcpu_id = KVM_DEV_RISCV_AIA_IMSIC_GET_VCPU(type);
	vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
	if (!vcpu)
		return -ENODEV;

	imsic = vcpu->arch.aia_context.imsic_state;
	if (!imsic)
		return -ENODEV;
	isel = KVM_DEV_RISCV_AIA_IMSIC_GET_ISEL(type);

	read_lock_irqsave(&imsic->vsfile_lock, flags);

	rc = 0;
	vsfile_hgei = imsic->vsfile_hgei;
	vsfile_cpu = imsic->vsfile_cpu;
	if (vsfile_cpu < 0) {
		if (write) {
			rc = imsic_mrif_rmw(imsic->swfile, imsic->nr_eix,
					    isel, NULL, *val, -1UL);
			imsic_swfile_extirq_update(vcpu);
		} else {
			rc = imsic_mrif_rmw(imsic->swfile, imsic->nr_eix,
					    isel, val, 0, 0);
		}
	}

	read_unlock_irqrestore(&imsic->vsfile_lock, flags);

	if (!rc && vsfile_cpu >= 0)
		rc = imsic_vsfile_rw(vsfile_hgei, vsfile_cpu, imsic->nr_eix,
				     isel, write, val);

	return rc;
}

int kvm_riscv_aia_imsic_has_attr(struct kvm *kvm, unsigned long type)
{
	u32 isel, vcpu_id;
	struct imsic *imsic;
	struct kvm_vcpu *vcpu;

	if (!kvm_riscv_aia_initialized(kvm))
		return -ENODEV;

	vcpu_id = KVM_DEV_RISCV_AIA_IMSIC_GET_VCPU(type);
	vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
	if (!vcpu)
		return -ENODEV;

	imsic = vcpu->arch.aia_context.imsic_state;
	if (!imsic)
		return -ENODEV;

	isel = KVM_DEV_RISCV_AIA_IMSIC_GET_ISEL(type);
	return imsic_mrif_isel_check(imsic->nr_eix, isel);
}

void kvm_riscv_vcpu_aia_imsic_reset(struct kvm_vcpu *vcpu)
{
	struct imsic *imsic = vcpu->arch.aia_context.imsic_state;

	if (!imsic)
		return;

	kvm_riscv_vcpu_aia_imsic_release(vcpu);

	memset(imsic->swfile, 0, sizeof(*imsic->swfile));
}

int kvm_riscv_vcpu_aia_imsic_inject(struct kvm_vcpu *vcpu,
				    u32 guest_index, u32 offset, u32 iid)
{
	unsigned long flags;
	struct imsic_mrif_eix *eix;
	struct imsic *imsic = vcpu->arch.aia_context.imsic_state;

	/* We only emulate one IMSIC MMIO page for each Guest VCPU */
	if (!imsic || !iid || guest_index ||
	    (offset != IMSIC_MMIO_SETIPNUM_LE &&
	     offset != IMSIC_MMIO_SETIPNUM_BE))
		return -ENODEV;

	iid = (offset == IMSIC_MMIO_SETIPNUM_BE) ? __swab32(iid) : iid;
	if (imsic->nr_msis <= iid)
		return -EINVAL;

	read_lock_irqsave(&imsic->vsfile_lock, flags);

	if (imsic->vsfile_cpu >= 0) {
		writel(iid, imsic->vsfile_va + IMSIC_MMIO_SETIPNUM_LE);
	} else {
		eix = &imsic->swfile->eix[iid / BITS_PER_TYPE(u64)];
		set_bit(iid & (BITS_PER_TYPE(u64) - 1), eix->eip);
		imsic_swfile_extirq_update(vcpu);
	}

	read_unlock_irqrestore(&imsic->vsfile_lock, flags);

	return 0;
}

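/*
 * MMIO callbacks for the emulated IMSIC page: aligned 32-bit loads
 * always return zero, while aligned 32-bit stores are converted into
 * MSIs and fed back through kvm_riscv_aia_inject_msi().
 */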
static int imsic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			   gpa_t addr, int len, void *val)
{
	if (len != 4 || (addr & 0x3) != 0)
		return -EOPNOTSUPP;

	*((u32 *)val) = 0;

	return 0;
}

static int imsic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			    gpa_t addr, int len, const void *val)
{
	struct kvm_msi msi = { 0 };

	if (len != 4 || (addr & 0x3) != 0)
		return -EOPNOTSUPP;

	msi.address_hi = addr >> 32;
	msi.address_lo = (u32)addr;
	msi.data = *((const u32 *)val);
	kvm_riscv_aia_inject_msi(vcpu->kvm, &msi);

	return 0;
}

static struct kvm_io_device_ops imsic_iodev_ops = {
	.read = imsic_mmio_read,
	.write = imsic_mmio_write,
};


int kvm_riscv_vcpu_aia_imsic_init(struct kvm_vcpu *vcpu)
{
	int ret = 0;
	struct imsic *imsic;
	struct page *swfile_page;
	struct kvm *kvm = vcpu->kvm;

	/* Fail if we have zero IDs */
	if (!kvm->arch.aia.nr_ids)
		return -EINVAL;

	/* Allocate the IMSIC context */
	imsic = kzalloc_obj(*imsic);
	if (!imsic)
		return -ENOMEM;
	vcpu->arch.aia_context.imsic_state = imsic;

	/* Setup the IMSIC context */
	imsic->nr_msis = kvm->arch.aia.nr_ids + 1;
	rwlock_init(&imsic->vsfile_lock);
	imsic->nr_eix = BITS_TO_U64(imsic->nr_msis);
	imsic->nr_hw_eix = BITS_TO_U64(kvm_riscv_aia_max_ids);
	imsic->vsfile_hgei = imsic->vsfile_cpu = -1;

	/* Setup the IMSIC SW-file */
	swfile_page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
				  get_order(sizeof(*imsic->swfile)));
	if (!swfile_page) {
		ret = -ENOMEM;
		goto fail_free_imsic;
	}
	imsic->swfile = page_to_virt(swfile_page);
	imsic->swfile_pa = page_to_phys(swfile_page);
	raw_spin_lock_init(&imsic->swfile_extirq_lock);

	/* Setup the IO device */
	kvm_iodevice_init(&imsic->iodev, &imsic_iodev_ops);
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS,
				      vcpu->arch.aia_context.imsic_addr,
				      KVM_DEV_RISCV_IMSIC_SIZE,
				      &imsic->iodev);
	mutex_unlock(&kvm->slots_lock);
	if (ret)
		goto fail_free_swfile;

	return 0;

fail_free_swfile:
	free_pages((unsigned long)imsic->swfile,
		   get_order(sizeof(*imsic->swfile)));
fail_free_imsic:
	vcpu->arch.aia_context.imsic_state = NULL;
	kfree(imsic);
	return ret;
}

void kvm_riscv_vcpu_aia_imsic_cleanup(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct imsic *imsic = vcpu->arch.aia_context.imsic_state;

	if (!imsic)
		return;

	imsic_vsfile_cleanup(imsic);

	mutex_lock(&kvm->slots_lock);
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &imsic->iodev);
	mutex_unlock(&kvm->slots_lock);

	free_pages((unsigned long)imsic->swfile,
		   get_order(sizeof(*imsic->swfile)));

	vcpu->arch.aia_context.imsic_state = NULL;
	kfree(imsic);
}