xref: /linux/arch/riscv/kvm/aia_aplic.c (revision 11e8c7e9471cf8e6ae6ec7324a3174191cd965e3)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2021 Western Digital Corporation or its affiliates.
4  * Copyright (C) 2022 Ventana Micro Systems Inc.
5  *
6  * Authors:
7  *	Anup Patel <apatel@ventanamicro.com>
8  */
9 
10 #include <linux/irqchip/riscv-aplic.h>
11 #include <linux/kvm_host.h>
12 #include <linux/math.h>
13 #include <linux/nospec.h>
14 #include <linux/spinlock.h>
15 #include <linux/swab.h>
16 #include <kvm/iodev.h>
17 
/*
 * Per-IRQ state of the emulated APLIC.  The raw spinlock serializes
 * updates to sourcecfg/state/target between MMIO emulation and
 * line-level interrupt injection.
 */
struct aplic_irq {
	raw_spinlock_t lock;
	u32 sourcecfg;	/* emulated sourcecfg register (source-mode bits only) */
	u32 state;	/* software IRQ state; bits defined below */
#define APLIC_IRQ_STATE_PENDING		BIT(0)
#define APLIC_IRQ_STATE_ENABLED		BIT(1)
#define APLIC_IRQ_STATE_ENPEND		(APLIC_IRQ_STATE_PENDING | \
					 APLIC_IRQ_STATE_ENABLED)
#define APLIC_IRQ_STATE_INPUT		BIT(8)	/* raw (un-rectified) input level */
	u32 target;	/* emulated target register (hart idx, guest idx, EIID) */
};
29 
/* Global state of the emulated APLIC for one VM. */
struct aplic {
	struct kvm_io_device iodev;	/* MMIO device registered on KVM_MMIO_BUS */

	u32 domaincfg;	/* emulated domaincfg; only the IE bit is writable */
	u32 genmsi;	/* last value written to the genmsi register */

	u32 nr_irqs;	/* number of IRQ slots = nr_sources + 1 (slot 0 unused) */
	u32 nr_words;	/* number of 32-bit bitmap words covering nr_irqs */
	struct aplic_irq *irqs;	/* per-IRQ state array, indexed by source number */
};
40 
/* Read the emulated sourcecfg register of @irq (0 if @irq is invalid). */
static u32 aplic_read_sourcecfg(struct aplic *aplic, u32 irq)
{
	struct aplic_irq *irqd;
	unsigned long flags;
	u32 sourcecfg;

	if (!irq || irq >= aplic->nr_irqs)
		return 0;
	irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];

	raw_spin_lock_irqsave(&irqd->lock, flags);
	sourcecfg = irqd->sourcecfg;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);

	return sourcecfg;
}
57 
/*
 * Write the emulated sourcecfg register of @irq.  A write with the
 * delegate bit (APLIC_SOURCECFG_D) set is forced to zero; otherwise
 * only the source-mode field is retained.
 */
static void aplic_write_sourcecfg(struct aplic *aplic, u32 irq, u32 val)
{
	struct aplic_irq *irqd;
	unsigned long flags;

	if (!irq || irq >= aplic->nr_irqs)
		return;
	irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];

	val = (val & APLIC_SOURCECFG_D) ? 0 : (val & APLIC_SOURCECFG_SM_MASK);

	raw_spin_lock_irqsave(&irqd->lock, flags);
	irqd->sourcecfg = val;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);
}
76 
/* Read the emulated target register of @irq (0 if @irq is invalid). */
static u32 aplic_read_target(struct aplic *aplic, u32 irq)
{
	struct aplic_irq *irqd;
	unsigned long flags;
	u32 target;

	if (!irq || irq >= aplic->nr_irqs)
		return 0;
	irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];

	raw_spin_lock_irqsave(&irqd->lock, flags);
	target = irqd->target;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);

	return target;
}
93 
/*
 * Write the emulated target register of @irq, keeping only the EIID,
 * hart-index, and guest-index fields.
 */
static void aplic_write_target(struct aplic *aplic, u32 irq, u32 val)
{
	const u32 mask = APLIC_TARGET_EIID_MASK |
			 (APLIC_TARGET_HART_IDX_MASK << APLIC_TARGET_HART_IDX_SHIFT) |
			 (APLIC_TARGET_GUEST_IDX_MASK << APLIC_TARGET_GUEST_IDX_SHIFT);
	struct aplic_irq *irqd;
	unsigned long flags;

	if (!irq || irq >= aplic->nr_irqs)
		return;
	irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];

	raw_spin_lock_irqsave(&irqd->lock, flags);
	irqd->target = val & mask;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);
}
111 
aplic_read_pending(struct aplic * aplic,u32 irq)112 static bool aplic_read_pending(struct aplic *aplic, u32 irq)
113 {
114 	bool ret;
115 	unsigned long flags;
116 	struct aplic_irq *irqd;
117 
118 	if (!irq || aplic->nr_irqs <= irq)
119 		return false;
120 	irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
121 
122 	raw_spin_lock_irqsave(&irqd->lock, flags);
123 	ret = (irqd->state & APLIC_IRQ_STATE_PENDING) ? true : false;
124 	raw_spin_unlock_irqrestore(&irqd->lock, flags);
125 
126 	return ret;
127 }
128 
/*
 * Set or clear the PENDING bit of @irq, honouring the source mode.
 *
 * Inactive sources never latch a pending bit.  For level-triggered
 * sources, setting pending is only permitted while the rectified input
 * is asserted (input high for LEVEL_HIGH, input low for LEVEL_LOW);
 * clearing pending is always permitted.
 */
static void aplic_write_pending(struct aplic *aplic, u32 irq, bool pending)
{
	unsigned long flags, sm;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return;
	irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];

	raw_spin_lock_irqsave(&irqd->lock, flags);

	sm = irqd->sourcecfg & APLIC_SOURCECFG_SM_MASK;
	if (sm == APLIC_SOURCECFG_SM_INACTIVE)
		goto skip_write_pending;

	if (sm == APLIC_SOURCECFG_SM_LEVEL_HIGH ||
	    sm == APLIC_SOURCECFG_SM_LEVEL_LOW) {
		/* Clearing pending is always allowed for level sources. */
		if (!pending)
			goto noskip_write_pending;
		/* Setting pending: skip when the input level is de-asserted. */
		if ((irqd->state & APLIC_IRQ_STATE_INPUT) &&
		    sm == APLIC_SOURCECFG_SM_LEVEL_LOW)
			goto skip_write_pending;
		if (!(irqd->state & APLIC_IRQ_STATE_INPUT) &&
		    sm == APLIC_SOURCECFG_SM_LEVEL_HIGH)
			goto skip_write_pending;
	}

noskip_write_pending:
	if (pending)
		irqd->state |= APLIC_IRQ_STATE_PENDING;
	else
		irqd->state &= ~APLIC_IRQ_STATE_PENDING;

skip_write_pending:
	raw_spin_unlock_irqrestore(&irqd->lock, flags);
}
165 
aplic_read_enabled(struct aplic * aplic,u32 irq)166 static bool aplic_read_enabled(struct aplic *aplic, u32 irq)
167 {
168 	bool ret;
169 	unsigned long flags;
170 	struct aplic_irq *irqd;
171 
172 	if (!irq || aplic->nr_irqs <= irq)
173 		return false;
174 	irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
175 
176 	raw_spin_lock_irqsave(&irqd->lock, flags);
177 	ret = (irqd->state & APLIC_IRQ_STATE_ENABLED) ? true : false;
178 	raw_spin_unlock_irqrestore(&irqd->lock, flags);
179 
180 	return ret;
181 }
182 
/* Set or clear the ENABLED bit of @irq (no-op if @irq is invalid). */
static void aplic_write_enabled(struct aplic *aplic, u32 irq, bool enabled)
{
	struct aplic_irq *irqd;
	unsigned long flags;

	if (!irq || irq >= aplic->nr_irqs)
		return;
	irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];

	raw_spin_lock_irqsave(&irqd->lock, flags);
	irqd->state = enabled ? (irqd->state | APLIC_IRQ_STATE_ENABLED) :
				(irqd->state & ~APLIC_IRQ_STATE_ENABLED);
	raw_spin_unlock_irqrestore(&irqd->lock, flags);
}
199 
/*
 * Return the rectified input value of @irq: the raw input line XORed
 * with the inversion implied by the source mode (LEVEL_LOW and
 * EDGE_FALL are treated as active-low).  Delegated or inactive sources
 * read as false, as do out-of-range IRQ numbers.
 */
static bool aplic_read_input(struct aplic *aplic, u32 irq)
{
	u32 sourcecfg, sm, raw_input, irq_inverted;
	struct aplic_irq *irqd;
	unsigned long flags;
	bool ret = false;

	if (!irq || aplic->nr_irqs <= irq)
		return false;
	irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];

	raw_spin_lock_irqsave(&irqd->lock, flags);

	sourcecfg = irqd->sourcecfg;
	if (sourcecfg & APLIC_SOURCECFG_D)
		goto skip;

	sm = sourcecfg & APLIC_SOURCECFG_SM_MASK;
	if (sm == APLIC_SOURCECFG_SM_INACTIVE)
		goto skip;

	/* Rectify: invert the raw input for active-low source modes. */
	raw_input = (irqd->state & APLIC_IRQ_STATE_INPUT) ? 1 : 0;
	irq_inverted = (sm == APLIC_SOURCECFG_SM_LEVEL_LOW ||
			sm == APLIC_SOURCECFG_SM_EDGE_FALL) ? 1 : 0;
	ret = !!(raw_input ^ irq_inverted);

skip:
	raw_spin_unlock_irqrestore(&irqd->lock, flags);

	return ret;
}
231 
/* Decode the target register fields and deliver the MSI to the guest. */
static void aplic_inject_msi(struct kvm *kvm, u32 irq, u32 target)
{
	u32 hart_idx, guest_idx, eiid;

	hart_idx = (target >> APLIC_TARGET_HART_IDX_SHIFT) &
		   APLIC_TARGET_HART_IDX_MASK;
	guest_idx = (target >> APLIC_TARGET_GUEST_IDX_SHIFT) &
		    APLIC_TARGET_GUEST_IDX_MASK;
	eiid = target & APLIC_TARGET_EIID_MASK;

	kvm_riscv_aia_inject_msi_by_id(kvm, hart_idx, guest_idx, eiid);
}
243 
/*
 * Scan IRQs [@first, @last] and deliver an MSI for every source that is
 * both enabled and pending, clearing its pending bit in the process.
 * Does nothing while interrupt delivery is disabled in domaincfg.
 */
static void aplic_update_irq_range(struct kvm *kvm, u32 first, u32 last)
{
	bool inject;
	u32 irq, target;
	unsigned long flags;
	struct aplic_irq *irqd;
	struct aplic *aplic = kvm->arch.aia.aplic_state;

	if (!(aplic->domaincfg & APLIC_DOMAINCFG_IE))
		return;

	for (irq = first; irq <= last; irq++) {
		if (!irq || aplic->nr_irqs <= irq)
			continue;
		irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];

		raw_spin_lock_irqsave(&irqd->lock, flags);

		inject = false;
		target = irqd->target;
		if ((irqd->state & APLIC_IRQ_STATE_ENPEND) ==
		    APLIC_IRQ_STATE_ENPEND) {
			irqd->state &= ~APLIC_IRQ_STATE_PENDING;
			inject = true;
		}

		raw_spin_unlock_irqrestore(&irqd->lock, flags);

		/* MSI delivery happens outside the per-IRQ lock. */
		if (inject)
			aplic_inject_msi(kvm, irq, target);
	}
}
276 
/*
 * Update the input line level of @source and, if this latches a new
 * pending condition on an enabled source while domain interrupt
 * delivery is on, inject the corresponding MSI into the guest.
 *
 * Returns 0 on success, -ENODEV if no APLIC exists or @source is out
 * of range.  Delegated sources are silently ignored.
 */
int kvm_riscv_aia_aplic_inject(struct kvm *kvm, u32 source, bool level)
{
	u32 target;
	bool inject = false, ie;
	unsigned long flags;
	struct aplic_irq *irqd;
	struct aplic *aplic = kvm->arch.aia.aplic_state;

	if (!aplic || !source || (aplic->nr_irqs <= source))
		return -ENODEV;
	irqd = &aplic->irqs[array_index_nospec(source, aplic->nr_irqs)];
	ie = (aplic->domaincfg & APLIC_DOMAINCFG_IE) ? true : false;

	raw_spin_lock_irqsave(&irqd->lock, flags);

	if (irqd->sourcecfg & APLIC_SOURCECFG_D)
		goto skip_unlock;

	/*
	 * Latch PENDING according to the source mode: edge modes fire on
	 * the matching transition of the stored input level, level modes
	 * fire while the line is at the active level.
	 */
	switch (irqd->sourcecfg & APLIC_SOURCECFG_SM_MASK) {
	case APLIC_SOURCECFG_SM_EDGE_RISE:
		if (level && !(irqd->state & APLIC_IRQ_STATE_INPUT) &&
		    !(irqd->state & APLIC_IRQ_STATE_PENDING))
			irqd->state |= APLIC_IRQ_STATE_PENDING;
		break;
	case APLIC_SOURCECFG_SM_EDGE_FALL:
		if (!level && (irqd->state & APLIC_IRQ_STATE_INPUT) &&
		    !(irqd->state & APLIC_IRQ_STATE_PENDING))
			irqd->state |= APLIC_IRQ_STATE_PENDING;
		break;
	case APLIC_SOURCECFG_SM_LEVEL_HIGH:
		if (level && !(irqd->state & APLIC_IRQ_STATE_PENDING))
			irqd->state |= APLIC_IRQ_STATE_PENDING;
		break;
	case APLIC_SOURCECFG_SM_LEVEL_LOW:
		if (!level && !(irqd->state & APLIC_IRQ_STATE_PENDING))
			irqd->state |= APLIC_IRQ_STATE_PENDING;
		break;
	}

	/* Record the new raw input level for future edge detection. */
	if (level)
		irqd->state |= APLIC_IRQ_STATE_INPUT;
	else
		irqd->state &= ~APLIC_IRQ_STATE_INPUT;

	target = irqd->target;
	if (ie && ((irqd->state & APLIC_IRQ_STATE_ENPEND) ==
		   APLIC_IRQ_STATE_ENPEND)) {
		irqd->state &= ~APLIC_IRQ_STATE_PENDING;
		inject = true;
	}

skip_unlock:
	raw_spin_unlock_irqrestore(&irqd->lock, flags);

	/* MSI delivery happens outside the per-IRQ lock. */
	if (inject)
		aplic_inject_msi(kvm, source, target);

	return 0;
}
336 
/* Gather the rectified inputs of IRQs [word*32, word*32+31] into a bitmap. */
static u32 aplic_read_input_word(struct aplic *aplic, u32 word)
{
	u32 bit, val = 0;

	for (bit = 0; bit < 32; bit++) {
		if (aplic_read_input(aplic, word * 32 + bit))
			val |= BIT(bit);
	}

	return val;
}
346 
/* Gather the pending bits of IRQs [word*32, word*32+31] into a bitmap. */
static u32 aplic_read_pending_word(struct aplic *aplic, u32 word)
{
	u32 bit, val = 0;

	for (bit = 0; bit < 32; bit++) {
		if (aplic_read_pending(aplic, word * 32 + bit))
			val |= BIT(bit);
	}

	return val;
}
356 
/* Apply @pending to every IRQ whose bit is set in the @val bitmap. */
static void aplic_write_pending_word(struct aplic *aplic, u32 word,
				     u32 val, bool pending)
{
	u32 bit;

	for (bit = 0; bit < 32; bit++) {
		if (!(val & BIT(bit)))
			continue;
		aplic_write_pending(aplic, word * 32 + bit, pending);
	}
}
367 
/* Gather the enabled bits of IRQs [word*32, word*32+31] into a bitmap. */
static u32 aplic_read_enabled_word(struct aplic *aplic, u32 word)
{
	u32 bit, val = 0;

	for (bit = 0; bit < 32; bit++) {
		if (aplic_read_enabled(aplic, word * 32 + bit))
			val |= BIT(bit);
	}

	return val;
}
377 
/* Apply @enabled to every IRQ whose bit is set in the @val bitmap. */
static void aplic_write_enabled_word(struct aplic *aplic, u32 word,
				     u32 val, bool enabled)
{
	u32 bit;

	for (bit = 0; bit < 32; bit++) {
		if (!(val & BIT(bit)))
			continue;
		aplic_write_enabled(aplic, word * 32 + bit, enabled);
	}
}
388 
/*
 * Emulate a 32-bit read at register offset @off, storing the result in
 * @val32.  Returns 0 on success, -EOPNOTSUPP for a misaligned offset,
 * -ENODEV for an unknown register.  Write-only registers (SETIPNUM,
 * CLRIPNUM, SETIENUM, CLRIENUM, CLRIE, SETIPNUM_LE/BE) read as zero.
 */
static int aplic_mmio_read_offset(struct kvm *kvm, gpa_t off, u32 *val32)
{
	u32 i;
	struct aplic *aplic = kvm->arch.aia.aplic_state;

	if ((off & 0x3) != 0)
		return -EOPNOTSUPP;

	if (off == APLIC_DOMAINCFG) {
		/* DM always reads as set (MSI delivery mode). */
		*val32 = APLIC_DOMAINCFG_RDONLY |
			 aplic->domaincfg | APLIC_DOMAINCFG_DM;
	} else if ((off >= APLIC_SOURCECFG_BASE) &&
		 (off < (APLIC_SOURCECFG_BASE + (aplic->nr_irqs - 1) * 4))) {
		/* sourcecfg[1] lives at the base offset, hence the +1. */
		i = ((off - APLIC_SOURCECFG_BASE) >> 2) + 1;
		*val32 = aplic_read_sourcecfg(aplic, i);
	} else if ((off >= APLIC_SETIP_BASE) &&
		   (off < (APLIC_SETIP_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_SETIP_BASE) >> 2;
		*val32 = aplic_read_pending_word(aplic, i);
	} else if (off == APLIC_SETIPNUM) {
		*val32 = 0;
	} else if ((off >= APLIC_CLRIP_BASE) &&
		   (off < (APLIC_CLRIP_BASE + aplic->nr_words * 4))) {
		/* in_clrip[x] reads return the rectified input values. */
		i = (off - APLIC_CLRIP_BASE) >> 2;
		*val32 = aplic_read_input_word(aplic, i);
	} else if (off == APLIC_CLRIPNUM) {
		*val32 = 0;
	} else if ((off >= APLIC_SETIE_BASE) &&
		   (off < (APLIC_SETIE_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_SETIE_BASE) >> 2;
		*val32 = aplic_read_enabled_word(aplic, i);
	} else if (off == APLIC_SETIENUM) {
		*val32 = 0;
	} else if ((off >= APLIC_CLRIE_BASE) &&
		   (off < (APLIC_CLRIE_BASE + aplic->nr_words * 4))) {
		*val32 = 0;
	} else if (off == APLIC_CLRIENUM) {
		*val32 = 0;
	} else if (off == APLIC_SETIPNUM_LE) {
		*val32 = 0;
	} else if (off == APLIC_SETIPNUM_BE) {
		*val32 = 0;
	} else if (off == APLIC_GENMSI) {
		*val32 = aplic->genmsi;
	} else if ((off >= APLIC_TARGET_BASE) &&
		   (off < (APLIC_TARGET_BASE + (aplic->nr_irqs - 1) * 4))) {
		/* target[1] lives at the base offset, hence the +1. */
		i = ((off - APLIC_TARGET_BASE) >> 2) + 1;
		*val32 = aplic_read_target(aplic, i);
	} else
		return -ENODEV;

	return 0;
}
442 
/* KVM MMIO bus read callback; translates the GPA into a register offset. */
static int aplic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			   gpa_t addr, int len, void *val)
{
	struct kvm *kvm = vcpu->kvm;

	/* Only 32-bit accesses are emulated. */
	if (len != 4)
		return -EOPNOTSUPP;

	return aplic_mmio_read_offset(kvm, addr - kvm->arch.aia.aplic_addr,
				      val);
}
453 
/*
 * Emulate a 32-bit write of @val32 at register offset @off.  Returns 0
 * on success, -EOPNOTSUPP for a misaligned offset, -ENODEV for an
 * unknown register.  After any successful write, all sources are
 * re-evaluated since the write may have made an IRQ deliverable.
 */
static int aplic_mmio_write_offset(struct kvm *kvm, gpa_t off, u32 val32)
{
	u32 i;
	struct aplic *aplic = kvm->arch.aia.aplic_state;

	if ((off & 0x3) != 0)
		return -EOPNOTSUPP;

	if (off == APLIC_DOMAINCFG) {
		/* Only IE bit writeable */
		aplic->domaincfg = val32 & APLIC_DOMAINCFG_IE;
	} else if ((off >= APLIC_SOURCECFG_BASE) &&
		 (off < (APLIC_SOURCECFG_BASE + (aplic->nr_irqs - 1) * 4))) {
		/* sourcecfg[1] lives at the base offset, hence the +1. */
		i = ((off - APLIC_SOURCECFG_BASE) >> 2) + 1;
		aplic_write_sourcecfg(aplic, i, val32);
	} else if ((off >= APLIC_SETIP_BASE) &&
		   (off < (APLIC_SETIP_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_SETIP_BASE) >> 2;
		aplic_write_pending_word(aplic, i, val32, true);
	} else if (off == APLIC_SETIPNUM) {
		aplic_write_pending(aplic, val32, true);
	} else if ((off >= APLIC_CLRIP_BASE) &&
		   (off < (APLIC_CLRIP_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_CLRIP_BASE) >> 2;
		aplic_write_pending_word(aplic, i, val32, false);
	} else if (off == APLIC_CLRIPNUM) {
		aplic_write_pending(aplic, val32, false);
	} else if ((off >= APLIC_SETIE_BASE) &&
		   (off < (APLIC_SETIE_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_SETIE_BASE) >> 2;
		aplic_write_enabled_word(aplic, i, val32, true);
	} else if (off == APLIC_SETIENUM) {
		aplic_write_enabled(aplic, val32, true);
	} else if ((off >= APLIC_CLRIE_BASE) &&
		   (off < (APLIC_CLRIE_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_CLRIE_BASE) >> 2;
		aplic_write_enabled_word(aplic, i, val32, false);
	} else if (off == APLIC_CLRIENUM) {
		aplic_write_enabled(aplic, val32, false);
	} else if (off == APLIC_SETIPNUM_LE) {
		aplic_write_pending(aplic, val32, true);
	} else if (off == APLIC_SETIPNUM_BE) {
		/* Big-endian variant: byte-swap the IRQ number first. */
		aplic_write_pending(aplic, __swab32(val32), true);
	} else if (off == APLIC_GENMSI) {
		/* Guest index is forced to zero for generated MSIs. */
		aplic->genmsi = val32 & ~(APLIC_TARGET_GUEST_IDX_MASK <<
					  APLIC_TARGET_GUEST_IDX_SHIFT);
		kvm_riscv_aia_inject_msi_by_id(kvm,
				val32 >> APLIC_TARGET_HART_IDX_SHIFT, 0,
				val32 & APLIC_TARGET_EIID_MASK);
	} else if ((off >= APLIC_TARGET_BASE) &&
		   (off < (APLIC_TARGET_BASE + (aplic->nr_irqs - 1) * 4))) {
		/* target[1] lives at the base offset, hence the +1. */
		i = ((off - APLIC_TARGET_BASE) >> 2) + 1;
		aplic_write_target(aplic, i, val32);
	} else
		return -ENODEV;

	/* Re-scan all sources: the write may have unmasked a pending IRQ. */
	aplic_update_irq_range(kvm, 1, aplic->nr_irqs - 1);

	return 0;
}
514 
/* KVM MMIO bus write callback; translates the GPA into a register offset. */
static int aplic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			    gpa_t addr, int len, const void *val)
{
	struct kvm *kvm = vcpu->kvm;

	/* Only 32-bit accesses are emulated. */
	if (len != 4)
		return -EOPNOTSUPP;

	return aplic_mmio_write_offset(kvm, addr - kvm->arch.aia.aplic_addr,
				       *(const u32 *)val);
}
525 
/* MMIO callbacks used when the APLIC is registered on the KVM MMIO bus. */
static struct kvm_io_device_ops aplic_iodoev_ops = {
	.read = aplic_mmio_read,
	.write = aplic_mmio_write,
};
530 
/*
 * Set an APLIC register via the KVM device attribute interface.
 * @type is the register offset and @v the 32-bit value to write.
 * Returns 0 on success, -ENODEV if no APLIC exists or the offset is
 * unknown, -EOPNOTSUPP for a misaligned offset.
 */
int kvm_riscv_aia_aplic_set_attr(struct kvm *kvm, unsigned long type, u32 v)
{
	if (!kvm->arch.aia.aplic_state)
		return -ENODEV;

	/* Reuse the MMIO emulation; its error code is returned directly. */
	return aplic_mmio_write_offset(kvm, type, v);
}
544 
/*
 * Get an APLIC register via the KVM device attribute interface.
 * @type is the register offset; the value read is stored in @v.
 * Returns 0 on success, -ENODEV if no APLIC exists or the offset is
 * unknown, -EOPNOTSUPP for a misaligned offset.
 */
int kvm_riscv_aia_aplic_get_attr(struct kvm *kvm, unsigned long type, u32 *v)
{
	if (!kvm->arch.aia.aplic_state)
		return -ENODEV;

	/* Reuse the MMIO emulation; its error code is returned directly. */
	return aplic_mmio_read_offset(kvm, type, v);
}
558 
kvm_riscv_aia_aplic_has_attr(struct kvm * kvm,unsigned long type)559 int kvm_riscv_aia_aplic_has_attr(struct kvm *kvm, unsigned long type)
560 {
561 	int rc;
562 	u32 val;
563 
564 	if (!kvm->arch.aia.aplic_state)
565 		return -ENODEV;
566 
567 	rc = aplic_mmio_read_offset(kvm, type, &val);
568 	if (rc)
569 		return rc;
570 
571 	return 0;
572 }
573 
/*
 * Allocate and register the emulated APLIC for @kvm: per-IRQ state,
 * the MMIO device on the guest physical bus, and the default IRQ
 * routing.  Returns 0 on success or a negative errno; on failure all
 * partially initialized state is torn down.
 */
int kvm_riscv_aia_aplic_init(struct kvm *kvm)
{
	int i, ret = 0;
	struct aplic *aplic;

	/* Do nothing if we have zero sources */
	if (!kvm->arch.aia.nr_sources)
		return 0;

	/* Allocate APLIC global state */
	aplic = kzalloc_obj(*aplic);
	if (!aplic)
		return -ENOMEM;
	kvm->arch.aia.aplic_state = aplic;

	/* Setup APLIC IRQs (slot 0 is reserved, hence nr_sources + 1) */
	aplic->nr_irqs = kvm->arch.aia.nr_sources + 1;
	aplic->nr_words = DIV_ROUND_UP(aplic->nr_irqs, 32);
	aplic->irqs = kzalloc_objs(*aplic->irqs, aplic->nr_irqs);
	if (!aplic->irqs) {
		ret = -ENOMEM;
		goto fail_free_aplic;
	}
	for (i = 0; i < aplic->nr_irqs; i++)
		raw_spin_lock_init(&aplic->irqs[i].lock);

	/* Setup IO device */
	kvm_iodevice_init(&aplic->iodev, &aplic_iodoev_ops);
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS,
				      kvm->arch.aia.aplic_addr,
				      KVM_DEV_RISCV_APLIC_SIZE,
				      &aplic->iodev);
	mutex_unlock(&kvm->slots_lock);
	if (ret)
		goto fail_free_aplic_irqs;

	/* Setup default IRQ routing */
	ret = kvm_riscv_setup_default_irq_routing(kvm, aplic->nr_irqs);
	if (ret)
		goto fail_unreg_iodev;

	return 0;

fail_unreg_iodev:
	mutex_lock(&kvm->slots_lock);
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &aplic->iodev);
	mutex_unlock(&kvm->slots_lock);
fail_free_aplic_irqs:
	kfree(aplic->irqs);
fail_free_aplic:
	kvm->arch.aia.aplic_state = NULL;
	kfree(aplic);
	return ret;
}
629 
/*
 * Tear down the emulated APLIC of @kvm: unregister the MMIO device and
 * free the per-IRQ array and global state.  Safe to call when no APLIC
 * was ever initialized.
 */
void kvm_riscv_aia_aplic_cleanup(struct kvm *kvm)
{
	struct aplic *aplic = kvm->arch.aia.aplic_state;

	if (!aplic)
		return;

	mutex_lock(&kvm->slots_lock);
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &aplic->iodev);
	mutex_unlock(&kvm->slots_lock);

	kfree(aplic->irqs);

	/* Clear the pointer before freeing so stale lookups see no APLIC. */
	kvm->arch.aia.aplic_state = NULL;
	kfree(aplic);
}
646