// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/err.h>
#include <linux/errno.h>
#include <asm/kvm_csr.h>
#include <asm/kvm_vcpu.h>

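/* Map guest interrupt numbers (INT_*) to their CPU_* interrupt status bits */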
static unsigned int priority_to_irq[EXCCODE_INT_NUM] = {
	[INT_TI]	= CPU_TIMER,
	[INT_IPI]	= CPU_IPI,
	[INT_SWI0]	= CPU_SIP0,
	[INT_SWI1]	= CPU_SIP1,
	[INT_HWI0]	= CPU_IP0,
	[INT_HWI1]	= CPU_IP1,
	[INT_HWI2]	= CPU_IP2,
	[INT_HWI3]	= CPU_IP3,
	[INT_HWI4]	= CPU_IP4,
	[INT_HWI5]	= CPU_IP5,
	[INT_HWI6]	= CPU_IP6,
	[INT_HWI7]	= CPU_IP7,
};

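/*
 * Inject one interrupt into the guest: timer, IPI and software interrupts
 * are raised through the guest ESTAT CSR, hardware interrupts through the
 * GINTC CSR.
 */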
static int kvm_irq_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
{
	unsigned int irq = 0;

	clear_bit(priority, &vcpu->arch.irq_pending);
	if (priority < EXCCODE_INT_NUM)
		irq = priority_to_irq[priority];

	switch (priority) {
	case INT_TI:
	case INT_IPI:
	case INT_SWI0:
	case INT_SWI1:
		set_gcsr_estat(irq);
		break;

	case INT_HWI0 ... INT_HWI7:
		set_csr_gintc(irq);
		break;

	default:
		break;
	}

	return 1;
}

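/*
 * Retract one previously injected interrupt from the guest by clearing
 * the matching bit in the guest ESTAT CSR or in the GINTC CSR.
 */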
static int kvm_irq_clear(struct kvm_vcpu *vcpu, unsigned int priority)
{
	unsigned int irq = 0;

	clear_bit(priority, &vcpu->arch.irq_clear);
	if (priority < EXCCODE_INT_NUM)
		irq = priority_to_irq[priority];

	switch (priority) {
	case INT_TI:
	case INT_IPI:
	case INT_SWI0:
	case INT_SWI1:
		clear_gcsr_estat(irq);
		break;

	case INT_HWI0 ... INT_HWI7:
		clear_csr_gintc(irq);
		break;

	default:
		break;
	}

	return 1;
}

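/* Sync the vcpu's pending and clear interrupt bitmaps into the guest CSR state */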
void kvm_deliver_intr(struct kvm_vcpu *vcpu)
{
	unsigned int priority;
	unsigned long *pending = &vcpu->arch.irq_pending;
	unsigned long *pending_clr = &vcpu->arch.irq_clear;

	if (!(*pending) && !(*pending_clr))
		return;

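	/* First retract interrupts that have been dequeued */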
	if (*pending_clr) {
		priority = __ffs(*pending_clr);
		while (priority <= INT_IPI) {
			kvm_irq_clear(vcpu, priority);
			priority = find_next_bit(pending_clr,
					BITS_PER_BYTE * sizeof(*pending_clr),
					priority + 1);
		}
	}

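	/* Then inject the interrupts that are still pending */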
	if (*pending) {
		priority = __ffs(*pending);
		while (priority <= INT_IPI) {
			kvm_irq_deliver(vcpu, priority);
			priority = find_next_bit(pending,
					BITS_PER_BYTE * sizeof(*pending),
					priority + 1);
		}
	}
}

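/* Return whether a timer interrupt is pending for this vcpu */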
int kvm_pending_timer(struct kvm_vcpu *vcpu)
{
	return test_bit(INT_TI, &vcpu->arch.irq_pending);
}

/*
 * Only the illegal instruction and address error exceptions are delivered
 * in software here; other exceptions are injected by hardware in KVM mode.
 */
static void _kvm_deliver_exception(struct kvm_vcpu *vcpu,
				unsigned int code, unsigned int subcode)
{
	unsigned long val, vec_size;

	/*
	 * BADV is set for the EXCCODE_ADE exception:
	 *  use the PC register (GVA address) for an instruction fetch exception,
	 *  else use BADV from the host side (GPA address) for a data exception
	 */
	if (code == EXCCODE_ADE) {
		if (subcode == EXSUBCODE_ADEF)
			val = vcpu->arch.pc;
		else
			val = vcpu->arch.badv;
		kvm_write_hw_gcsr(LOONGARCH_CSR_BADV, val);
	}

	/* Record the faulting instruction in BADI */
	kvm_write_hw_gcsr(LOONGARCH_CSR_BADI, vcpu->arch.badi);

	/*
	 * Save CRMD into PRMD, then clear CRMD.PLV and CRMD.IE so the
	 * guest enters the handler at PLV0 with interrupts disabled
	 */
	val = kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD);
	kvm_write_hw_gcsr(LOONGARCH_CSR_PRMD, val);
	val = val & ~(CSR_CRMD_PLV | CSR_CRMD_IE);
	kvm_write_hw_gcsr(LOONGARCH_CSR_CRMD, val);

	/* Set the exception return address (ERA) to the faulting PC */
	kvm_write_hw_gcsr(LOONGARCH_CSR_ERA, vcpu->arch.pc);

	/*
	 * Set the exception code
	 * An exception and an interrupt can be injected at the same time;
	 * hardware handles the exception first and then the interrupt
	 * The exception code is Ecode in ESTAT[21:16]
	 * The interrupt status bits are in ESTAT[12:0]
	 */
	val = kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT);
	val = (val & ~CSR_ESTAT_EXC) | code;
	kvm_write_hw_gcsr(LOONGARCH_CSR_ESTAT, val);

	/* Calculate the exception entry address */
	val = kvm_read_hw_gcsr(LOONGARCH_CSR_ECFG);
	vec_size = (val & CSR_ECFG_VS) >> CSR_ECFG_VS_SHIFT;
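	/*
	 * ECFG.VS = 0 means all exceptions share one entry;
	 * otherwise entries are spaced 2^VS instructions apart
	 */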
	if (vec_size)
		vec_size = (1 << vec_size) * 4;
	val = kvm_read_hw_gcsr(LOONGARCH_CSR_EENTRY);
	vcpu->arch.pc = val + code * vec_size;
}

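/* Deliver the lowest-numbered pending exception, if any, and clear the pending state */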
void kvm_deliver_exception(struct kvm_vcpu *vcpu)
{
	unsigned int code;
	unsigned long *pending = &vcpu->arch.exception_pending;

	if (*pending) {
		code = __ffs(*pending);
		_kvm_deliver_exception(vcpu, code, vcpu->arch.esubcode);
		*pending = 0;
		vcpu->arch.esubcode = 0;
	}
}