xref: /linux/arch/loongarch/kvm/interrupt.c (revision 53597deca0e38c30e6cd4ba2114fa42d2bcd85bb)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
4  */
5 
6 #include <linux/err.h>
7 #include <linux/errno.h>
8 #include <asm/kvm_csr.h>
9 #include <asm/kvm_vcpu.h>
10 #include <asm/kvm_dmsintc.h>
11 
/*
 * Map an interrupt priority (bit index in vcpu->arch.irq_pending /
 * irq_clear, i.e. the INT_* exception subcode) to the CPU irq bit
 * written into the guest ESTAT or host GINTC CSR on injection.
 */
static unsigned int priority_to_irq[EXCCODE_INT_NUM] = {
	[INT_TI]	= CPU_TIMER,
	[INT_IPI]	= CPU_IPI,
	[INT_SWI0]	= CPU_SIP0,
	[INT_SWI1]	= CPU_SIP1,
	[INT_HWI0]	= CPU_IP0,
	[INT_HWI1]	= CPU_IP1,
	[INT_HWI2]	= CPU_IP2,
	[INT_HWI3]	= CPU_IP3,
	[INT_HWI4]	= CPU_IP4,
	[INT_HWI5]	= CPU_IP5,
	[INT_HWI6]	= CPU_IP6,
	[INT_HWI7]	= CPU_IP7,
	[INT_AVEC]	= CPU_AVEC,
};
27 
28 static int kvm_irq_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
29 {
30 	unsigned int irq = 0;
31 
32 	clear_bit(priority, &vcpu->arch.irq_pending);
33 	if (priority < EXCCODE_INT_NUM)
34 		irq = priority_to_irq[priority];
35 
36 	if (kvm_guest_has_msgint(&vcpu->arch) && (priority == INT_AVEC)) {
37 		dmsintc_inject_irq(vcpu);
38 		set_gcsr_estat(irq);
39 		return 1;
40 	}
41 
42 	switch (priority) {
43 	case INT_TI:
44 	case INT_IPI:
45 	case INT_SWI0:
46 	case INT_SWI1:
47 		set_gcsr_estat(irq);
48 		break;
49 
50 	case INT_HWI0 ... INT_HWI7:
51 		set_csr_gintc(irq);
52 		break;
53 
54 	default:
55 		break;
56 	}
57 
58 	return 1;
59 }
60 
61 static int kvm_irq_clear(struct kvm_vcpu *vcpu, unsigned int priority)
62 {
63 	unsigned int irq = 0;
64 
65 	clear_bit(priority, &vcpu->arch.irq_clear);
66 	if (priority < EXCCODE_INT_NUM)
67 		irq = priority_to_irq[priority];
68 
69 	if (kvm_guest_has_msgint(&vcpu->arch) && (priority == INT_AVEC)) {
70 		clear_gcsr_estat(irq);
71 		return 1;
72 	}
73 
74 	switch (priority) {
75 	case INT_TI:
76 	case INT_IPI:
77 	case INT_SWI0:
78 	case INT_SWI1:
79 		clear_gcsr_estat(irq);
80 		break;
81 
82 	case INT_HWI0 ... INT_HWI7:
83 		clear_csr_gintc(irq);
84 		break;
85 
86 	default:
87 		break;
88 	}
89 
90 	return 1;
91 }
92 
93 void kvm_deliver_intr(struct kvm_vcpu *vcpu)
94 {
95 	unsigned int priority;
96 	unsigned long *pending = &vcpu->arch.irq_pending;
97 	unsigned long *pending_clr = &vcpu->arch.irq_clear;
98 
99 	for_each_set_bit(priority, pending_clr, EXCCODE_INT_NUM)
100 		kvm_irq_clear(vcpu, priority);
101 
102 	for_each_set_bit(priority, pending, EXCCODE_INT_NUM)
103 		kvm_irq_deliver(vcpu, priority);
104 }
105 
106 int kvm_pending_timer(struct kvm_vcpu *vcpu)
107 {
108 	return test_bit(INT_TI, &vcpu->arch.irq_pending);
109 }
110 
/*
 * Deliver an exception to the guest by emulating the hardware
 * exception-entry sequence in the guest CSRs.
 *
 * Only illegal instruction and address error exceptions are injected
 * here; other exceptions are injected by hardware in kvm mode.
 */
static void _kvm_deliver_exception(struct kvm_vcpu *vcpu,
				unsigned int code, unsigned int subcode)
{
	unsigned long val, vec_size;

	/*
	 * BADV is added for EXCCODE_ADE exception
	 *  Use PC register (GVA address) if it is instruction exception
	 *  Else use BADV from host side (GPA address) for data exception
	 */
	if (code == EXCCODE_ADE) {
		if (subcode == EXSUBCODE_ADEF)
			val = vcpu->arch.pc;
		else
			val = vcpu->arch.badv;
		kvm_write_hw_gcsr(LOONGARCH_CSR_BADV, val);
	}

	/* Set exception instruction */
	kvm_write_hw_gcsr(LOONGARCH_CSR_BADI, vcpu->arch.badi);

	/*
	 * Save CRMD in PRMD
	 * Set IRQ disabled and PLV0 with CRMD
	 */
	val = kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD);
	kvm_write_hw_gcsr(LOONGARCH_CSR_PRMD, val);
	val = val & ~(CSR_CRMD_PLV | CSR_CRMD_IE);
	kvm_write_hw_gcsr(LOONGARCH_CSR_CRMD, val);

	/* Set exception PC address */
	kvm_write_hw_gcsr(LOONGARCH_CSR_ERA, vcpu->arch.pc);

	/*
	 * Set exception code
	 * Exception and interrupt can be inject at the same time
	 * Hardware will handle exception first and then extern interrupt
	 * Exception code is Ecode in ESTAT[16:21]
	 * Interrupt code in ESTAT[0:12]
	 */
	val = kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT);
	val = (val & ~CSR_ESTAT_EXC) | code;
	kvm_write_hw_gcsr(LOONGARCH_CSR_ESTAT, val);

	/*
	 * Calculate exception entry address: with a nonzero ECFG.VS the
	 * guest uses vectored entries spaced (1 << VS) * 4 bytes apart;
	 * with VS == 0 all exceptions share the EENTRY base address.
	 */
	val = kvm_read_hw_gcsr(LOONGARCH_CSR_ECFG);
	vec_size = (val & CSR_ECFG_VS) >> CSR_ECFG_VS_SHIFT;
	if (vec_size)
		vec_size = (1 << vec_size) * 4;
	val =  kvm_read_hw_gcsr(LOONGARCH_CSR_EENTRY);
	vcpu->arch.pc = val + code * vec_size;
}
167 
168 void kvm_deliver_exception(struct kvm_vcpu *vcpu)
169 {
170 	unsigned int code;
171 	unsigned long *pending = &vcpu->arch.exception_pending;
172 
173 	if (*pending) {
174 		code = __ffs(*pending);
175 		_kvm_deliver_exception(vcpu, code, vcpu->arch.esubcode);
176 		*pending = 0;
177 		vcpu->arch.esubcode = 0;
178 	}
179 }
180