xref: /linux/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c (revision d82689bdd828833bd582c2bf7a85071cacb52990)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * ARM Generic Interrupt Controller (GIC) v3 support
4  */
5 
6 #include <linux/sizes.h>
7 
8 #include "kvm_util.h"
9 #include "processor.h"
10 #include "delay.h"
11 
12 #include "gic.h"
13 #include "gic_v3.h"
14 #include "gic_private.h"
15 
/* Upper bound on vCPUs; sizes the per-CPU redistributor base array below. */
#define GICV3_MAX_CPUS			512

/* Default priority programmed for every interrupt. */
#define GICD_INT_DEF_PRI		0xa0
/* Four copies of the default priority, for one 32-bit IPRIORITYR write. */
#define GICD_INT_DEF_PRI_X4		((GICD_INT_DEF_PRI << 24) |\
					(GICD_INT_DEF_PRI << 16) |\
					(GICD_INT_DEF_PRI << 8) |\
					GICD_INT_DEF_PRI)

/* Default value programmed into ICC_PMR_EL1 (CPU interface priority mask). */
#define ICC_PMR_DEF_PRIO		0xf0
25 
struct gicv3_data {
	void *dist_base;			/* distributor MMIO base */
	void *redist_base[GICV3_MAX_CPUS];	/* per-vCPU redistributor bases */
	unsigned int nr_cpus;			/* number of vCPUs in the VM */
	unsigned int nr_spis;			/* SPIs from GICD_TYPER, capped at 1020 */
};

/* The SGI/PPI register frame sits one 64K page above the RD frame. */
#define sgi_base_from_redist(redist_base)	(redist_base + SZ_64K)
/* Flag OR'd into a "cpu_or_dist" argument to target the distributor. */
#define DIST_BIT				(1U << 31)
35 
/* Interrupt ID classes as returned by get_intid_range(). */
enum gicv3_intid_range {
	SGI_RANGE,	/* intid 0..15: software-generated (inter-processor) */
	PPI_RANGE,	/* intid 16..31: private per-CPU */
	SPI_RANGE,	/* intid 32..1019: shared */
	INVALID_RANGE,
};

/* Global GIC state, shared by all vCPUs of the guest. */
static struct gicv3_data gicv3_data;
44 
45 static void gicv3_gicd_wait_for_rwp(void)
46 {
47 	unsigned int count = 100000; /* 1s */
48 
49 	while (readl(gicv3_data.dist_base + GICD_CTLR) & GICD_CTLR_RWP) {
50 		GUEST_ASSERT(count--);
51 		udelay(10);
52 	}
53 }
54 
55 static void gicv3_gicr_wait_for_rwp(void *redist_base)
56 {
57 	unsigned int count = 100000; /* 1s */
58 
59 	while (readl(redist_base + GICR_CTLR) & GICR_CTLR_RWP) {
60 		GUEST_ASSERT(count--);
61 		udelay(10);
62 	}
63 }
64 
65 static void gicv3_wait_for_rwp(uint32_t cpu_or_dist)
66 {
67 	if (cpu_or_dist & DIST_BIT)
68 		gicv3_gicd_wait_for_rwp();
69 	else
70 		gicv3_gicr_wait_for_rwp(gicv3_data.redist_base[cpu_or_dist]);
71 }
72 
73 static enum gicv3_intid_range get_intid_range(unsigned int intid)
74 {
75 	switch (intid) {
76 	case 0 ... 15:
77 		return SGI_RANGE;
78 	case 16 ... 31:
79 		return PPI_RANGE;
80 	case 32 ... 1019:
81 		return SPI_RANGE;
82 	}
83 
84 	/* We should not be reaching here */
85 	GUEST_ASSERT(0);
86 
87 	return INVALID_RANGE;
88 }
89 
/*
 * Acknowledge the highest-priority pending Group-1 interrupt and return
 * its intid. The dsb orders the acknowledge against subsequent handling.
 */
static uint64_t gicv3_read_iar(void)
{
	uint64_t irqstat = read_sysreg_s(SYS_ICC_IAR1_EL1);

	dsb(sy);
	return irqstat;
}
97 
/* Signal end-of-interrupt for @irq via ICC_EOIR1_EL1 (Group-1). */
static void gicv3_write_eoir(uint32_t irq)
{
	write_sysreg_s(irq, SYS_ICC_EOIR1_EL1);
	isb();
}
103 
/*
 * Deactivate @irq via ICC_DIR_EL1; relevant when EOI split mode is
 * enabled (see gicv3_set_eoi_split).
 */
static void gicv3_write_dir(uint32_t irq)
{
	write_sysreg_s(irq, SYS_ICC_DIR_EL1);
	isb();
}
109 
/* Program the CPU interface priority mask register (ICC_PMR_EL1). */
static void gicv3_set_priority_mask(uint64_t mask)
{
	write_sysreg_s(mask, SYS_ICC_PMR_EL1);
}
114 
115 static void gicv3_set_eoi_split(bool split)
116 {
117 	uint32_t val;
118 
119 	/*
120 	 * All other fields are read-only, so no need to read CTLR first. In
121 	 * fact, the kernel does the same.
122 	 */
123 	val = split ? (1U << 1) : 0;
124 	write_sysreg_s(val, SYS_ICC_CTLR_EL1);
125 	isb();
126 }
127 
128 uint32_t gicv3_reg_readl(uint32_t cpu_or_dist, uint64_t offset)
129 {
130 	void *base = cpu_or_dist & DIST_BIT ? gicv3_data.dist_base
131 		: sgi_base_from_redist(gicv3_data.redist_base[cpu_or_dist]);
132 	return readl(base + offset);
133 }
134 
135 void gicv3_reg_writel(uint32_t cpu_or_dist, uint64_t offset, uint32_t reg_val)
136 {
137 	void *base = cpu_or_dist & DIST_BIT ? gicv3_data.dist_base
138 		: sgi_base_from_redist(gicv3_data.redist_base[cpu_or_dist]);
139 	writel(reg_val, base + offset);
140 }
141 
/* Read the register at @offset and return only the bits selected by @mask. */
uint32_t gicv3_getl_fields(uint32_t cpu_or_dist, uint64_t offset, uint32_t mask)
{
	return gicv3_reg_readl(cpu_or_dist, offset) & mask;
}
146 
/*
 * Read-modify-write: replace the bits selected by @mask in the register
 * at @offset with the corresponding bits of @reg_val.
 */
void gicv3_setl_fields(uint32_t cpu_or_dist, uint64_t offset,
		uint32_t mask, uint32_t reg_val)
{
	uint32_t val = gicv3_reg_readl(cpu_or_dist, offset);

	val &= ~mask;
	val |= reg_val & mask;
	gicv3_reg_writel(cpu_or_dist, offset, val);
}
155 
/*
 * We use a single offset for the distributor and redistributor maps as they
 * have the same value in both. The only exceptions are registers that only
 * exist in one and not the other, like GICR_WAKER that doesn't exist in the
 * distributor map. Such registers are conveniently marked as reserved in the
 * map that doesn't implement it; like GICR_WAKER's offset of 0x0014 being
 * marked as "Reserved" in the Distributor map.
 *
 * Access @intid's bits_per_field-wide field within the 32-bit register bank
 * starting at @offset. SPIs go through the distributor; private interrupts
 * (SGIs/PPIs) through the calling vCPU's redistributor. If @write is set,
 * the field is first written from *val; in all cases *val is filled with
 * the field's current value, shifted down to bit 0.
 */
static void gicv3_access_reg(uint32_t intid, uint64_t offset,
		uint32_t reg_bits, uint32_t bits_per_field,
		bool write, uint32_t *val)
{
	uint32_t cpu = guest_get_vcpuid();
	enum gicv3_intid_range intid_range = get_intid_range(intid);
	uint32_t fields_per_reg, index, mask, shift;
	uint32_t cpu_or_dist;

	GUEST_ASSERT(bits_per_field <= reg_bits);
	GUEST_ASSERT(!write || *val < (1U << bits_per_field));
	/*
	 * This function does not support 64 bit accesses. Just asserting here
	 * until we implement readq/writeq.
	 */
	GUEST_ASSERT(reg_bits == 32);

	/* Position of intid's field within its 32-bit register. */
	fields_per_reg = reg_bits / bits_per_field;
	index = intid % fields_per_reg;
	shift = index * bits_per_field;
	mask = ((1U << bits_per_field) - 1) << shift;

	/* Set offset to the actual register holding intid's config. */
	offset += (intid / fields_per_reg) * (reg_bits / 8);

	cpu_or_dist = (intid_range == SPI_RANGE) ? DIST_BIT : cpu;

	if (write)
		gicv3_setl_fields(cpu_or_dist, offset, mask, *val << shift);
	*val = gicv3_getl_fields(cpu_or_dist, offset, mask) >> shift;
}
195 
/* Write @val into @intid's field of the register bank at @offset. */
static void gicv3_write_reg(uint32_t intid, uint64_t offset,
		uint32_t reg_bits, uint32_t bits_per_field, uint32_t val)
{
	gicv3_access_reg(intid, offset, reg_bits,
			bits_per_field, true, &val);
}
202 
/* Read and return @intid's field from the register bank at @offset. */
static uint32_t gicv3_read_reg(uint32_t intid, uint64_t offset,
		uint32_t reg_bits, uint32_t bits_per_field)
{
	uint32_t val;

	gicv3_access_reg(intid, offset, reg_bits,
			bits_per_field, false, &val);
	return val;
}
212 
/* Set @intid's 8-bit priority field (GICD/GICR_IPRIORITYR). */
static void gicv3_set_priority(uint32_t intid, uint32_t prio)
{
	gicv3_write_reg(intid, GICD_IPRIORITYR, 32, 8, prio);
}
217 
218 /* Sets the intid to be level-sensitive or edge-triggered. */
219 static void gicv3_irq_set_config(uint32_t intid, bool is_edge)
220 {
221 	uint32_t val;
222 
223 	/* N/A for private interrupts. */
224 	GUEST_ASSERT(get_intid_range(intid) == SPI_RANGE);
225 	val = is_edge ? 2 : 0;
226 	gicv3_write_reg(intid, GICD_ICFGR, 32, 2, val);
227 }
228 
229 static void gicv3_irq_enable(uint32_t intid)
230 {
231 	bool is_spi = get_intid_range(intid) == SPI_RANGE;
232 	uint32_t cpu = guest_get_vcpuid();
233 
234 	gicv3_write_reg(intid, GICD_ISENABLER, 32, 1, 1);
235 	gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu);
236 }
237 
238 static void gicv3_irq_disable(uint32_t intid)
239 {
240 	bool is_spi = get_intid_range(intid) == SPI_RANGE;
241 	uint32_t cpu = guest_get_vcpuid();
242 
243 	gicv3_write_reg(intid, GICD_ICENABLER, 32, 1, 1);
244 	gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu);
245 }
246 
/* Mark @intid active (write 1 to its GICD_ISACTIVER bit). */
static void gicv3_irq_set_active(uint32_t intid)
{
	gicv3_write_reg(intid, GICD_ISACTIVER, 32, 1, 1);
}
251 
/* Deactivate @intid (write 1 to its GICD_ICACTIVER bit). */
static void gicv3_irq_clear_active(uint32_t intid)
{
	gicv3_write_reg(intid, GICD_ICACTIVER, 32, 1, 1);
}
256 
/* Return true if @intid is currently active. */
static bool gicv3_irq_get_active(uint32_t intid)
{
	return gicv3_read_reg(intid, GICD_ISACTIVER, 32, 1);
}
261 
/* Mark @intid pending (write 1 to its GICD_ISPENDR bit). */
static void gicv3_irq_set_pending(uint32_t intid)
{
	gicv3_write_reg(intid, GICD_ISPENDR, 32, 1, 1);
}
266 
/* Clear @intid's pending state (write 1 to its GICD_ICPENDR bit). */
static void gicv3_irq_clear_pending(uint32_t intid)
{
	gicv3_write_reg(intid, GICD_ICPENDR, 32, 1, 1);
}
271 
/* Return true if @intid is currently pending. */
static bool gicv3_irq_get_pending(uint32_t intid)
{
	return gicv3_read_reg(intid, GICD_ISPENDR, 32, 1);
}
276 
/*
 * Wake up the redistributor: clear ProcessorSleep in GICR_WAKER, then
 * poll until ChildrenAsleep clears. Guest-asserts after ~1s
 * (100000 polls x 10us) of no progress.
 */
static void gicv3_enable_redist(void *redist_base)
{
	uint32_t val = readl(redist_base + GICR_WAKER);
	unsigned int count = 100000; /* 1s */

	val &= ~GICR_WAKER_ProcessorSleep;
	writel(val, redist_base + GICR_WAKER);

	/* Wait until the processor is 'active' */
	while (readl(redist_base + GICR_WAKER) & GICR_WAKER_ChildrenAsleep) {
		GUEST_ASSERT(count--);
		udelay(10);
	}
}
291 
/*
 * Return the base of @cpu's redistributor. Each redistributor occupies
 * two contiguous 64K frames (RD + SGI), laid out back to back per CPU.
 */
static inline void *gicr_base_cpu(void *redist_base, uint32_t cpu)
{
	/* Align all the redistributors sequentially */
	return redist_base + cpu * SZ_64K * 2;
}
297 
/*
 * Per-vCPU init: wake this vCPU's redistributor, reset its private
 * interrupts (SGIs/PPIs), enable the system-register CPU interface, and
 * record the redistributor base for later per-intid register accesses.
 */
static void gicv3_cpu_init(unsigned int cpu, void *redist_base)
{
	void *sgi_base;
	unsigned int i;
	void *redist_base_cpu;

	GUEST_ASSERT(cpu < gicv3_data.nr_cpus);

	/* This cpu's RD frame, and its SGI frame one 64K page above it. */
	redist_base_cpu = gicr_base_cpu(redist_base, cpu);
	sgi_base = sgi_base_from_redist(redist_base_cpu);

	gicv3_enable_redist(redist_base_cpu);

	/*
	 * Mark all the SGI and PPI interrupts as non-secure Group-1.
	 * Also, deactivate and disable them.
	 */
	writel(~0, sgi_base + GICR_IGROUPR0);
	writel(~0, sgi_base + GICR_ICACTIVER0);
	writel(~0, sgi_base + GICR_ICENABLER0);

	/* Set a default priority for all the SGIs and PPIs (4 intids/write) */
	for (i = 0; i < 32; i += 4)
		writel(GICD_INT_DEF_PRI_X4,
				sgi_base + GICR_IPRIORITYR0 + i);

	gicv3_gicr_wait_for_rwp(redist_base_cpu);

	/* Enable the GIC system register (ICC_*) access */
	write_sysreg_s(read_sysreg_s(SYS_ICC_SRE_EL1) | ICC_SRE_EL1_SRE,
			SYS_ICC_SRE_EL1);

	/* Set a default priority threshold */
	write_sysreg_s(ICC_PMR_DEF_PRIO, SYS_ICC_PMR_EL1);

	/* Enable non-secure Group-1 interrupts */
	write_sysreg_s(ICC_IGRPEN1_EL1_MASK, SYS_ICC_IGRPEN1_EL1);

	gicv3_data.redist_base[cpu] = redist_base_cpu;
}
338 
/*
 * One-time distributor init: disable it, put every SPI in a known state
 * (Group-1, inactive, disabled, default priority), then re-enable with
 * affinity routing (ARE).
 */
static void gicv3_dist_init(void)
{
	void *dist_base = gicv3_data.dist_base;
	unsigned int i;

	/* Disable the distributor until we set things up */
	writel(0, dist_base + GICD_CTLR);
	gicv3_gicd_wait_for_rwp();

	/*
	 * Mark all the SPI interrupts as non-secure Group-1.
	 * Also, deactivate and disable them.
	 */
	for (i = 32; i < gicv3_data.nr_spis; i += 32) {
		/* One bit per intid, so 32 intids per 32-bit register. */
		writel(~0, dist_base + GICD_IGROUPR + i / 8);
		writel(~0, dist_base + GICD_ICACTIVER + i / 8);
		writel(~0, dist_base + GICD_ICENABLER + i / 8);
	}

	/* Set a default priority for all the SPIs (4 intids per write) */
	for (i = 32; i < gicv3_data.nr_spis; i += 4)
		writel(GICD_INT_DEF_PRI_X4,
				dist_base + GICD_IPRIORITYR + i);

	/* Wait for the settings to sync-in */
	gicv3_gicd_wait_for_rwp();

	/* Finally, enable the distributor globally with ARE */
	writel(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A |
			GICD_CTLR_ENABLE_G1, dist_base + GICD_CTLR);
	gicv3_gicd_wait_for_rwp();
}
371 
372 static void gicv3_init(unsigned int nr_cpus, void *dist_base)
373 {
374 	GUEST_ASSERT(nr_cpus <= GICV3_MAX_CPUS);
375 
376 	gicv3_data.nr_cpus = nr_cpus;
377 	gicv3_data.dist_base = dist_base;
378 	gicv3_data.nr_spis = GICD_TYPER_SPIS(
379 				readl(gicv3_data.dist_base + GICD_TYPER));
380 	if (gicv3_data.nr_spis > 1020)
381 		gicv3_data.nr_spis = 1020;
382 
383 	/*
384 	 * Initialize only the distributor for now.
385 	 * The redistributor and CPU interfaces are initialized
386 	 * later for every PE.
387 	 */
388 	gicv3_dist_init();
389 }
390 
/* GICv3 implementation of the common GIC ops used by the selftests. */
const struct gic_common_ops gicv3_ops = {
	.gic_init = gicv3_init,
	.gic_cpu_init = gicv3_cpu_init,
	.gic_irq_enable = gicv3_irq_enable,
	.gic_irq_disable = gicv3_irq_disable,
	.gic_read_iar = gicv3_read_iar,
	.gic_write_eoir = gicv3_write_eoir,
	.gic_write_dir = gicv3_write_dir,
	.gic_set_priority_mask = gicv3_set_priority_mask,
	.gic_set_eoi_split = gicv3_set_eoi_split,
	.gic_set_priority = gicv3_set_priority,
	.gic_irq_set_active = gicv3_irq_set_active,
	.gic_irq_clear_active = gicv3_irq_clear_active,
	.gic_irq_get_active = gicv3_irq_get_active,
	.gic_irq_set_pending = gicv3_irq_set_pending,
	.gic_irq_clear_pending = gicv3_irq_clear_pending,
	.gic_irq_get_pending = gicv3_irq_get_pending,
	.gic_irq_set_config = gicv3_irq_set_config,
};
410