// SPDX-License-Identifier: GPL-2.0
/*
 * ARM Generic Interrupt Controller (GIC) v3 support
 */

#include <linux/sizes.h>

#include "kvm_util.h"
#include "processor.h"
#include "delay.h"

#include "gic.h"
#include "gic_v3.h"
#include "gic_private.h"

#define GICV3_MAX_CPUS			512

#define GICD_INT_DEF_PRI		0xa0
#define GICD_INT_DEF_PRI_X4		((GICD_INT_DEF_PRI << 24) |\
					(GICD_INT_DEF_PRI << 16) |\
					(GICD_INT_DEF_PRI << 8) |\
					GICD_INT_DEF_PRI)

#define ICC_PMR_DEF_PRIO		0xf0
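/*
 * GIC priorities are "lower value == higher priority": 0xa0 is a permissive
 * default interrupt priority, and a priority mask of 0xf0 lets anything with
 * a priority value below 0xf0 (including the 0xa0 default) be signalled to
 * the PE.
 */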

struct gicv3_data {
	unsigned int nr_cpus;
	unsigned int nr_spis;
};

#define sgi_base_from_redist(redist_base)	(redist_base + SZ_64K)
#define DIST_BIT				(1U << 31)
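
/*
 * Most helpers below take a "cpu_or_dist" argument: a plain vCPU id selects
 * that CPU's redistributor, while a value with DIST_BIT set selects the
 * distributor instead.
 */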

enum gicv3_intid_range {
	SGI_RANGE,
	PPI_RANGE,
	SPI_RANGE,
	INVALID_RANGE,
};

static struct gicv3_data gicv3_data;

static void gicv3_gicd_wait_for_rwp(void)
{
	unsigned int count = 100000; /* 1s */

	while (readl(GICD_BASE_GVA + GICD_CTLR) & GICD_CTLR_RWP) {
		GUEST_ASSERT(count--);
		udelay(10);
	}
}

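/*
 * Each redistributor occupies two contiguous 64K frames: RD_base (control
 * registers such as GICR_CTLR and GICR_WAKER) followed by SGI_base (SGI/PPI
 * configuration). GICR_BASE_GVA is vCPU 0's RD_base.
 */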
static inline volatile void *gicr_base_cpu(uint32_t cpu)
{
	/* Align all the redistributors sequentially */
	return GICR_BASE_GVA + cpu * SZ_64K * 2;
}

static void gicv3_gicr_wait_for_rwp(uint32_t cpu)
{
	unsigned int count = 100000; /* 1s */

	while (readl(gicr_base_cpu(cpu) + GICR_CTLR) & GICR_CTLR_RWP) {
		GUEST_ASSERT(count--);
		udelay(10);
	}
}

static void gicv3_wait_for_rwp(uint32_t cpu_or_dist)
{
	if (cpu_or_dist & DIST_BIT)
		gicv3_gicd_wait_for_rwp();
	else
		gicv3_gicr_wait_for_rwp(cpu_or_dist);
}

static enum gicv3_intid_range get_intid_range(unsigned int intid)
{
	switch (intid) {
	case 0 ... 15:
		return SGI_RANGE;
	case 16 ... 31:
		return PPI_RANGE;
	case 32 ... 1019:
		return SPI_RANGE;
	}

	/* We should not be reaching here */
	GUEST_ASSERT(0);

	return INVALID_RANGE;
}

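/*
 * Interrupt handling flow on the CPU interface: reading ICC_IAR1_EL1
 * acknowledges the highest-priority pending Group-1 interrupt, writing its
 * INTID to ICC_EOIR1_EL1 performs the priority drop (and deactivation,
 * unless EOI split mode is enabled via gicv3_set_eoi_split()), and writing
 * ICC_DIR_EL1 deactivates the interrupt explicitly when split mode is used.
 */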
static uint64_t gicv3_read_iar(void)
{
	uint64_t irqstat = read_sysreg_s(SYS_ICC_IAR1_EL1);

	dsb(sy);
	return irqstat;
}

static void gicv3_write_eoir(uint32_t irq)
{
	write_sysreg_s(irq, SYS_ICC_EOIR1_EL1);
	isb();
}

static void gicv3_write_dir(uint32_t irq)
{
	write_sysreg_s(irq, SYS_ICC_DIR_EL1);
	isb();
}

static void gicv3_set_priority_mask(uint64_t mask)
{
	write_sysreg_s(mask, SYS_ICC_PMR_EL1);
}

static void gicv3_set_eoi_split(bool split)
{
	uint32_t val;

	/*
	 * All other fields are read-only, so no need to read CTLR first. In
	 * fact, the kernel does the same.
	 */
	val = split ? (1U << 1) : 0;
	write_sysreg_s(val, SYS_ICC_CTLR_EL1);
	isb();
}

uint32_t gicv3_reg_readl(uint32_t cpu_or_dist, uint64_t offset)
{
	volatile void *base = cpu_or_dist & DIST_BIT ? GICD_BASE_GVA
		: sgi_base_from_redist(gicr_base_cpu(cpu_or_dist));
	return readl(base + offset);
}

void gicv3_reg_writel(uint32_t cpu_or_dist, uint64_t offset, uint32_t reg_val)
{
	volatile void *base = cpu_or_dist & DIST_BIT ? GICD_BASE_GVA
		: sgi_base_from_redist(gicr_base_cpu(cpu_or_dist));
	writel(reg_val, base + offset);
}

uint32_t gicv3_getl_fields(uint32_t cpu_or_dist, uint64_t offset, uint32_t mask)
{
	return gicv3_reg_readl(cpu_or_dist, offset) & mask;
}

void gicv3_setl_fields(uint32_t cpu_or_dist, uint64_t offset,
		       uint32_t mask, uint32_t reg_val)
{
	uint32_t tmp = gicv3_reg_readl(cpu_or_dist, offset) & ~mask;

	tmp |= (reg_val & mask);
	gicv3_reg_writel(cpu_or_dist, offset, tmp);
}

/*
 * We use a single offset for the distributor and redistributor maps as they
 * have the same value in both. The only exceptions are registers that only
 * exist in one and not the other, like GICR_WAKER that doesn't exist in the
 * distributor map. Such registers are conveniently marked as reserved in the
 * map that doesn't implement it; like GICR_WAKER's offset of 0x0014 being
 * marked as "Reserved" in the Distributor map.
 */
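/*
 * Example: accessing the priority of SPI 42 uses GICD_IPRIORITYR with
 * reg_bits = 32 and bits_per_field = 8, so fields_per_reg = 4, the register
 * is GICD_IPRIORITYR + (42 / 4) * 4 = GICD_IPRIORITYR + 40, and the field
 * sits at shift (42 % 4) * 8 = 16 with mask 0xff0000.
 */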
static void gicv3_access_reg(uint32_t intid, uint64_t offset,
			     uint32_t reg_bits, uint32_t bits_per_field,
			     bool write, uint32_t *val)
{
	uint32_t cpu = guest_get_vcpuid();
	enum gicv3_intid_range intid_range = get_intid_range(intid);
	uint32_t fields_per_reg, index, mask, shift;
	uint32_t cpu_or_dist;

	GUEST_ASSERT(bits_per_field <= reg_bits);
	GUEST_ASSERT(!write || *val < (1U << bits_per_field));
	/*
	 * This function does not support 64 bit accesses. Just asserting here
	 * until we implement readq/writeq.
	 */
	GUEST_ASSERT(reg_bits == 32);

	fields_per_reg = reg_bits / bits_per_field;
	index = intid % fields_per_reg;
	shift = index * bits_per_field;
	mask = ((1U << bits_per_field) - 1) << shift;

	/* Set offset to the actual register holding intid's config. */
	offset += (intid / fields_per_reg) * (reg_bits / 8);

	cpu_or_dist = (intid_range == SPI_RANGE) ? DIST_BIT : cpu;

	if (write)
		gicv3_setl_fields(cpu_or_dist, offset, mask, *val << shift);
	*val = gicv3_getl_fields(cpu_or_dist, offset, mask) >> shift;
}

static void gicv3_write_reg(uint32_t intid, uint64_t offset,
			    uint32_t reg_bits, uint32_t bits_per_field,
			    uint32_t val)
{
	gicv3_access_reg(intid, offset, reg_bits,
			 bits_per_field, true, &val);
}

static uint32_t gicv3_read_reg(uint32_t intid, uint64_t offset,
			       uint32_t reg_bits, uint32_t bits_per_field)
{
	uint32_t val;

	gicv3_access_reg(intid, offset, reg_bits,
			 bits_per_field, false, &val);
	return val;
}

static void gicv3_set_priority(uint32_t intid, uint32_t prio)
{
	gicv3_write_reg(intid, GICD_IPRIORITYR, 32, 8, prio);
}

/* Sets the intid to be level-sensitive or edge-triggered. */
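/*
 * GICD_ICFGR uses two bits per interrupt; bit[1] set selects edge-triggered
 * and clear selects level-sensitive, hence the 2/0 values below.
 */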
static void gicv3_irq_set_config(uint32_t intid, bool is_edge)
{
	uint32_t val;

	/* N/A for private interrupts. */
	GUEST_ASSERT(get_intid_range(intid) == SPI_RANGE);
	val = is_edge ? 2 : 0;
	gicv3_write_reg(intid, GICD_ICFGR, 32, 2, val);
}

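/*
 * GICD_ISENABLER/GICD_ICENABLER (and the GICR_* equivalents for private
 * interrupts) are write-one-to-enable/write-one-to-disable registers, so
 * both paths below write a 1 to the interrupt's bit and then poll the
 * relevant RWP bit before returning.
 */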
static void gicv3_irq_enable(uint32_t intid)
{
	bool is_spi = get_intid_range(intid) == SPI_RANGE;
	uint32_t cpu = guest_get_vcpuid();

	gicv3_write_reg(intid, GICD_ISENABLER, 32, 1, 1);
	gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu);
}

static void gicv3_irq_disable(uint32_t intid)
{
	bool is_spi = get_intid_range(intid) == SPI_RANGE;
	uint32_t cpu = guest_get_vcpuid();

	gicv3_write_reg(intid, GICD_ICENABLER, 32, 1, 1);
	gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu);
}

static void gicv3_irq_set_active(uint32_t intid)
{
	gicv3_write_reg(intid, GICD_ISACTIVER, 32, 1, 1);
}

static void gicv3_irq_clear_active(uint32_t intid)
{
	gicv3_write_reg(intid, GICD_ICACTIVER, 32, 1, 1);
}

static bool gicv3_irq_get_active(uint32_t intid)
{
	return gicv3_read_reg(intid, GICD_ISACTIVER, 32, 1);
}

static void gicv3_irq_set_pending(uint32_t intid)
{
	gicv3_write_reg(intid, GICD_ISPENDR, 32, 1, 1);
}

static void gicv3_irq_clear_pending(uint32_t intid)
{
	gicv3_write_reg(intid, GICD_ICPENDR, 32, 1, 1);
}

static bool gicv3_irq_get_pending(uint32_t intid)
{
	return gicv3_read_reg(intid, GICD_ISPENDR, 32, 1);
}

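/*
 * Wake up the redistributor: clear GICR_WAKER.ProcessorSleep and then wait
 * for GICR_WAKER.ChildrenAsleep to read as zero, which signals that the
 * redistributor is ready to forward interrupts to this PE.
 */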
static void gicv3_enable_redist(volatile void *redist_base)
{
	uint32_t val = readl(redist_base + GICR_WAKER);
	unsigned int count = 100000; /* 1s */

	val &= ~GICR_WAKER_ProcessorSleep;
	writel(val, redist_base + GICR_WAKER);

	/* Wait until the processor is 'active' */
	while (readl(redist_base + GICR_WAKER) & GICR_WAKER_ChildrenAsleep) {
		GUEST_ASSERT(count--);
		udelay(10);
	}
}

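/*
 * Configure the interrupt's group: a set bit in GICD_IGROUPR/GICR_IGROUPR0
 * places the interrupt in Group 1, a clear bit in Group 0. SPIs are
 * configured at the distributor, SGIs/PPIs at the caller's redistributor.
 */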
static void gicv3_set_group(uint32_t intid, bool grp)
{
	uint32_t cpu_or_dist;
	uint32_t val;

	cpu_or_dist = (get_intid_range(intid) == SPI_RANGE) ? DIST_BIT : guest_get_vcpuid();
	val = gicv3_reg_readl(cpu_or_dist, GICD_IGROUPR + (intid / 32) * 4);
	if (grp)
		val |= BIT(intid % 32);
	else
		val &= ~BIT(intid % 32);
	gicv3_reg_writel(cpu_or_dist, GICD_IGROUPR + (intid / 32) * 4, val);
}

static void gicv3_cpu_init(unsigned int cpu)
{
	volatile void *sgi_base;
	unsigned int i;
	volatile void *redist_base_cpu;
	u64 typer;

	GUEST_ASSERT(cpu < gicv3_data.nr_cpus);

	redist_base_cpu = gicr_base_cpu(cpu);
	sgi_base = sgi_base_from_redist(redist_base_cpu);

	/* Verify assumption that GICR_TYPER.Processor_number == cpu */
	typer = readq_relaxed(redist_base_cpu + GICR_TYPER);
	GUEST_ASSERT_EQ(GICR_TYPER_CPU_NUMBER(typer), cpu);

	gicv3_enable_redist(redist_base_cpu);

	/*
	 * Mark all the SGI and PPI interrupts as non-secure Group-1.
	 * Also, deactivate and disable them.
	 */
	writel(~0, sgi_base + GICR_IGROUPR0);
	writel(~0, sgi_base + GICR_ICACTIVER0);
	writel(~0, sgi_base + GICR_ICENABLER0);

	/* Set a default priority for all the SGIs and PPIs */
	for (i = 0; i < 32; i += 4)
		writel(GICD_INT_DEF_PRI_X4,
		       sgi_base + GICR_IPRIORITYR0 + i);

	gicv3_gicr_wait_for_rwp(cpu);

	/* Enable the GIC system register (ICC_*) access */
	write_sysreg_s(read_sysreg_s(SYS_ICC_SRE_EL1) | ICC_SRE_EL1_SRE,
		       SYS_ICC_SRE_EL1);

	/* Set a default priority threshold */
	write_sysreg_s(ICC_PMR_DEF_PRIO, SYS_ICC_PMR_EL1);

	/* Disable Group-0 interrupts */
	write_sysreg_s(0, SYS_ICC_IGRPEN0_EL1);
	/* Enable non-secure Group-1 interrupts */
	write_sysreg_s(ICC_IGRPEN1_EL1_MASK, SYS_ICC_IGRPEN1_EL1);
}

static void gicv3_dist_init(void)
{
	unsigned int i;

	/* Disable the distributor until we set things up */
	writel(0, GICD_BASE_GVA + GICD_CTLR);
	gicv3_gicd_wait_for_rwp();

	/*
	 * Mark all the SPI interrupts as non-secure Group-1.
	 * Also, deactivate and disable them.
	 */
	for (i = 32; i < gicv3_data.nr_spis; i += 32) {
		writel(~0, GICD_BASE_GVA + GICD_IGROUPR + i / 8);
		writel(~0, GICD_BASE_GVA + GICD_ICACTIVER + i / 8);
		writel(~0, GICD_BASE_GVA + GICD_ICENABLER + i / 8);
	}

	/* Set a default priority for all the SPIs */
	for (i = 32; i < gicv3_data.nr_spis; i += 4)
		writel(GICD_INT_DEF_PRI_X4,
		       GICD_BASE_GVA + GICD_IPRIORITYR + i);

	/* Wait for the settings to sync-in */
	gicv3_gicd_wait_for_rwp();

	/* Finally, enable the distributor globally with ARE */
	writel(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A |
	       GICD_CTLR_ENABLE_G1, GICD_BASE_GVA + GICD_CTLR);
	gicv3_gicd_wait_for_rwp();
}

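/*
 * gicv3_init() derives the number of supported interrupt lines from
 * GICD_TYPER and caps it at 1020, since INTIDs 1020-1023 are special.
 */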
static void gicv3_init(unsigned int nr_cpus)
{
	GUEST_ASSERT(nr_cpus <= GICV3_MAX_CPUS);

	gicv3_data.nr_cpus = nr_cpus;
	gicv3_data.nr_spis = GICD_TYPER_SPIS(
				readl(GICD_BASE_GVA + GICD_TYPER));
	if (gicv3_data.nr_spis > 1020)
		gicv3_data.nr_spis = 1020;

	/*
	 * Initialize only the distributor for now.
	 * The redistributor and CPU interfaces are initialized
	 * later for every PE.
	 */
	gicv3_dist_init();
}

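/*
 * Dispatch table consumed by the common GIC code (gic.c, via gic_private.h).
 * Guest code normally goes through the generic gic.h wrappers rather than
 * calling these directly; a rough sketch, assuming the usual gic.h
 * prototypes, is:
 *
 *	gic_init(GIC_V3, nr_vcpus);
 *	gic_irq_enable(intid);
 *	...
 *	intid = gic_get_and_ack_irq();	(in the IRQ handler)
 *	gic_set_eoi(intid);
 */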
const struct gic_common_ops gicv3_ops = {
	.gic_init = gicv3_init,
	.gic_cpu_init = gicv3_cpu_init,
	.gic_irq_enable = gicv3_irq_enable,
	.gic_irq_disable = gicv3_irq_disable,
	.gic_read_iar = gicv3_read_iar,
	.gic_write_eoir = gicv3_write_eoir,
	.gic_write_dir = gicv3_write_dir,
	.gic_set_priority_mask = gicv3_set_priority_mask,
	.gic_set_eoi_split = gicv3_set_eoi_split,
	.gic_set_priority = gicv3_set_priority,
	.gic_irq_set_active = gicv3_irq_set_active,
	.gic_irq_clear_active = gicv3_irq_clear_active,
	.gic_irq_get_active = gicv3_irq_get_active,
	.gic_irq_set_pending = gicv3_irq_set_pending,
	.gic_irq_clear_pending = gicv3_irq_clear_pending,
	.gic_irq_get_pending = gicv3_irq_get_pending,
	.gic_irq_set_config = gicv3_irq_set_config,
	.gic_irq_set_group = gicv3_set_group,
};

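/*
 * Enable LPI support at the caller's redistributor: GICR_PROPBASER points at
 * the LPI configuration (property) table, GICR_PENDBASER at the per-CPU LPI
 * pending table, and setting GICR_CTLR.EnableLPIs turns the feature on.
 */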
void gic_rdist_enable_lpis(vm_paddr_t cfg_table, size_t cfg_table_size,
			   vm_paddr_t pend_table)
{
	volatile void *rdist_base = gicr_base_cpu(guest_get_vcpuid());

	u32 ctlr;
	u64 val;

	val = (cfg_table |
	       GICR_PROPBASER_InnerShareable |
	       GICR_PROPBASER_RaWaWb |
	       ((ilog2(cfg_table_size) - 1) & GICR_PROPBASER_IDBITS_MASK));
	writeq_relaxed(val, rdist_base + GICR_PROPBASER);

	val = (pend_table |
	       GICR_PENDBASER_InnerShareable |
	       GICR_PENDBASER_RaWaWb);
	writeq_relaxed(val, rdist_base + GICR_PENDBASER);

	ctlr = readl_relaxed(rdist_base + GICR_CTLR);
	ctlr |= GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(ctlr, rdist_base + GICR_CTLR);
}