1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
4 * Author: Marc Zyngier <marc.zyngier@arm.com>
5 */
6
7 #include <linux/acpi.h>
8 #include <linux/acpi_iort.h>
9 #include <linux/bitfield.h>
10 #include <linux/bitmap.h>
11 #include <linux/cpu.h>
12 #include <linux/crash_dump.h>
13 #include <linux/delay.h>
14 #include <linux/efi.h>
15 #include <linux/genalloc.h>
16 #include <linux/interrupt.h>
17 #include <linux/iommu.h>
18 #include <linux/iopoll.h>
19 #include <linux/irqdomain.h>
20 #include <linux/list.h>
21 #include <linux/log2.h>
22 #include <linux/mem_encrypt.h>
23 #include <linux/memblock.h>
24 #include <linux/mm.h>
25 #include <linux/msi.h>
26 #include <linux/of.h>
27 #include <linux/of_address.h>
28 #include <linux/of_irq.h>
29 #include <linux/of_pci.h>
30 #include <linux/of_platform.h>
31 #include <linux/percpu.h>
32 #include <linux/set_memory.h>
33 #include <linux/slab.h>
34 #include <linux/syscore_ops.h>
35
36 #include <linux/irqchip.h>
37 #include <linux/irqchip/arm-gic-v3.h>
38 #include <linux/irqchip/arm-gic-v4.h>
39
40 #include <asm/cputype.h>
41 #include <asm/exception.h>
42
43 #include "irq-gic-common.h"
44 #include "irq-gic-its-msi-parent.h"
45 #include <linux/irqchip/irq-msi-lib.h>
46
47 #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
48 #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
49 #define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
50 #define ITS_FLAGS_FORCE_NON_SHAREABLE (1ULL << 3)
51 #define ITS_FLAGS_WORKAROUND_HISILICON_162100801 (1ULL << 4)
52
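/* Per-CPU redistributor state bits, tracked in gic_data_rdist()->flags */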
53 #define RD_LOCAL_LPI_ENABLED BIT(0)
54 #define RD_LOCAL_PENDTABLE_PREALLOCATED BIT(1)
55 #define RD_LOCAL_MEMRESERVE_DONE BIT(2)
56
57 static u32 lpi_id_bits;
58
59 /*
60 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
61 * deal with (one configuration byte per interrupt). PENDBASE has to
62 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
63 */
64 #define LPI_NRBITS lpi_id_bits
65 #define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K)
66 #define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
67
68 static u8 __ro_after_init lpi_prop_prio;
69 static struct its_node *find_4_1_its(void);
70
71 /*
72 * Collection structure - just an ID, and a redistributor address to
73 * ping. We use one per CPU as a bag of interrupts assigned to this
74 * CPU.
75 */
76 struct its_collection {
77 u64 target_address;
78 u16 col_id;
79 };
80
81 /*
82 * The ITS_BASER structure - contains memory information, cached
83 * value of BASER register configuration and ITS page size.
84 */
85 struct its_baser {
86 void *base;
87 u64 val;
88 u32 order;
89 u32 psz;
90 };
91
92 struct its_device;
93
94 /*
95 * The ITS structure - contains most of the infrastructure, with the
96 * top-level MSI domain, the command queue, the collections, and the
97 * list of devices writing to it.
98 *
99 * dev_alloc_lock has to be taken for device allocations, while the
100 * spinlock must be taken to parse data structures such as the device
101 * list.
102 */
103 struct its_node {
104 raw_spinlock_t lock;
105 struct mutex dev_alloc_lock;
106 struct list_head entry;
107 void __iomem *base;
108 void __iomem *sgir_base;
109 phys_addr_t phys_base;
110 struct its_cmd_block *cmd_base;
111 struct its_cmd_block *cmd_write;
112 struct its_baser tables[GITS_BASER_NR_REGS];
113 struct its_collection *collections;
114 struct fwnode_handle *fwnode_handle;
115 u64 (*get_msi_base)(struct its_device *its_dev);
116 u64 typer;
117 u64 cbaser_save;
118 u32 ctlr_save;
119 u32 mpidr;
120 struct list_head its_device_list;
121 u64 flags;
122 unsigned long list_nr;
123 int numa_node;
124 unsigned int msi_domain_flags;
125 u32 pre_its_base; /* for Socionext Synquacer */
126 int vlpi_redist_offset;
127 };
128
129 static DEFINE_PER_CPU(struct its_node *, local_4_1_its);
130
131 #define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS))
132 #define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP))
133 #define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)
134
135 #define ITS_ITT_ALIGN SZ_256
136
137 /* The maximum number of VPEID bits supported by VLPI commands */
138 #define ITS_MAX_VPEID_BITS \
139 ({ \
140 int nvpeid = 16; \
141 if (gic_rdists->has_rvpeid && \
142 gic_rdists->gicd_typer2 & GICD_TYPER2_VIL) \
143 nvpeid = 1 + (gic_rdists->gicd_typer2 & \
144 GICD_TYPER2_VID); \
145 \
146 nvpeid; \
147 })
148 #define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS))
149
150 /* Convert page order to size in bytes */
151 #define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
152
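/* Per-device mapping of events to LPIs and collections, plus GICv4 vLPI state */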
153 struct event_lpi_map {
154 unsigned long *lpi_map;
155 u16 *col_map;
156 irq_hw_number_t lpi_base;
157 int nr_lpis;
158 raw_spinlock_t vlpi_lock;
159 struct its_vm *vm;
160 struct its_vlpi_map *vlpi_maps;
161 int nr_vlpis;
162 };
163
164 /*
165 * The ITS view of a device - belongs to an ITS, owns an interrupt
166 * translation table, and a list of interrupts. If some of its
167 * LPIs are injected into a guest (GICv4), the event_map.vm field
168 * indicates which one.
169 */
170 struct its_device {
171 struct list_head entry;
172 struct its_node *its;
173 struct event_lpi_map event_map;
174 void *itt;
175 u32 itt_sz;
176 u32 nr_ites;
177 u32 device_id;
178 bool shared;
179 };
180
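/* Proxy ITS device used to signal vPE doorbells when direct LPI injection is not available (GICv4.0) */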
181 static struct {
182 raw_spinlock_t lock;
183 struct its_device *dev;
184 struct its_vpe **vpes;
185 int next_victim;
186 } vpe_proxy;
187
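/* Per-CPU LPI counts used to balance interrupt affinity across CPUs */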
188 struct cpu_lpi_count {
189 atomic_t managed;
190 atomic_t unmanaged;
191 };
192
193 static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count);
194
195 static LIST_HEAD(its_nodes);
196 static DEFINE_RAW_SPINLOCK(its_lock);
197 static struct rdists *gic_rdists;
198 static struct irq_domain *its_parent;
199
200 static unsigned long its_list_map;
201 static u16 vmovp_seq_num;
202 static DEFINE_RAW_SPINLOCK(vmovp_lock);
203
204 static DEFINE_IDA(its_vpeid_ida);
205
206 #define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
207 #define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu))
208 #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
209 #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
210
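/* Extra GFP flags ORed into every ITS page allocation, set by HW quirks */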
211 static gfp_t gfp_flags_quirk;
212
213 static struct page *its_alloc_pages_node(int node, gfp_t gfp,
214 unsigned int order)
215 {
216 struct page *page;
217 int ret = 0;
218
219 page = alloc_pages_node(node, gfp | gfp_flags_quirk, order);
220
221 if (!page)
222 return NULL;
223
224 ret = set_memory_decrypted((unsigned long)page_address(page),
225 1 << order);
226 /*
227 * If set_memory_decrypted() fails then we don't know what state the
228 * page is in, so we can't free it. Instead we leak it.
229 * set_memory_decrypted() will already have WARNed.
230 */
231 if (ret)
232 return NULL;
233
234 return page;
235 }
236
237 static struct page *its_alloc_pages(gfp_t gfp, unsigned int order)
238 {
239 return its_alloc_pages_node(NUMA_NO_NODE, gfp, order);
240 }
241
242 static void its_free_pages(void *addr, unsigned int order)
243 {
244 /*
245 * If the memory cannot be encrypted again then we must leak the pages.
246 * set_memory_encrypted() will already have WARNed.
247 */
248 if (set_memory_encrypted((unsigned long)addr, 1 << order))
249 return;
250 free_pages((unsigned long)addr, order);
251 }
252
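/* Shared pool used to pack sub-page ITT allocations into full pages */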
253 static struct gen_pool *itt_pool;
254
255 static void *itt_alloc_pool(int node, int size)
256 {
257 unsigned long addr;
258 struct page *page;
259
260 if (size >= PAGE_SIZE) {
261 page = its_alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, get_order(size));
262
263 return page ? page_address(page) : NULL;
264 }
265
266 do {
267 addr = gen_pool_alloc(itt_pool, size);
268 if (addr)
269 break;
270
271 page = its_alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
272 if (!page)
273 break;
274
275 gen_pool_add(itt_pool, (unsigned long)page_address(page), PAGE_SIZE, node);
276 } while (!addr);
277
278 return (void *)addr;
279 }
280
281 static void itt_free_pool(void *addr, int size)
282 {
283 if (!addr)
284 return;
285
286 if (size >= PAGE_SIZE) {
287 its_free_pages(addr, get_order(size));
288 return;
289 }
290
291 gen_pool_free(itt_pool, (unsigned long)addr, size);
292 }
293
294 /*
295 * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we
296 * always have vSGIs mapped.
297 */
298 static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)
299 {
300 return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);
301 }
302
303 static bool rdists_support_shareable(void)
304 {
305 return !(gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE);
306 }
307
308 static u16 get_its_list(struct its_vm *vm)
309 {
310 struct its_node *its;
311 unsigned long its_list = 0;
312
313 list_for_each_entry(its, &its_nodes, entry) {
314 if (!is_v4(its))
315 continue;
316
317 if (require_its_list_vmovp(vm, its))
318 __set_bit(its->list_nr, &its_list);
319 }
320
321 return (u16)its_list;
322 }
323
324 static inline u32 its_get_event_id(struct irq_data *d)
325 {
326 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
327 return d->hwirq - its_dev->event_map.lpi_base;
328 }
329
330 static struct its_collection *dev_event_to_col(struct its_device *its_dev,
331 u32 event)
332 {
333 struct its_node *its = its_dev->its;
334
335 return its->collections + its_dev->event_map.col_map[event];
336 }
337
338 static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
339 u32 event)
340 {
341 if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis))
342 return NULL;
343
344 return &its_dev->event_map.vlpi_maps[event];
345 }
346
347 static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
348 {
349 if (irqd_is_forwarded_to_vcpu(d)) {
350 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
351 u32 event = its_get_event_id(d);
352
353 return dev_event_to_vlpi_map(its_dev, event);
354 }
355
356 return NULL;
357 }
358
359 static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)
360 {
361 raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
362 return vpe->col_idx;
363 }
364
365 static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
366 {
367 raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
368 }
369
370 static struct irq_chip its_vpe_irq_chip;
371
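/* Resolve the CPU an interrupt is currently routed to, taking the vPE lock for vLPIs/vPEs */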
372 static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
373 {
374 struct its_vpe *vpe = NULL;
375 int cpu;
376
377 if (d->chip == &its_vpe_irq_chip) {
378 vpe = irq_data_get_irq_chip_data(d);
379 } else {
380 struct its_vlpi_map *map = get_vlpi_map(d);
381 if (map)
382 vpe = map->vpe;
383 }
384
385 if (vpe) {
386 cpu = vpe_to_cpuid_lock(vpe, flags);
387 } else {
388 /* Physical LPIs are already locked via the irq_desc lock */
389 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
390 cpu = its_dev->event_map.col_map[its_get_event_id(d)];
391 /* Keep GCC quiet... */
392 *flags = 0;
393 }
394
395 return cpu;
396 }
397
398 static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags)
399 {
400 struct its_vpe *vpe = NULL;
401
402 if (d->chip == &its_vpe_irq_chip) {
403 vpe = irq_data_get_irq_chip_data(d);
404 } else {
405 struct its_vlpi_map *map = get_vlpi_map(d);
406 if (map)
407 vpe = map->vpe;
408 }
409
410 if (vpe)
411 vpe_to_cpuid_unlock(vpe, flags);
412 }
413
414 static struct its_collection *valid_col(struct its_collection *col)
415 {
416 if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
417 return NULL;
418
419 return col;
420 }
421
422 static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
423 {
424 if (valid_col(its->collections + vpe->col_idx))
425 return vpe;
426
427 return NULL;
428 }
429
430 /*
431 * ITS command descriptors - parameters to be encoded in a command
432 * block.
433 */
434 struct its_cmd_desc {
435 union {
436 struct {
437 struct its_device *dev;
438 u32 event_id;
439 } its_inv_cmd;
440
441 struct {
442 struct its_device *dev;
443 u32 event_id;
444 } its_clear_cmd;
445
446 struct {
447 struct its_device *dev;
448 u32 event_id;
449 } its_int_cmd;
450
451 struct {
452 struct its_device *dev;
453 int valid;
454 } its_mapd_cmd;
455
456 struct {
457 struct its_collection *col;
458 int valid;
459 } its_mapc_cmd;
460
461 struct {
462 struct its_device *dev;
463 u32 phys_id;
464 u32 event_id;
465 } its_mapti_cmd;
466
467 struct {
468 struct its_device *dev;
469 struct its_collection *col;
470 u32 event_id;
471 } its_movi_cmd;
472
473 struct {
474 struct its_device *dev;
475 u32 event_id;
476 } its_discard_cmd;
477
478 struct {
479 struct its_collection *col;
480 } its_invall_cmd;
481
482 struct {
483 struct its_vpe *vpe;
484 } its_vinvall_cmd;
485
486 struct {
487 struct its_vpe *vpe;
488 struct its_collection *col;
489 bool valid;
490 } its_vmapp_cmd;
491
492 struct {
493 struct its_vpe *vpe;
494 struct its_device *dev;
495 u32 virt_id;
496 u32 event_id;
497 bool db_enabled;
498 } its_vmapti_cmd;
499
500 struct {
501 struct its_vpe *vpe;
502 struct its_device *dev;
503 u32 event_id;
504 bool db_enabled;
505 } its_vmovi_cmd;
506
507 struct {
508 struct its_vpe *vpe;
509 struct its_collection *col;
510 u16 seq_num;
511 u16 its_list;
512 } its_vmovp_cmd;
513
514 struct {
515 struct its_vpe *vpe;
516 } its_invdb_cmd;
517
518 struct {
519 struct its_vpe *vpe;
520 u8 sgi;
521 u8 priority;
522 bool enable;
523 bool group;
524 bool clear;
525 } its_vsgi_cmd;
526 };
527 };
528
529 /*
530 * The ITS command block, which is what the ITS actually parses.
531 */
532 struct its_cmd_block {
533 union {
534 u64 raw_cmd[4];
535 __le64 raw_cmd_le[4];
536 };
537 };
538
539 #define ITS_CMD_QUEUE_SZ SZ_64K
540 #define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
541
542 typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
543 struct its_cmd_block *,
544 struct its_cmd_desc *);
545
546 typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
547 struct its_cmd_block *,
548 struct its_cmd_desc *);
549
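/* Command encoding helpers: each writes a value into bits [h:l] of a raw 64bit command word */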
550 static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
551 {
552 u64 mask = GENMASK_ULL(h, l);
553 *raw_cmd &= ~mask;
554 *raw_cmd |= (val << l) & mask;
555 }
556
557 static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
558 {
559 its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
560 }
561
562 static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
563 {
564 its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
565 }
566
567 static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
568 {
569 its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
570 }
571
572 static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
573 {
574 its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
575 }
576
577 static void its_encode_size(struct its_cmd_block *cmd, u8 size)
578 {
579 its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
580 }
581
582 static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
583 {
584 its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
585 }
586
587 static void its_encode_valid(struct its_cmd_block *cmd, int valid)
588 {
589 its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
590 }
591
592 static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
593 {
594 its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
595 }
596
597 static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
598 {
599 its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
600 }
601
602 static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
603 {
604 its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
605 }
606
607 static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
608 {
609 its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
610 }
611
612 static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
613 {
614 its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
615 }
616
617 static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
618 {
619 its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
620 }
621
622 static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
623 {
624 its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
625 }
626
627 static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
628 {
629 its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
630 }
631
632 static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
633 {
634 its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
635 }
636
637 static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
638 {
639 its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
640 }
641
642 static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa)
643 {
644 its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16);
645 }
646
647 static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc)
648 {
649 its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8);
650 }
651
652 static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz)
653 {
654 its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9);
655 }
656
657 static void its_encode_vmapp_default_db(struct its_cmd_block *cmd,
658 u32 vpe_db_lpi)
659 {
660 its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0);
661 }
662
663 static void its_encode_vmovp_default_db(struct its_cmd_block *cmd,
664 u32 vpe_db_lpi)
665 {
666 its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0);
667 }
668
669 static void its_encode_db(struct its_cmd_block *cmd, bool db)
670 {
671 its_mask_encode(&cmd->raw_cmd[2], db, 63, 63);
672 }
673
674 static void its_encode_sgi_intid(struct its_cmd_block *cmd, u8 sgi)
675 {
676 its_mask_encode(&cmd->raw_cmd[0], sgi, 35, 32);
677 }
678
679 static void its_encode_sgi_priority(struct its_cmd_block *cmd, u8 prio)
680 {
681 its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20);
682 }
683
684 static void its_encode_sgi_group(struct its_cmd_block *cmd, bool grp)
685 {
686 its_mask_encode(&cmd->raw_cmd[0], grp, 10, 10);
687 }
688
689 static void its_encode_sgi_clear(struct its_cmd_block *cmd, bool clr)
690 {
691 its_mask_encode(&cmd->raw_cmd[0], clr, 9, 9);
692 }
693
694 static void its_encode_sgi_enable(struct its_cmd_block *cmd, bool en)
695 {
696 its_mask_encode(&cmd->raw_cmd[0], en, 8, 8);
697 }
698
699 static inline void its_fixup_cmd(struct its_cmd_block *cmd)
700 {
701 /* Let's fixup BE commands */
702 cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]);
703 cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]);
704 cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]);
705 cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]);
706 }
707
708 static struct its_collection *its_build_mapd_cmd(struct its_node *its,
709 struct its_cmd_block *cmd,
710 struct its_cmd_desc *desc)
711 {
712 unsigned long itt_addr;
713 u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);
714
715 itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
716
717 its_encode_cmd(cmd, GITS_CMD_MAPD);
718 its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
719 its_encode_size(cmd, size - 1);
720 its_encode_itt(cmd, itt_addr);
721 its_encode_valid(cmd, desc->its_mapd_cmd.valid);
722
723 its_fixup_cmd(cmd);
724
725 return NULL;
726 }
727
728 static struct its_collection *its_build_mapc_cmd(struct its_node *its,
729 struct its_cmd_block *cmd,
730 struct its_cmd_desc *desc)
731 {
732 its_encode_cmd(cmd, GITS_CMD_MAPC);
733 its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
734 its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
735 its_encode_valid(cmd, desc->its_mapc_cmd.valid);
736
737 its_fixup_cmd(cmd);
738
739 return desc->its_mapc_cmd.col;
740 }
741
742 static struct its_collection *its_build_mapti_cmd(struct its_node *its,
743 struct its_cmd_block *cmd,
744 struct its_cmd_desc *desc)
745 {
746 struct its_collection *col;
747
748 col = dev_event_to_col(desc->its_mapti_cmd.dev,
749 desc->its_mapti_cmd.event_id);
750
751 its_encode_cmd(cmd, GITS_CMD_MAPTI);
752 its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
753 its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
754 its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
755 its_encode_collection(cmd, col->col_id);
756
757 its_fixup_cmd(cmd);
758
759 return valid_col(col);
760 }
761
762 static struct its_collection *its_build_movi_cmd(struct its_node *its,
763 struct its_cmd_block *cmd,
764 struct its_cmd_desc *desc)
765 {
766 struct its_collection *col;
767
768 col = dev_event_to_col(desc->its_movi_cmd.dev,
769 desc->its_movi_cmd.event_id);
770
771 its_encode_cmd(cmd, GITS_CMD_MOVI);
772 its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
773 its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
774 its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
775
776 its_fixup_cmd(cmd);
777
778 return valid_col(col);
779 }
780
781 static struct its_collection *its_build_discard_cmd(struct its_node *its,
782 struct its_cmd_block *cmd,
783 struct its_cmd_desc *desc)
784 {
785 struct its_collection *col;
786
787 col = dev_event_to_col(desc->its_discard_cmd.dev,
788 desc->its_discard_cmd.event_id);
789
790 its_encode_cmd(cmd, GITS_CMD_DISCARD);
791 its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
792 its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
793
794 its_fixup_cmd(cmd);
795
796 return valid_col(col);
797 }
798
799 static struct its_collection *its_build_inv_cmd(struct its_node *its,
800 struct its_cmd_block *cmd,
801 struct its_cmd_desc *desc)
802 {
803 struct its_collection *col;
804
805 col = dev_event_to_col(desc->its_inv_cmd.dev,
806 desc->its_inv_cmd.event_id);
807
808 its_encode_cmd(cmd, GITS_CMD_INV);
809 its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
810 its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
811
812 its_fixup_cmd(cmd);
813
814 return valid_col(col);
815 }
816
817 static struct its_collection *its_build_int_cmd(struct its_node *its,
818 struct its_cmd_block *cmd,
819 struct its_cmd_desc *desc)
820 {
821 struct its_collection *col;
822
823 col = dev_event_to_col(desc->its_int_cmd.dev,
824 desc->its_int_cmd.event_id);
825
826 its_encode_cmd(cmd, GITS_CMD_INT);
827 its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
828 its_encode_event_id(cmd, desc->its_int_cmd.event_id);
829
830 its_fixup_cmd(cmd);
831
832 return valid_col(col);
833 }
834
835 static struct its_collection *its_build_clear_cmd(struct its_node *its,
836 struct its_cmd_block *cmd,
837 struct its_cmd_desc *desc)
838 {
839 struct its_collection *col;
840
841 col = dev_event_to_col(desc->its_clear_cmd.dev,
842 desc->its_clear_cmd.event_id);
843
844 its_encode_cmd(cmd, GITS_CMD_CLEAR);
845 its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
846 its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
847
848 its_fixup_cmd(cmd);
849
850 return valid_col(col);
851 }
852
853 static struct its_collection *its_build_invall_cmd(struct its_node *its,
854 struct its_cmd_block *cmd,
855 struct its_cmd_desc *desc)
856 {
857 its_encode_cmd(cmd, GITS_CMD_INVALL);
858 its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);
859
860 its_fixup_cmd(cmd);
861
862 return desc->its_invall_cmd.col;
863 }
864
865 static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
866 struct its_cmd_block *cmd,
867 struct its_cmd_desc *desc)
868 {
869 its_encode_cmd(cmd, GITS_CMD_VINVALL);
870 its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
871
872 its_fixup_cmd(cmd);
873
874 return valid_vpe(its, desc->its_vinvall_cmd.vpe);
875 }
876
877 static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
878 struct its_cmd_block *cmd,
879 struct its_cmd_desc *desc)
880 {
881 struct its_vpe *vpe = valid_vpe(its, desc->its_vmapp_cmd.vpe);
882 unsigned long vpt_addr, vconf_addr;
883 u64 target;
884 bool alloc;
885
886 its_encode_cmd(cmd, GITS_CMD_VMAPP);
887 its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
888 its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
889
890 if (!desc->its_vmapp_cmd.valid) {
891 alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
892 if (is_v4_1(its)) {
893 its_encode_alloc(cmd, alloc);
894 /*
895 * Unmapping a VPE is self-synchronizing on GICv4.1,
896 * no need to issue a VSYNC.
897 */
898 vpe = NULL;
899 }
900
901 goto out;
902 }
903
904 vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
905 target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
906
907 its_encode_target(cmd, target);
908 its_encode_vpt_addr(cmd, vpt_addr);
909 its_encode_vpt_size(cmd, LPI_NRBITS - 1);
910
911 alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
912
913 if (!is_v4_1(its))
914 goto out;
915
916 vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));
917
918 its_encode_alloc(cmd, alloc);
919
920 /*
921 * GICv4.1 provides a way to get the VLPI state, which needs the vPE
922 * to be unmapped first, and in this case, we may remap the vPE
923 * back while the VPT is not empty. So we can't assume that the
924 * VPT is empty on map. This is why we never advertise PTZ.
925 */
926 its_encode_ptz(cmd, false);
927 its_encode_vconf_addr(cmd, vconf_addr);
928 its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);
929
930 out:
931 its_fixup_cmd(cmd);
932
933 return vpe;
934 }
935
936 static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
937 struct its_cmd_block *cmd,
938 struct its_cmd_desc *desc)
939 {
940 u32 db;
941
942 if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled)
943 db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
944 else
945 db = 1023;
946
947 its_encode_cmd(cmd, GITS_CMD_VMAPTI);
948 its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
949 its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
950 its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
951 its_encode_db_phys_id(cmd, db);
952 its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);
953
954 its_fixup_cmd(cmd);
955
956 return valid_vpe(its, desc->its_vmapti_cmd.vpe);
957 }
958
959 static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
960 struct its_cmd_block *cmd,
961 struct its_cmd_desc *desc)
962 {
963 u32 db;
964
965 if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled)
966 db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
967 else
968 db = 1023;
969
970 its_encode_cmd(cmd, GITS_CMD_VMOVI);
971 its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
972 its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
973 its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
974 its_encode_db_phys_id(cmd, db);
975 its_encode_db_valid(cmd, true);
976
977 its_fixup_cmd(cmd);
978
979 return valid_vpe(its, desc->its_vmovi_cmd.vpe);
980 }
981
982 static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
983 struct its_cmd_block *cmd,
984 struct its_cmd_desc *desc)
985 {
986 u64 target;
987
988 target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
989 its_encode_cmd(cmd, GITS_CMD_VMOVP);
990 its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
991 its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
992 its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
993 its_encode_target(cmd, target);
994
995 if (is_v4_1(its)) {
996 its_encode_db(cmd, true);
997 its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi);
998 }
999
1000 its_fixup_cmd(cmd);
1001
1002 return valid_vpe(its, desc->its_vmovp_cmd.vpe);
1003 }
1004
1005 static struct its_vpe *its_build_vinv_cmd(struct its_node *its,
1006 struct its_cmd_block *cmd,
1007 struct its_cmd_desc *desc)
1008 {
1009 struct its_vlpi_map *map;
1010
1011 map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev,
1012 desc->its_inv_cmd.event_id);
1013
1014 its_encode_cmd(cmd, GITS_CMD_INV);
1015 its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
1016 its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
1017
1018 its_fixup_cmd(cmd);
1019
1020 return valid_vpe(its, map->vpe);
1021 }
1022
1023 static struct its_vpe *its_build_vint_cmd(struct its_node *its,
1024 struct its_cmd_block *cmd,
1025 struct its_cmd_desc *desc)
1026 {
1027 struct its_vlpi_map *map;
1028
1029 map = dev_event_to_vlpi_map(desc->its_int_cmd.dev,
1030 desc->its_int_cmd.event_id);
1031
1032 its_encode_cmd(cmd, GITS_CMD_INT);
1033 its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
1034 its_encode_event_id(cmd, desc->its_int_cmd.event_id);
1035
1036 its_fixup_cmd(cmd);
1037
1038 return valid_vpe(its, map->vpe);
1039 }
1040
1041 static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
1042 struct its_cmd_block *cmd,
1043 struct its_cmd_desc *desc)
1044 {
1045 struct its_vlpi_map *map;
1046
1047 map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev,
1048 desc->its_clear_cmd.event_id);
1049
1050 its_encode_cmd(cmd, GITS_CMD_CLEAR);
1051 its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
1052 its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
1053
1054 its_fixup_cmd(cmd);
1055
1056 return valid_vpe(its, map->vpe);
1057 }
1058
1059 static struct its_vpe *its_build_invdb_cmd(struct its_node *its,
1060 struct its_cmd_block *cmd,
1061 struct its_cmd_desc *desc)
1062 {
1063 if (WARN_ON(!is_v4_1(its)))
1064 return NULL;
1065
1066 its_encode_cmd(cmd, GITS_CMD_INVDB);
1067 its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id);
1068
1069 its_fixup_cmd(cmd);
1070
1071 return valid_vpe(its, desc->its_invdb_cmd.vpe);
1072 }
1073
1074 static struct its_vpe *its_build_vsgi_cmd(struct its_node *its,
1075 struct its_cmd_block *cmd,
1076 struct its_cmd_desc *desc)
1077 {
1078 if (WARN_ON(!is_v4_1(its)))
1079 return NULL;
1080
1081 its_encode_cmd(cmd, GITS_CMD_VSGI);
1082 its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id);
1083 its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi);
1084 its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority);
1085 its_encode_sgi_group(cmd, desc->its_vsgi_cmd.group);
1086 its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear);
1087 its_encode_sgi_enable(cmd, desc->its_vsgi_cmd.enable);
1088
1089 its_fixup_cmd(cmd);
1090
1091 return valid_vpe(its, desc->its_vsgi_cmd.vpe);
1092 }
1093
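/* Byte offset of a command within the queue, as exposed by GITS_CREADR/GITS_CWRITER */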
1094 static u64 its_cmd_ptr_to_offset(struct its_node *its,
1095 struct its_cmd_block *ptr)
1096 {
1097 return (ptr - its->cmd_base) * sizeof(*ptr);
1098 }
1099
1100 static int its_queue_full(struct its_node *its)
1101 {
1102 int widx;
1103 int ridx;
1104
1105 widx = its->cmd_write - its->cmd_base;
1106 ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);
1107
1108 /* This is incredibly unlikely to happen, unless the ITS locks up. */
1109 if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
1110 return 1;
1111
1112 return 0;
1113 }
1114
1115 static struct its_cmd_block *its_allocate_entry(struct its_node *its)
1116 {
1117 struct its_cmd_block *cmd;
1118 u32 count = 1000000; /* 1s! */
1119
1120 while (its_queue_full(its)) {
1121 count--;
1122 if (!count) {
1123 pr_err_ratelimited("ITS queue not draining\n");
1124 return NULL;
1125 }
1126 cpu_relax();
1127 udelay(1);
1128 }
1129
1130 cmd = its->cmd_write++;
1131
1132 /* Handle queue wrapping */
1133 if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
1134 its->cmd_write = its->cmd_base;
1135
1136 /* Clear command */
1137 cmd->raw_cmd[0] = 0;
1138 cmd->raw_cmd[1] = 0;
1139 cmd->raw_cmd[2] = 0;
1140 cmd->raw_cmd[3] = 0;
1141
1142 return cmd;
1143 }
1144
1145 static struct its_cmd_block *its_post_commands(struct its_node *its)
1146 {
1147 u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
1148
1149 writel_relaxed(wr, its->base + GITS_CWRITER);
1150
1151 return its->cmd_write;
1152 }
1153
1154 static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
1155 {
1156 /*
1157 * Make sure the commands written to memory are observable by
1158 * the ITS.
1159 */
1160 if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
1161 gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
1162 else
1163 dsb(ishst);
1164 }
1165
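/* Poll GITS_CREADR until the ITS has consumed all commands up to 'to' */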
1166 static int its_wait_for_range_completion(struct its_node *its,
1167 u64 prev_idx,
1168 struct its_cmd_block *to)
1169 {
1170 u64 rd_idx, to_idx, linear_idx;
1171 u32 count = 1000000; /* 1s! */
1172
1173 /* Linearize to_idx if the command set has wrapped around */
1174 to_idx = its_cmd_ptr_to_offset(its, to);
1175 if (to_idx < prev_idx)
1176 to_idx += ITS_CMD_QUEUE_SZ;
1177
1178 linear_idx = prev_idx;
1179
1180 while (1) {
1181 s64 delta;
1182
1183 rd_idx = readl_relaxed(its->base + GITS_CREADR);
1184
1185 /*
1186 * Compute the read pointer progress, taking the
1187 * potential wrap-around into account.
1188 */
1189 delta = rd_idx - prev_idx;
1190 if (rd_idx < prev_idx)
1191 delta += ITS_CMD_QUEUE_SZ;
1192
1193 linear_idx += delta;
1194 if (linear_idx >= to_idx)
1195 break;
1196
1197 count--;
1198 if (!count) {
1199 pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
1200 to_idx, linear_idx);
1201 return -1;
1202 }
1203 prev_idx = rd_idx;
1204 cpu_relax();
1205 udelay(1);
1206 }
1207
1208 return 0;
1209 }
1210
1211 /* Warning, macro hell follows */
1212 #define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \
1213 void name(struct its_node *its, \
1214 buildtype builder, \
1215 struct its_cmd_desc *desc) \
1216 { \
1217 struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \
1218 synctype *sync_obj; \
1219 unsigned long flags; \
1220 u64 rd_idx; \
1221 \
1222 raw_spin_lock_irqsave(&its->lock, flags); \
1223 \
1224 cmd = its_allocate_entry(its); \
1225 if (!cmd) { /* We're soooooo screwed... */ \
1226 raw_spin_unlock_irqrestore(&its->lock, flags); \
1227 return; \
1228 } \
1229 sync_obj = builder(its, cmd, desc); \
1230 its_flush_cmd(its, cmd); \
1231 \
1232 if (sync_obj) { \
1233 sync_cmd = its_allocate_entry(its); \
1234 if (!sync_cmd) \
1235 goto post; \
1236 \
1237 buildfn(its, sync_cmd, sync_obj); \
1238 its_flush_cmd(its, sync_cmd); \
1239 } \
1240 \
1241 post: \
1242 rd_idx = readl_relaxed(its->base + GITS_CREADR); \
1243 next_cmd = its_post_commands(its); \
1244 raw_spin_unlock_irqrestore(&its->lock, flags); \
1245 \
1246 if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \
1247 pr_err_ratelimited("ITS cmd %ps failed\n", builder); \
1248 }
1249
1250 static void its_build_sync_cmd(struct its_node *its,
1251 struct its_cmd_block *sync_cmd,
1252 struct its_collection *sync_col)
1253 {
1254 its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
1255 its_encode_target(sync_cmd, sync_col->target_address);
1256
1257 its_fixup_cmd(sync_cmd);
1258 }
1259
1260 static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
1261 struct its_collection, its_build_sync_cmd)
1262
1263 static void its_build_vsync_cmd(struct its_node *its,
1264 struct its_cmd_block *sync_cmd,
1265 struct its_vpe *sync_vpe)
1266 {
1267 its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
1268 its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
1269
1270 its_fixup_cmd(sync_cmd);
1271 }
1272
1273 static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
1274 struct its_vpe, its_build_vsync_cmd)
1275
1276 static void its_send_int(struct its_device *dev, u32 event_id)
1277 {
1278 struct its_cmd_desc desc;
1279
1280 desc.its_int_cmd.dev = dev;
1281 desc.its_int_cmd.event_id = event_id;
1282
1283 its_send_single_command(dev->its, its_build_int_cmd, &desc);
1284 }
1285
1286 static void its_send_clear(struct its_device *dev, u32 event_id)
1287 {
1288 struct its_cmd_desc desc;
1289
1290 desc.its_clear_cmd.dev = dev;
1291 desc.its_clear_cmd.event_id = event_id;
1292
1293 its_send_single_command(dev->its, its_build_clear_cmd, &desc);
1294 }
1295
1296 static void its_send_inv(struct its_device *dev, u32 event_id)
1297 {
1298 struct its_cmd_desc desc;
1299
1300 desc.its_inv_cmd.dev = dev;
1301 desc.its_inv_cmd.event_id = event_id;
1302
1303 its_send_single_command(dev->its, its_build_inv_cmd, &desc);
1304 }
1305
1306 static void its_send_mapd(struct its_device *dev, int valid)
1307 {
1308 struct its_cmd_desc desc;
1309
1310 desc.its_mapd_cmd.dev = dev;
1311 desc.its_mapd_cmd.valid = !!valid;
1312
1313 its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
1314 }
1315
1316 static void its_send_mapc(struct its_node *its, struct its_collection *col,
1317 int valid)
1318 {
1319 struct its_cmd_desc desc;
1320
1321 desc.its_mapc_cmd.col = col;
1322 desc.its_mapc_cmd.valid = !!valid;
1323
1324 its_send_single_command(its, its_build_mapc_cmd, &desc);
1325 }
1326
1327 static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
1328 {
1329 struct its_cmd_desc desc;
1330
1331 desc.its_mapti_cmd.dev = dev;
1332 desc.its_mapti_cmd.phys_id = irq_id;
1333 desc.its_mapti_cmd.event_id = id;
1334
1335 its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
1336 }
1337
1338 static void its_send_movi(struct its_device *dev,
1339 struct its_collection *col, u32 id)
1340 {
1341 struct its_cmd_desc desc;
1342
1343 desc.its_movi_cmd.dev = dev;
1344 desc.its_movi_cmd.col = col;
1345 desc.its_movi_cmd.event_id = id;
1346
1347 its_send_single_command(dev->its, its_build_movi_cmd, &desc);
1348 }
1349
1350 static void its_send_discard(struct its_device *dev, u32 id)
1351 {
1352 struct its_cmd_desc desc;
1353
1354 desc.its_discard_cmd.dev = dev;
1355 desc.its_discard_cmd.event_id = id;
1356
1357 its_send_single_command(dev->its, its_build_discard_cmd, &desc);
1358 }
1359
1360 static void its_send_invall(struct its_node *its, struct its_collection *col)
1361 {
1362 struct its_cmd_desc desc;
1363
1364 desc.its_invall_cmd.col = col;
1365
1366 its_send_single_command(its, its_build_invall_cmd, &desc);
1367 }
1368
1369 static void its_send_vmapti(struct its_device *dev, u32 id)
1370 {
1371 struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
1372 struct its_cmd_desc desc;
1373
1374 desc.its_vmapti_cmd.vpe = map->vpe;
1375 desc.its_vmapti_cmd.dev = dev;
1376 desc.its_vmapti_cmd.virt_id = map->vintid;
1377 desc.its_vmapti_cmd.event_id = id;
1378 desc.its_vmapti_cmd.db_enabled = map->db_enabled;
1379
1380 its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
1381 }
1382
1383 static void its_send_vmovi(struct its_device *dev, u32 id)
1384 {
1385 struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
1386 struct its_cmd_desc desc;
1387
1388 desc.its_vmovi_cmd.vpe = map->vpe;
1389 desc.its_vmovi_cmd.dev = dev;
1390 desc.its_vmovi_cmd.event_id = id;
1391 desc.its_vmovi_cmd.db_enabled = map->db_enabled;
1392
1393 its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
1394 }
1395
1396 static void its_send_vmapp(struct its_node *its,
1397 struct its_vpe *vpe, bool valid)
1398 {
1399 struct its_cmd_desc desc;
1400
1401 desc.its_vmapp_cmd.vpe = vpe;
1402 desc.its_vmapp_cmd.valid = valid;
1403 desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
1404
1405 its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
1406 }
1407
1408 static void its_send_vmovp(struct its_vpe *vpe)
1409 {
1410 struct its_cmd_desc desc = {};
1411 struct its_node *its;
1412 int col_id = vpe->col_idx;
1413
1414 desc.its_vmovp_cmd.vpe = vpe;
1415
1416 if (!its_list_map) {
1417 its = list_first_entry(&its_nodes, struct its_node, entry);
1418 desc.its_vmovp_cmd.col = &its->collections[col_id];
1419 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1420 return;
1421 }
1422
1423 /*
1424 * Yet another marvel of the architecture. If using the
1425 * its_list "feature", we need to make sure that all ITSs
1426 * receive all VMOVP commands in the same order. The only way
1427 * to guarantee this is to make vmovp a serialization point.
1428 *
1429 * Wall <-- Head.
1430 */
1431 guard(raw_spinlock)(&vmovp_lock);
1432 desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
1433 desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);
1434
1435 /* Emit VMOVPs */
1436 list_for_each_entry(its, &its_nodes, entry) {
1437 if (!is_v4(its))
1438 continue;
1439
1440 if (!require_its_list_vmovp(vpe->its_vm, its))
1441 continue;
1442
1443 desc.its_vmovp_cmd.col = &its->collections[col_id];
1444 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1445 }
1446 }
1447
1448 static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
1449 {
1450 struct its_cmd_desc desc;
1451
1452 desc.its_vinvall_cmd.vpe = vpe;
1453 its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
1454 }
1455
1456 static void its_send_vinv(struct its_device *dev, u32 event_id)
1457 {
1458 struct its_cmd_desc desc;
1459
1460 /*
1461 * There is no real VINV command. This is just a normal INV,
1462 * with a VSYNC instead of a SYNC.
1463 */
1464 desc.its_inv_cmd.dev = dev;
1465 desc.its_inv_cmd.event_id = event_id;
1466
1467 its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc);
1468 }
1469
1470 static void its_send_vint(struct its_device *dev, u32 event_id)
1471 {
1472 struct its_cmd_desc desc;
1473
1474 /*
1475 * There is no real VINT command. This is just a normal INT,
1476 * with a VSYNC instead of a SYNC.
1477 */
1478 desc.its_int_cmd.dev = dev;
1479 desc.its_int_cmd.event_id = event_id;
1480
1481 its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc);
1482 }
1483
1484 static void its_send_vclear(struct its_device *dev, u32 event_id)
1485 {
1486 struct its_cmd_desc desc;
1487
1488 /*
1489 * There is no real VCLEAR command. This is just a normal CLEAR,
1490 * with a VSYNC instead of a SYNC.
1491 */
1492 desc.its_clear_cmd.dev = dev;
1493 desc.its_clear_cmd.event_id = event_id;
1494
1495 its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
1496 }
1497
1498 static void its_send_invdb(struct its_node *its, struct its_vpe *vpe)
1499 {
1500 struct its_cmd_desc desc;
1501
1502 desc.its_invdb_cmd.vpe = vpe;
1503 its_send_single_vcommand(its, its_build_invdb_cmd, &desc);
1504 }
1505
1506 /*
1507 * irqchip functions - assumes MSI, mostly.
1508 */
1509 static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
1510 {
1511 struct its_vlpi_map *map = get_vlpi_map(d);
1512 irq_hw_number_t hwirq;
1513 void *va;
1514 u8 *cfg;
1515
1516 if (map) {
1517 va = page_address(map->vm->vprop_page);
1518 hwirq = map->vintid;
1519
1520 /* Remember the updated property */
1521 map->properties &= ~clr;
1522 map->properties |= set | LPI_PROP_GROUP1;
1523 } else {
1524 va = gic_rdists->prop_table_va;
1525 hwirq = d->hwirq;
1526 }
1527
1528 cfg = va + hwirq - 8192;
1529 *cfg &= ~clr;
1530 *cfg |= set | LPI_PROP_GROUP1;
1531
1532 /*
1533 * Make the above write visible to the redistributors.
1534 * And yes, we're flushing exactly: One. Single. Byte.
1535 * Humpf...
1536 */
1537 if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
1538 gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
1539 else
1540 dsb(ishst);
1541 }
1542
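/* Wait for the redistributor to complete outstanding operations (GICR_SYNCR) */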
1543 static void wait_for_syncr(void __iomem *rdbase)
1544 {
1545 while (readl_relaxed(rdbase + GICR_SYNCR) & 1)
1546 cpu_relax();
1547 }
1548
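/* Invalidate an LPI directly at the redistributor (GICR_INVLPIR), bypassing the ITS command queue */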
1549 static void __direct_lpi_inv(struct irq_data *d, u64 val)
1550 {
1551 void __iomem *rdbase;
1552 unsigned long flags;
1553 int cpu;
1554
1555 /* Target the redistributor this LPI is currently routed to */
1556 cpu = irq_to_cpuid_lock(d, &flags);
1557 raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
1558
1559 rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
1560 gic_write_lpir(val, rdbase + GICR_INVLPIR);
1561 wait_for_syncr(rdbase);
1562
1563 raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
1564 irq_to_cpuid_unlock(d, flags);
1565 }
1566
1567 static void direct_lpi_inv(struct irq_data *d)
1568 {
1569 struct its_vlpi_map *map = get_vlpi_map(d);
1570 u64 val;
1571
1572 if (map) {
1573 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1574
1575 WARN_ON(!is_v4_1(its_dev->its));
1576
1577 val = GICR_INVLPIR_V;
1578 val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id);
1579 val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid);
1580 } else {
1581 val = d->hwirq;
1582 }
1583
1584 __direct_lpi_inv(d, val);
1585 }
1586
1587 static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
1588 {
1589 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1590
1591 lpi_write_config(d, clr, set);
1592 if (gic_rdists->has_direct_lpi &&
1593 (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d)))
1594 direct_lpi_inv(d);
1595 else if (!irqd_is_forwarded_to_vcpu(d))
1596 its_send_inv(its_dev, its_get_event_id(d));
1597 else
1598 its_send_vinv(its_dev, its_get_event_id(d));
1599 }
1600
1601 static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
1602 {
1603 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1604 u32 event = its_get_event_id(d);
1605 struct its_vlpi_map *map;
1606
1607 /*
1608 * GICv4.1 does away with the per-LPI nonsense, nothing to do
1609 * here.
1610 */
1611 if (is_v4_1(its_dev->its))
1612 return;
1613
1614 map = dev_event_to_vlpi_map(its_dev, event);
1615
1616 if (map->db_enabled == enable)
1617 return;
1618
1619 map->db_enabled = enable;
1620
1621 /*
1622 * More fun with the architecture:
1623 *
1624 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
1625 * value or to 1023, depending on the enable bit. But that
1626 * would be issuing a mapping for an /existing/ DevID+EventID
1627 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
1628 * to the /same/ vPE, using this opportunity to adjust the
1629 * doorbell. Mouahahahaha. We loves it, Precious.
1630 */
1631 its_send_vmovi(its_dev, event);
1632 }
1633
1634 static void its_mask_irq(struct irq_data *d)
1635 {
1636 if (irqd_is_forwarded_to_vcpu(d))
1637 its_vlpi_set_doorbell(d, false);
1638
1639 lpi_update_config(d, LPI_PROP_ENABLED, 0);
1640 }
1641
1642 static void its_unmask_irq(struct irq_data *d)
1643 {
1644 if (irqd_is_forwarded_to_vcpu(d))
1645 its_vlpi_set_doorbell(d, true);
1646
1647 lpi_update_config(d, 0, LPI_PROP_ENABLED);
1648 }
1649
1650 static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu)
1651 {
1652 if (irqd_affinity_is_managed(d))
1653 return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
1654
1655 return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
1656 }
1657
1658 static void its_inc_lpi_count(struct irq_data *d, int cpu)
1659 {
1660 if (irqd_affinity_is_managed(d))
1661 atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
1662 else
1663 atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
1664 }
1665
1666 static void its_dec_lpi_count(struct irq_data *d, int cpu)
1667 {
1668 if (irqd_affinity_is_managed(d))
1669 atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
1670 else
1671 atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
1672 }
1673
1674 static unsigned int cpumask_pick_least_loaded(struct irq_data *d,
1675 const struct cpumask *cpu_mask)
1676 {
1677 unsigned int cpu = nr_cpu_ids, tmp;
1678 int count = S32_MAX;
1679
1680 for_each_cpu(tmp, cpu_mask) {
1681 int this_count = its_read_lpi_count(d, tmp);
1682 if (this_count < count) {
1683 cpu = tmp;
1684 count = this_count;
1685 }
1686 }
1687
1688 return cpu;
1689 }
1690
1691 /*
1692 * As suggested by Thomas Gleixner in:
1693 * https://lore.kernel.org/r/87h80q2aoc.fsf@nanos.tec.linutronix.de
1694 */
1695 static int its_select_cpu(struct irq_data *d,
1696 const struct cpumask *aff_mask)
1697 {
1698 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1699 static DEFINE_RAW_SPINLOCK(tmpmask_lock);
1700 static struct cpumask __tmpmask;
1701 struct cpumask *tmpmask;
1702 unsigned long flags;
1703 int cpu, node;
1704 node = its_dev->its->numa_node;
1705 tmpmask = &__tmpmask;
1706
1707 raw_spin_lock_irqsave(&tmpmask_lock, flags);
1708
1709 if (!irqd_affinity_is_managed(d)) {
1710 /* First try the NUMA node */
1711 if (node != NUMA_NO_NODE) {
1712 /*
1713 * Try the intersection of the affinity mask and the
1714 * node mask (and the online mask, just to be safe).
1715 */
1716 cpumask_and(tmpmask, cpumask_of_node(node), aff_mask);
1717 cpumask_and(tmpmask, tmpmask, cpu_online_mask);
1718
1719 /*
1720 * Ideally, we would check if the mask is empty, and
1721 * try again on the full node here.
1722 *
1723 * But it turns out that the way ACPI describes the
1724 * affinity for ITSs only deals with memory, and
1725 * not target CPUs, so it cannot describe a single
1726 * ITS placed next to two NUMA nodes.
1727 *
1728 * Instead, just fallback on the online mask. This
1729 * diverges from Thomas' suggestion above.
1730 */
1731 cpu = cpumask_pick_least_loaded(d, tmpmask);
1732 if (cpu < nr_cpu_ids)
1733 goto out;
1734
1735 /* If we can't cross sockets, give up */
1736 if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144))
1737 goto out;
1738
1739 /* If the above failed, expand the search */
1740 }
1741
1742 /* Try the intersection of the affinity and online masks */
1743 cpumask_and(tmpmask, aff_mask, cpu_online_mask);
1744
1745 /* If that doesn't fly, the online mask is the last resort */
1746 if (cpumask_empty(tmpmask))
1747 cpumask_copy(tmpmask, cpu_online_mask);
1748
1749 cpu = cpumask_pick_least_loaded(d, tmpmask);
1750 } else {
1751 cpumask_copy(tmpmask, aff_mask);
1752
1753 /* If we cannot cross sockets, limit the search to that node */
1754 if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) &&
1755 node != NUMA_NO_NODE)
1756 cpumask_and(tmpmask, tmpmask, cpumask_of_node(node));
1757
1758 cpu = cpumask_pick_least_loaded(d, tmpmask);
1759 }
1760 out:
1761 raw_spin_unlock_irqrestore(&tmpmask_lock, flags);
1762
1763 pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu);
1764 return cpu;
1765 }
1766
1767 static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
1768 bool force)
1769 {
1770 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1771 struct its_collection *target_col;
1772 u32 id = its_get_event_id(d);
1773 int cpu, prev_cpu;
1774
1775 /* A forwarded interrupt should use irq_set_vcpu_affinity */
1776 if (irqd_is_forwarded_to_vcpu(d))
1777 return -EINVAL;
1778
1779 prev_cpu = its_dev->event_map.col_map[id];
1780 its_dec_lpi_count(d, prev_cpu);
1781
1782 if (!force)
1783 cpu = its_select_cpu(d, mask_val);
1784 else
1785 cpu = cpumask_pick_least_loaded(d, mask_val);
1786
1787 if (cpu < 0 || cpu >= nr_cpu_ids)
1788 goto err;
1789
1790 /* don't set the affinity when the target CPU is the same as the current one */
1791 if (cpu != prev_cpu) {
1792 target_col = &its_dev->its->collections[cpu];
1793 its_send_movi(its_dev, target_col, id);
1794 its_dev->event_map.col_map[id] = cpu;
1795 irq_data_update_effective_affinity(d, cpumask_of(cpu));
1796 }
1797
1798 its_inc_lpi_count(d, cpu);
1799
1800 return IRQ_SET_MASK_OK_DONE;
1801
1802 err:
1803 its_inc_lpi_count(d, prev_cpu);
1804 return -EINVAL;
1805 }
1806
1807 static u64 its_irq_get_msi_base(struct its_device *its_dev)
1808 {
1809 struct its_node *its = its_dev->its;
1810
1811 return its->phys_base + GITS_TRANSLATER;
1812 }
1813
1814 static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
1815 {
1816 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1817
1818 msg->data = its_get_event_id(d);
1819 msi_msg_set_addr(irq_data_get_msi_desc(d), msg,
1820 its_dev->its->get_msi_base(its_dev));
1821 }
1822
1823 static int its_irq_set_irqchip_state(struct irq_data *d,
1824 enum irqchip_irq_state which,
1825 bool state)
1826 {
1827 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1828 u32 event = its_get_event_id(d);
1829
1830 if (which != IRQCHIP_STATE_PENDING)
1831 return -EINVAL;
1832
1833 if (irqd_is_forwarded_to_vcpu(d)) {
1834 if (state)
1835 its_send_vint(its_dev, event);
1836 else
1837 its_send_vclear(its_dev, event);
1838 } else {
1839 if (state)
1840 its_send_int(its_dev, event);
1841 else
1842 its_send_clear(its_dev, event);
1843 }
1844
1845 return 0;
1846 }
1847
1848 static int its_irq_retrigger(struct irq_data *d)
1849 {
1850 return !its_irq_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
1851 }
1852
1853 /*
1854 * Two favourable cases:
1855 *
1856 * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times
1857 * for vSGI delivery
1858 *
1859 * (b) Or the ITSs do not use a list map, meaning that VMOVP is cheap enough
1860 * and we're better off mapping all VPEs always
1861 *
1862 * If neither (a) nor (b) is true, then we map vPEs on demand.
1863 *
1864 */
1865 static bool gic_requires_eager_mapping(void)
1866 {
1867 if (!its_list_map || gic_rdists->has_rvpeid)
1868 return true;
1869
1870 return false;
1871 }
1872
1873 static void its_map_vm(struct its_node *its, struct its_vm *vm)
1874 {
1875 if (gic_requires_eager_mapping())
1876 return;
1877
1878 guard(raw_spinlock_irqsave)(&vm->vmapp_lock);
1879
1880 /*
1881 * If the VM wasn't mapped yet, iterate over the vpes and get
1882 * them mapped now.
1883 */
1884 vm->vlpi_count[its->list_nr]++;
1885
1886 if (vm->vlpi_count[its->list_nr] == 1) {
1887 int i;
1888
1889 for (i = 0; i < vm->nr_vpes; i++) {
1890 struct its_vpe *vpe = vm->vpes[i];
1891
1892 scoped_guard(raw_spinlock, &vpe->vpe_lock)
1893 its_send_vmapp(its, vpe, true);
1894
1895 its_send_vinvall(its, vpe);
1896 }
1897 }
1898 }
1899
1900 static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
1901 {
1902 /* Not using the ITS list? Everything is always mapped. */
1903 if (gic_requires_eager_mapping())
1904 return;
1905
1906 guard(raw_spinlock_irqsave)(&vm->vmapp_lock);
1907
1908 if (!--vm->vlpi_count[its->list_nr]) {
1909 int i;
1910
1911 for (i = 0; i < vm->nr_vpes; i++) {
1912 guard(raw_spinlock)(&vm->vpes[i]->vpe_lock);
1913 its_send_vmapp(its, vm->vpes[i], false);
1914 }
1915 }
1916 }
1917
1918 static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
1919 {
1920 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1921 u32 event = its_get_event_id(d);
1922
1923 if (!info->map)
1924 return -EINVAL;
1925
1926 if (!its_dev->event_map.vm) {
1927 struct its_vlpi_map *maps;
1928
1929 maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
1930 GFP_ATOMIC);
1931 if (!maps)
1932 return -ENOMEM;
1933
1934 its_dev->event_map.vm = info->map->vm;
1935 its_dev->event_map.vlpi_maps = maps;
1936 } else if (its_dev->event_map.vm != info->map->vm) {
1937 return -EINVAL;
1938 }
1939
1940 /* Get our private copy of the mapping information */
1941 its_dev->event_map.vlpi_maps[event] = *info->map;
1942
1943 if (irqd_is_forwarded_to_vcpu(d)) {
1944 /* Already mapped, move it around */
1945 its_send_vmovi(its_dev, event);
1946 } else {
1947 /* Ensure all the VPEs are mapped on this ITS */
1948 its_map_vm(its_dev->its, info->map->vm);
1949
1950 /*
1951 * Flag the interrupt as forwarded so that we can
1952 * start poking the virtual property table.
1953 */
1954 irqd_set_forwarded_to_vcpu(d);
1955
1956 /* Write out the property to the prop table */
1957 lpi_write_config(d, 0xff, info->map->properties);
1958
1959 /* Drop the physical mapping */
1960 its_send_discard(its_dev, event);
1961
1962 /* and install the virtual one */
1963 its_send_vmapti(its_dev, event);
1964
1965 /* Increment the number of VLPIs */
1966 its_dev->event_map.nr_vlpis++;
1967 }
1968
1969 return 0;
1970 }
1971
1972 static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
1973 {
1974 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1975 struct its_vlpi_map *map;
1976
1977 map = get_vlpi_map(d);
1978
1979 if (!its_dev->event_map.vm || !map)
1980 return -EINVAL;
1981
1982 /* Copy our mapping information to the incoming request */
1983 *info->map = *map;
1984
1985 return 0;
1986 }
1987
1988 static int its_vlpi_unmap(struct irq_data *d)
1989 {
1990 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1991 u32 event = its_get_event_id(d);
1992
1993 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
1994 return -EINVAL;
1995
1996 /* Drop the virtual mapping */
1997 its_send_discard(its_dev, event);
1998
1999 /* and restore the physical one */
2000 irqd_clr_forwarded_to_vcpu(d);
2001 its_send_mapti(its_dev, d->hwirq, event);
2002 lpi_update_config(d, 0xff, (lpi_prop_prio |
2003 LPI_PROP_ENABLED |
2004 LPI_PROP_GROUP1));
2005
2006 /* Potentially unmap the VM from this ITS */
2007 its_unmap_vm(its_dev->its, its_dev->event_map.vm);
2008
2009 /*
2010 * Drop the refcount and make the device available again if
2011 * this was the last VLPI.
2012 */
2013 if (!--its_dev->event_map.nr_vlpis) {
2014 its_dev->event_map.vm = NULL;
2015 kfree(its_dev->event_map.vlpi_maps);
2016 }
2017
2018 return 0;
2019 }
2020
2021 static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
2022 {
2023 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2024
2025 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
2026 return -EINVAL;
2027
2028 if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
2029 lpi_update_config(d, 0xff, info->config);
2030 else
2031 lpi_write_config(d, 0xff, info->config);
2032 its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
2033
2034 return 0;
2035 }
2036
2037 static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
2038 {
2039 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2040 struct its_cmd_info *info = vcpu_info;
2041
2042 /* Need a v4 ITS */
2043 if (!is_v4(its_dev->its))
2044 return -EINVAL;
2045
2046 guard(raw_spinlock)(&its_dev->event_map.vlpi_lock);
2047
2048 /* Unmap request? */
2049 if (!info)
2050 return its_vlpi_unmap(d);
2051
2052 switch (info->cmd_type) {
2053 case MAP_VLPI:
2054 return its_vlpi_map(d, info);
2055
2056 case GET_VLPI:
2057 return its_vlpi_get(d, info);
2058
2059 case PROP_UPDATE_VLPI:
2060 case PROP_UPDATE_AND_INV_VLPI:
2061 return its_vlpi_prop_update(d, info);
2062
2063 default:
2064 return -EINVAL;
2065 }
2066 }
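/*
 * Illustrative usage sketch, assuming the struct its_cmd_info layout from
 * the GICv4 header (not part of the driver logic): its_irq_set_vcpu_affinity()
 * above is normally reached through irq_set_vcpu_affinity() from the GICv4
 * layer. A MAP_VLPI request from that layer looks roughly like this:
 *
 *	struct its_cmd_info info = {
 *		.cmd_type	= MAP_VLPI,
 *		{
 *			.map	= map,
 *		},
 *	};
 *
 *	ret = irq_set_vcpu_affinity(host_irq, &info);
 *
 * Passing a NULL vcpu_info instead ends up in its_vlpi_unmap().
 */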
2067
2068 static struct irq_chip its_irq_chip = {
2069 .name = "ITS",
2070 .irq_mask = its_mask_irq,
2071 .irq_unmask = its_unmask_irq,
2072 .irq_eoi = irq_chip_eoi_parent,
2073 .irq_set_affinity = its_set_affinity,
2074 .irq_compose_msi_msg = its_irq_compose_msi_msg,
2075 .irq_set_irqchip_state = its_irq_set_irqchip_state,
2076 .irq_retrigger = its_irq_retrigger,
2077 .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity,
2078 };
2079
2080
2081 /*
2082 * How we allocate LPIs:
2083 *
2084 * lpi_range_list contains ranges of LPIs that are available to
2085 * allocate from. To allocate LPIs, just pick the first range that
2086 * fits the required allocation, and reduce it by the required
2087 * amount. Once empty, remove the range from the list.
2088 *
2089 * To free a range of LPIs, add a free range to the list, sort it and
2090 * merge the result if the new range happens to be adjacent to an
2091 * already free block.
2092 *
2093 * The consequence of the above is that allocation cost is low, but
2094 * freeing is expensive. We assume that freeing rarely occurs.
2095 */
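/*
 * A worked example of the scheme above (illustrative only, the numbers are
 * made up): starting from a single free range [8192, 8192 + 65536), an
 * allocation of 32 LPIs returns base 8192 and shrinks the range to start
 * at 8224. Freeing [8192, 8224) later inserts a new range just in front of
 * the shrunk one, and merge_lpi_ranges() folds the two adjacent ranges
 * back into a single entry starting at 8192.
 */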
2096 #define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
2097
2098 static DEFINE_MUTEX(lpi_range_lock);
2099 static LIST_HEAD(lpi_range_list);
2100
2101 struct lpi_range {
2102 struct list_head entry;
2103 u32 base_id;
2104 u32 span;
2105 };
2106
2107 static struct lpi_range *mk_lpi_range(u32 base, u32 span)
2108 {
2109 struct lpi_range *range;
2110
2111 range = kmalloc(sizeof(*range), GFP_KERNEL);
2112 if (range) {
2113 range->base_id = base;
2114 range->span = span;
2115 }
2116
2117 return range;
2118 }
2119
2120 static int alloc_lpi_range(u32 nr_lpis, u32 *base)
2121 {
2122 struct lpi_range *range, *tmp;
2123 int err = -ENOSPC;
2124
2125 mutex_lock(&lpi_range_lock);
2126
2127 list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
2128 if (range->span >= nr_lpis) {
2129 *base = range->base_id;
2130 range->base_id += nr_lpis;
2131 range->span -= nr_lpis;
2132
2133 if (range->span == 0) {
2134 list_del(&range->entry);
2135 kfree(range);
2136 }
2137
2138 err = 0;
2139 break;
2140 }
2141 }
2142
2143 mutex_unlock(&lpi_range_lock);
2144
2145 pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
2146 return err;
2147 }
2148
2149 static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b)
2150 {
2151 if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list)
2152 return;
2153 if (a->base_id + a->span != b->base_id)
2154 return;
2155 b->base_id = a->base_id;
2156 b->span += a->span;
2157 list_del(&a->entry);
2158 kfree(a);
2159 }
2160
2161 static int free_lpi_range(u32 base, u32 nr_lpis)
2162 {
2163 struct lpi_range *new, *old;
2164
2165 new = mk_lpi_range(base, nr_lpis);
2166 if (!new)
2167 return -ENOMEM;
2168
2169 mutex_lock(&lpi_range_lock);
2170
2171 list_for_each_entry_reverse(old, &lpi_range_list, entry) {
2172 if (old->base_id < base)
2173 break;
2174 }
2175 /*
2176 * old is the last element with ->base_id smaller than base,
2177 * so new goes right after it. If there are no elements with
2178 * ->base_id smaller than base, &old->entry ends up pointing
2179 * at the head of the list, and inserting new at the start of
2180 * the list is the right thing to do in that case as well.
2181 */
2182 list_add(&new->entry, &old->entry);
2183 /*
2184 * Now check if we can merge with the preceding and/or
2185 * following ranges.
2186 */
2187 merge_lpi_ranges(old, new);
2188 merge_lpi_ranges(new, list_next_entry(new, entry));
2189
2190 mutex_unlock(&lpi_range_lock);
2191 return 0;
2192 }
2193
2194 static int __init its_lpi_init(u32 id_bits)
2195 {
2196 u32 lpis = (1UL << id_bits) - 8192;
2197 u32 numlpis;
2198 int err;
2199
2200 numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
2201
2202 if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
2203 lpis = numlpis;
2204 pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
2205 lpis);
2206 }
2207
2208 /*
2209 * Initializing the allocator is just the same as freeing the
2210 * full range of LPIs.
2211 */
2212 err = free_lpi_range(8192, lpis);
2213 pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
2214 return err;
2215 }
2216
2217 static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
2218 {
2219 unsigned long *bitmap = NULL;
2220 int err = 0;
2221
2222 do {
2223 err = alloc_lpi_range(nr_irqs, base);
2224 if (!err)
2225 break;
2226
2227 nr_irqs /= 2;
2228 } while (nr_irqs > 0);
2229
2230 if (!nr_irqs)
2231 err = -ENOSPC;
2232
2233 if (err)
2234 goto out;
2235
2236 bitmap = bitmap_zalloc(nr_irqs, GFP_ATOMIC);
2237 if (!bitmap)
2238 goto out;
2239
2240 *nr_ids = nr_irqs;
2241
2242 out:
2243 if (!bitmap)
2244 *base = *nr_ids = 0;
2245
2246 return bitmap;
2247 }
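/*
 * Illustrative note on the halving loop above (made-up numbers): asking
 * for 32 LPIs when the largest free range only spans 10 fails at 32 and
 * 16, then succeeds with 8. The smaller count is reported back through
 * *nr_ids so the caller knows how many events it actually got.
 */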
2248
2249 static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
2250 {
2251 WARN_ON(free_lpi_range(base, nr_ids));
2252 bitmap_free(bitmap);
2253 }
2254
2255 static void gic_reset_prop_table(void *va)
2256 {
2257 /* Regular IRQ priority, Group-1, disabled */
2258 memset(va, lpi_prop_prio | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);
2259
2260 /* Make sure the GIC will observe the written configuration */
2261 gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
2262 }
2263
2264 static struct page *its_allocate_prop_table(gfp_t gfp_flags)
2265 {
2266 struct page *prop_page;
2267
2268 prop_page = its_alloc_pages(gfp_flags,
2269 get_order(LPI_PROPBASE_SZ));
2270 if (!prop_page)
2271 return NULL;
2272
2273 gic_reset_prop_table(page_address(prop_page));
2274
2275 return prop_page;
2276 }
2277
2278 static void its_free_prop_table(struct page *prop_page)
2279 {
2280 its_free_pages(page_address(prop_page), get_order(LPI_PROPBASE_SZ));
2281 }
2282
2283 static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
2284 {
2285 phys_addr_t start, end, addr_end;
2286 u64 i;
2287
2288 /*
2289 * We don't bother checking for a kdump kernel as, by
2290 * construction, the LPI tables are outside this kernel's
2291 * memory map.
2292 */
2293 if (is_kdump_kernel())
2294 return true;
2295
2296 addr_end = addr + size - 1;
2297
2298 for_each_reserved_mem_range(i, &start, &end) {
2299 if (addr >= start && addr_end <= end)
2300 return true;
2301 }
2302
2303 /* Not found, not a good sign... */
2304 pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n",
2305 &addr, &addr_end);
2306 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
2307 return false;
2308 }
2309
2310 static int gic_reserve_range(phys_addr_t addr, unsigned long size)
2311 {
2312 if (efi_enabled(EFI_CONFIG_TABLES))
2313 return efi_mem_reserve_persistent(addr, size);
2314
2315 return 0;
2316 }
2317
2318 static int __init its_setup_lpi_prop_table(void)
2319 {
2320 if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) {
2321 u64 val;
2322
2323 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
2324 lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1;
2325
2326 gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12);
2327 gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa,
2328 LPI_PROPBASE_SZ,
2329 MEMREMAP_WB);
2330 gic_reset_prop_table(gic_rdists->prop_table_va);
2331 } else {
2332 struct page *page;
2333
2334 lpi_id_bits = min_t(u32,
2335 GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
2336 ITS_MAX_LPI_NRBITS);
2337 page = its_allocate_prop_table(GFP_NOWAIT);
2338 if (!page) {
2339 pr_err("Failed to allocate PROPBASE\n");
2340 return -ENOMEM;
2341 }
2342
2343 gic_rdists->prop_table_pa = page_to_phys(page);
2344 gic_rdists->prop_table_va = page_address(page);
2345 WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa,
2346 LPI_PROPBASE_SZ));
2347 }
2348
2349 pr_info("GICv3: using LPI property table @%pa\n",
2350 &gic_rdists->prop_table_pa);
2351
2352 return its_lpi_init(lpi_id_bits);
2353 }
2354
2355 static const char *its_base_type_string[] = {
2356 [GITS_BASER_TYPE_DEVICE] = "Devices",
2357 [GITS_BASER_TYPE_VCPU] = "Virtual CPUs",
2358 [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)",
2359 [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections",
2360 [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)",
2361 [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)",
2362 [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)",
2363 };
2364
2365 static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
2366 {
2367 u32 idx = baser - its->tables;
2368
2369 return gits_read_baser(its->base + GITS_BASER + (idx << 3));
2370 }
2371
2372 static void its_write_baser(struct its_node *its, struct its_baser *baser,
2373 u64 val)
2374 {
2375 u32 idx = baser - its->tables;
2376
2377 gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
2378 baser->val = its_read_baser(its, baser);
2379 }
2380
2381 static int its_setup_baser(struct its_node *its, struct its_baser *baser,
2382 u64 cache, u64 shr, u32 order, bool indirect)
2383 {
2384 u64 val = its_read_baser(its, baser);
2385 u64 esz = GITS_BASER_ENTRY_SIZE(val);
2386 u64 type = GITS_BASER_TYPE(val);
2387 u64 baser_phys, tmp;
2388 u32 alloc_pages, psz;
2389 struct page *page;
2390 void *base;
2391
2392 psz = baser->psz;
2393 alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
2394 if (alloc_pages > GITS_BASER_PAGES_MAX) {
2395 pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
2396 &its->phys_base, its_base_type_string[type],
2397 alloc_pages, GITS_BASER_PAGES_MAX);
2398 alloc_pages = GITS_BASER_PAGES_MAX;
2399 order = get_order(GITS_BASER_PAGES_MAX * psz);
2400 }
2401
2402 page = its_alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
2403 if (!page)
2404 return -ENOMEM;
2405
2406 base = (void *)page_address(page);
2407 baser_phys = virt_to_phys(base);
2408
2409 /* Check if the physical address of the memory is above 48bits */
2410 if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
2411
2412 /* 52bit PA is supported only when PageSize=64K */
2413 if (psz != SZ_64K) {
2414 pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
2415 its_free_pages(base, order);
2416 return -ENXIO;
2417 }
2418
2419 /* Convert 52bit PA to 48bit field */
2420 baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
2421 }
2422
2423 retry_baser:
2424 val = (baser_phys |
2425 (type << GITS_BASER_TYPE_SHIFT) |
2426 ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
2427 ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
2428 cache |
2429 shr |
2430 GITS_BASER_VALID);
2431
2432 val |= indirect ? GITS_BASER_INDIRECT : 0x0;
2433
2434 switch (psz) {
2435 case SZ_4K:
2436 val |= GITS_BASER_PAGE_SIZE_4K;
2437 break;
2438 case SZ_16K:
2439 val |= GITS_BASER_PAGE_SIZE_16K;
2440 break;
2441 case SZ_64K:
2442 val |= GITS_BASER_PAGE_SIZE_64K;
2443 break;
2444 }
2445
2446 if (!shr)
2447 gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
2448
2449 its_write_baser(its, baser, val);
2450 tmp = baser->val;
2451
2452 if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
2453 /*
2454 * Shareability didn't stick. Just use
2455 * whatever the read reported, which is likely
2456 * to be the only thing this ITS
2457 * supports. If that's zero, make it
2458 * non-cacheable as well.
2459 */
2460 shr = tmp & GITS_BASER_SHAREABILITY_MASK;
2461 if (!shr)
2462 cache = GITS_BASER_nC;
2463
2464 goto retry_baser;
2465 }
2466
2467 if (val != tmp) {
2468 pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
2469 &its->phys_base, its_base_type_string[type],
2470 val, tmp);
2471 its_free_pages(base, order);
2472 return -ENXIO;
2473 }
2474
2475 baser->order = order;
2476 baser->base = base;
2477 baser->psz = psz;
2478 tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
2479
2480 pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
2481 &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
2482 its_base_type_string[type],
2483 (unsigned long)virt_to_phys(base),
2484 indirect ? "indirect" : "flat", (int)esz,
2485 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
2486
2487 return 0;
2488 }
2489
2490 static bool its_parse_indirect_baser(struct its_node *its,
2491 struct its_baser *baser,
2492 u32 *order, u32 ids)
2493 {
2494 u64 tmp = its_read_baser(its, baser);
2495 u64 type = GITS_BASER_TYPE(tmp);
2496 u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
2497 u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
2498 u32 new_order = *order;
2499 u32 psz = baser->psz;
2500 bool indirect = false;
2501
2502 /* No need to enable indirection if the memory requirement is < (psz * 2) bytes */
2503 if ((esz << ids) > (psz * 2)) {
2504 /*
2505 * Find out whether the hw supports a single or two-level table
2506 * by reading the bit at offset '62' after writing '1' to it.
2507 */
2508 its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
2509 indirect = !!(baser->val & GITS_BASER_INDIRECT);
2510
2511 if (indirect) {
2512 /*
2513 * The size of a lvl2 table is equal to the ITS page size,
2514 * 'psz'. To size the lvl1 table, subtract the number of ID
2515 * bits covered by a single lvl2 table from 'ids' (as
2516 * reported by the ITS hardware); the lvl1 table then needs
2517 * one GITS_LVL1_ENTRY_SIZE entry per remaining ID value.
2518 */
2519 ids -= ilog2(psz / (int)esz);
2520 esz = GITS_LVL1_ENTRY_SIZE;
2521 }
2522 }
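/*
 * Worked example (illustrative numbers): with psz = 64K and an 8-byte
 * entry size, one lvl2 page maps 8192 IDs, i.e. 13 bits. For ids = 20,
 * the indirect case therefore needs a lvl1 table of 2^(20 - 13) = 128
 * entries of GITS_LVL1_ENTRY_SIZE (8) bytes, a single 1KB allocation
 * instead of an 8MB flat table.
 */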
2523
2524 /*
2525 * Allocate as many entries as required to fit the
2526 * range of device IDs that the ITS can grok... The ID
2527 * space being incredibly sparse, this results in a
2528 * massive waste of memory if the two-level device table
2529 * feature is not supported by the hardware.
2530 */
2531 new_order = max_t(u32, get_order(esz << ids), new_order);
2532 if (new_order > MAX_PAGE_ORDER) {
2533 new_order = MAX_PAGE_ORDER;
2534 ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
2535 pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n",
2536 &its->phys_base, its_base_type_string[type],
2537 device_ids(its), ids);
2538 }
2539
2540 *order = new_order;
2541
2542 return indirect;
2543 }
2544
2545 static u32 compute_common_aff(u64 val)
2546 {
2547 u32 aff, clpiaff;
2548
2549 aff = FIELD_GET(GICR_TYPER_AFFINITY, val);
2550 clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val);
2551
2552 return aff & ~(GENMASK(31, 0) >> (clpiaff * 8));
2553 }
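/*
 * Illustrative example of the masking above: with CommonLPIAff = 1 only
 * Aff3 is kept, so an affinity of 0x01020304 is reduced to 0x01000000;
 * with CommonLPIAff = 3 the Aff3.Aff2.Aff1 fields survive, giving
 * 0x01020300. A value of 0 collapses everything to 0, i.e. all
 * redistributors share the same LPI configuration.
 */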
2554
2555 static u32 compute_its_aff(struct its_node *its)
2556 {
2557 u64 val;
2558 u32 svpet;
2559
2560 /*
2561 * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute
2562 * the resulting affinity. We then use that to see if it matches
2563 * our own affinity.
2564 */
2565 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
2566 val = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet);
2567 val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr);
2568 return compute_common_aff(val);
2569 }
2570
2571 static struct its_node *find_sibling_its(struct its_node *cur_its)
2572 {
2573 struct its_node *its;
2574 u32 aff;
2575
2576 if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer))
2577 return NULL;
2578
2579 aff = compute_its_aff(cur_its);
2580
2581 list_for_each_entry(its, &its_nodes, entry) {
2582 u64 baser;
2583
2584 if (!is_v4_1(its) || its == cur_its)
2585 continue;
2586
2587 if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2588 continue;
2589
2590 if (aff != compute_its_aff(its))
2591 continue;
2592
2593 /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
2594 baser = its->tables[2].val;
2595 if (!(baser & GITS_BASER_VALID))
2596 continue;
2597
2598 return its;
2599 }
2600
2601 return NULL;
2602 }
2603
2604 static void its_free_tables(struct its_node *its)
2605 {
2606 int i;
2607
2608 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2609 if (its->tables[i].base) {
2610 its_free_pages(its->tables[i].base, its->tables[i].order);
2611 its->tables[i].base = NULL;
2612 }
2613 }
2614 }
2615
2616 static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser)
2617 {
2618 u64 psz = SZ_64K;
2619
2620 while (psz) {
2621 u64 val, gpsz;
2622
2623 val = its_read_baser(its, baser);
2624 val &= ~GITS_BASER_PAGE_SIZE_MASK;
2625
2626 switch (psz) {
2627 case SZ_64K:
2628 gpsz = GITS_BASER_PAGE_SIZE_64K;
2629 break;
2630 case SZ_16K:
2631 gpsz = GITS_BASER_PAGE_SIZE_16K;
2632 break;
2633 case SZ_4K:
2634 default:
2635 gpsz = GITS_BASER_PAGE_SIZE_4K;
2636 break;
2637 }
2638
2639 gpsz >>= GITS_BASER_PAGE_SIZE_SHIFT;
2640
2641 val |= FIELD_PREP(GITS_BASER_PAGE_SIZE_MASK, gpsz);
2642 its_write_baser(its, baser, val);
2643
2644 if (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser->val) == gpsz)
2645 break;
2646
2647 switch (psz) {
2648 case SZ_64K:
2649 psz = SZ_16K;
2650 break;
2651 case SZ_16K:
2652 psz = SZ_4K;
2653 break;
2654 case SZ_4K:
2655 default:
2656 return -1;
2657 }
2658 }
2659
2660 baser->psz = psz;
2661 return 0;
2662 }
2663
2664 static int its_alloc_tables(struct its_node *its)
2665 {
2666 u64 shr = GITS_BASER_InnerShareable;
2667 u64 cache = GITS_BASER_RaWaWb;
2668 int err, i;
2669
2670 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
2671 /* erratum 24313: ignore memory access type */
2672 cache = GITS_BASER_nCnB;
2673
2674 if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE) {
2675 cache = GITS_BASER_nC;
2676 shr = 0;
2677 }
2678
2679 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2680 struct its_baser *baser = its->tables + i;
2681 u64 val = its_read_baser(its, baser);
2682 u64 type = GITS_BASER_TYPE(val);
2683 bool indirect = false;
2684 u32 order;
2685
2686 if (type == GITS_BASER_TYPE_NONE)
2687 continue;
2688
2689 if (its_probe_baser_psz(its, baser)) {
2690 its_free_tables(its);
2691 return -ENXIO;
2692 }
2693
2694 order = get_order(baser->psz);
2695
2696 switch (type) {
2697 case GITS_BASER_TYPE_DEVICE:
2698 indirect = its_parse_indirect_baser(its, baser, &order,
2699 device_ids(its));
2700 break;
2701
2702 case GITS_BASER_TYPE_VCPU:
2703 if (is_v4_1(its)) {
2704 struct its_node *sibling;
2705
2706 WARN_ON(i != 2);
2707 if ((sibling = find_sibling_its(its))) {
2708 *baser = sibling->tables[2];
2709 its_write_baser(its, baser, baser->val);
2710 continue;
2711 }
2712 }
2713
2714 indirect = its_parse_indirect_baser(its, baser, &order,
2715 ITS_MAX_VPEID_BITS);
2716 break;
2717 }
2718
2719 err = its_setup_baser(its, baser, cache, shr, order, indirect);
2720 if (err < 0) {
2721 its_free_tables(its);
2722 return err;
2723 }
2724
2725 /* Update settings which will be used for next BASERn */
2726 cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
2727 shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
2728 }
2729
2730 return 0;
2731 }
2732
2733 static u64 inherit_vpe_l1_table_from_its(void)
2734 {
2735 struct its_node *its;
2736 u64 val;
2737 u32 aff;
2738
2739 val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2740 aff = compute_common_aff(val);
2741
2742 list_for_each_entry(its, &its_nodes, entry) {
2743 u64 baser, addr;
2744
2745 if (!is_v4_1(its))
2746 continue;
2747
2748 if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2749 continue;
2750
2751 if (aff != compute_its_aff(its))
2752 continue;
2753
2754 /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
2755 baser = its->tables[2].val;
2756 if (!(baser & GITS_BASER_VALID))
2757 continue;
2758
2759 /* We have a winner! */
2760 gic_data_rdist()->vpe_l1_base = its->tables[2].base;
2761
2762 val = GICR_VPROPBASER_4_1_VALID;
2763 if (baser & GITS_BASER_INDIRECT)
2764 val |= GICR_VPROPBASER_4_1_INDIRECT;
2765 val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE,
2766 FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser));
2767 switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) {
2768 case GIC_PAGE_SIZE_64K:
2769 addr = GITS_BASER_ADDR_48_to_52(baser);
2770 break;
2771 default:
2772 addr = baser & GENMASK_ULL(47, 12);
2773 break;
2774 }
2775 val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12);
2776 if (rdists_support_shareable()) {
2777 val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
2778 FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
2779 val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
2780 FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
2781 }
2782 val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1);
2783
2784 *this_cpu_ptr(&local_4_1_its) = its;
2785 return val;
2786 }
2787
2788 return 0;
2789 }
2790
2791 static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
2792 {
2793 u32 aff;
2794 u64 val;
2795 int cpu;
2796
2797 val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2798 aff = compute_common_aff(val);
2799
2800 for_each_possible_cpu(cpu) {
2801 void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
2802
2803 if (!base || cpu == smp_processor_id())
2804 continue;
2805
2806 val = gic_read_typer(base + GICR_TYPER);
2807 if (aff != compute_common_aff(val))
2808 continue;
2809
2810 /*
2811 * At this point, we have a victim. This particular CPU
2812 * has already booted, and has an affinity that matches
2813 * ours wrt CommonLPIAff. Let's use its own VPROPBASER.
2814 * Make sure we don't write the Z bit in that case.
2815 */
2816 val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
2817 val &= ~GICR_VPROPBASER_4_1_Z;
2818
2819 gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base;
2820 *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask;
2821
2822 *this_cpu_ptr(&local_4_1_its) = *per_cpu_ptr(&local_4_1_its, cpu);
2823 return val;
2824 }
2825
2826 return 0;
2827 }
2828
2829 static bool allocate_vpe_l2_table(int cpu, u32 id)
2830 {
2831 void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
2832 unsigned int psz, esz, idx, npg, gpsz;
2833 u64 val;
2834 struct page *page;
2835 __le64 *table;
2836
2837 if (!gic_rdists->has_rvpeid)
2838 return true;
2839
2840 /* Skip non-present CPUs */
2841 if (!base)
2842 return true;
2843
2844 val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
2845
2846 esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1;
2847 gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
2848 npg = FIELD_GET(GICR_VPROPBASER_4_1_SIZE, val) + 1;
2849
2850 switch (gpsz) {
2851 default:
2852 WARN_ON(1);
2853 fallthrough;
2854 case GIC_PAGE_SIZE_4K:
2855 psz = SZ_4K;
2856 break;
2857 case GIC_PAGE_SIZE_16K:
2858 psz = SZ_16K;
2859 break;
2860 case GIC_PAGE_SIZE_64K:
2861 psz = SZ_64K;
2862 break;
2863 }
2864
2865 /* Don't allow vpe_id that exceeds single, flat table limit */
2866 if (!(val & GICR_VPROPBASER_4_1_INDIRECT))
2867 return (id < (npg * psz / (esz * SZ_8)));
2868
2869 /* Compute 1st level table index & check if that exceeds table limit */
2870 idx = id >> ilog2(psz / (esz * SZ_8));
2871 if (idx >= (npg * psz / GITS_LVL1_ENTRY_SIZE))
2872 return false;
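/*
 * Worked example (illustrative numbers): with psz = 4K and a 16-byte
 * vPE entry (esz = 2), a flat table of npg = 2 pages covers
 * 2 * 4096 / 16 = 512 vPE IDs. In the indirect case each lvl2 page
 * covers 4096 / 16 = 256 IDs, so vpe_id 1000 lands in lvl1 slot
 * 1000 >> ilog2(256) = 3.
 */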
2873
2874 table = gic_data_rdist_cpu(cpu)->vpe_l1_base;
2875
2876 /* Allocate memory for 2nd level table */
2877 if (!table[idx]) {
2878 page = its_alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
2879 if (!page)
2880 return false;
2881
2882 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
2883 if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
2884 gic_flush_dcache_to_poc(page_address(page), psz);
2885
2886 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
2887
2888 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
2889 if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
2890 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
2891
2892 /* Ensure updated table contents are visible to RD hardware */
2893 dsb(sy);
2894 }
2895
2896 return true;
2897 }
2898
2899 static int allocate_vpe_l1_table(void)
2900 {
2901 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2902 u64 val, gpsz, npg, pa;
2903 unsigned int psz = SZ_64K;
2904 unsigned int np, epp, esz;
2905 struct page *page;
2906
2907 if (!gic_rdists->has_rvpeid)
2908 return 0;
2909
2910 /*
2911 * if VPENDBASER.Valid is set, disable any previously programmed
2912 * VPE by setting PendingLast while clearing Valid. This has the
2913 * effect of making sure no doorbell will be generated and we can
2914 * then safely clear VPROPBASER.Valid.
2915 */
2916 if (gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid)
2917 gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
2918 vlpi_base + GICR_VPENDBASER);
2919
2920 /*
2921 * If we can inherit the configuration from another RD, let's do
2922 * so. Otherwise, we have to go through the allocation process. We
2923 * assume that all RDs have the exact same requirements, as
2924 * nothing will work otherwise.
2925 */
2926 val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask);
2927 if (val & GICR_VPROPBASER_4_1_VALID)
2928 goto out;
2929
2930 gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_ATOMIC);
2931 if (!gic_data_rdist()->vpe_table_mask)
2932 return -ENOMEM;
2933
2934 val = inherit_vpe_l1_table_from_its();
2935 if (val & GICR_VPROPBASER_4_1_VALID)
2936 goto out;
2937
2938 /* First probe the page size */
2939 val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K);
2940 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2941 val = gicr_read_vpropbaser(vlpi_base + GICR_VPROPBASER);
2942 gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
2943 esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val);
2944
2945 switch (gpsz) {
2946 default:
2947 gpsz = GIC_PAGE_SIZE_4K;
2948 fallthrough;
2949 case GIC_PAGE_SIZE_4K:
2950 psz = SZ_4K;
2951 break;
2952 case GIC_PAGE_SIZE_16K:
2953 psz = SZ_16K;
2954 break;
2955 case GIC_PAGE_SIZE_64K:
2956 psz = SZ_64K;
2957 break;
2958 }
2959
2960 /*
2961 * Start populating the register from scratch, including RO fields
2962 * (which we want to print in debug cases...)
2963 */
2964 val = 0;
2965 val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz);
2966 val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz);
2967
2968 /* How many entries per GIC page? */
2969 esz++;
2970 epp = psz / (esz * SZ_8);
2971
2972 /*
2973 * If we need more than just a single L1 page, flag the table
2974 * as indirect and compute the number of required L1 pages.
2975 */
2976 if (epp < ITS_MAX_VPEID) {
2977 int nl2;
2978
2979 val |= GICR_VPROPBASER_4_1_INDIRECT;
2980
2981 /* Number of L2 pages required to cover the VPEID space */
2982 nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp);
2983
2984 /* Number of L1 pages to point to the L2 pages */
2985 npg = DIV_ROUND_UP(nl2 * SZ_8, psz);
2986 } else {
2987 npg = 1;
2988 }
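/*
 * Worked example (illustrative numbers): with psz = 64K and a 16-byte
 * entry, epp = 65536 / 16 = 4096, which is smaller than ITS_MAX_VPEID
 * (64K vPEs when ITS_MAX_VPEID_BITS is 16). The table is therefore made
 * indirect: nl2 = 65536 / 4096 = 16 lvl2 pages, and a single 64K lvl1
 * page (npg = 1) is enough to hold the 16 * 8 = 128 bytes of lvl1
 * entries.
 */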
2989
2990 val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg - 1);
2991
2992 /* Right, that's the number of CPU pages we need for L1 */
2993 np = DIV_ROUND_UP(npg * psz, PAGE_SIZE);
2994
2995 pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n",
2996 np, npg, psz, epp, esz);
2997 page = its_alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
2998 if (!page)
2999 return -ENOMEM;
3000
3001 gic_data_rdist()->vpe_l1_base = page_address(page);
3002 pa = virt_to_phys(page_address(page));
3003 WARN_ON(!IS_ALIGNED(pa, psz));
3004
3005 val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12);
3006 if (rdists_support_shareable()) {
3007 val |= GICR_VPROPBASER_RaWb;
3008 val |= GICR_VPROPBASER_InnerShareable;
3009 }
3010 val |= GICR_VPROPBASER_4_1_Z;
3011 val |= GICR_VPROPBASER_4_1_VALID;
3012
3013 out:
3014 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
3015 cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask);
3016
3017 pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n",
3018 smp_processor_id(), val,
3019 cpumask_pr_args(gic_data_rdist()->vpe_table_mask));
3020
3021 return 0;
3022 }
3023
3024 static int its_alloc_collections(struct its_node *its)
3025 {
3026 int i;
3027
3028 its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
3029 GFP_KERNEL);
3030 if (!its->collections)
3031 return -ENOMEM;
3032
3033 for (i = 0; i < nr_cpu_ids; i++)
3034 its->collections[i].target_address = ~0ULL;
3035
3036 return 0;
3037 }
3038
3039 static struct page *its_allocate_pending_table(gfp_t gfp_flags)
3040 {
3041 struct page *pend_page;
3042
3043 pend_page = its_alloc_pages(gfp_flags | __GFP_ZERO, get_order(LPI_PENDBASE_SZ));
3044 if (!pend_page)
3045 return NULL;
3046
3047 /* Make sure the GIC will observe the zero-ed page */
3048 gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
3049
3050 return pend_page;
3051 }
3052
3053 static void its_free_pending_table(struct page *pt)
3054 {
3055 its_free_pages(page_address(pt), get_order(LPI_PENDBASE_SZ));
3056 }
3057
3058 /*
3059 * Booting with kdump and LPIs enabled is generally fine. Any other
3060 * case is wrong in the absence of firmware/EFI support.
3061 */
3062 static bool enabled_lpis_allowed(void)
3063 {
3064 phys_addr_t addr;
3065 u64 val;
3066
3067 /* Check whether the property table is in a reserved region */
3068 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
3069 addr = val & GENMASK_ULL(51, 12);
3070
3071 return gic_check_reserved_range(addr, LPI_PROPBASE_SZ);
3072 }
3073
3074 static int __init allocate_lpi_tables(void)
3075 {
3076 u64 val;
3077 int err, cpu;
3078
3079 /*
3080 * If LPIs are enabled while we run this from the boot CPU,
3081 * flag the RD tables as pre-allocated if the stars do align.
3082 */
3083 val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR);
3084 if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) {
3085 gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED |
3086 RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING);
3087 pr_info("GICv3: Using preallocated redistributor tables\n");
3088 }
3089
3090 err = its_setup_lpi_prop_table();
3091 if (err)
3092 return err;
3093
3094 /*
3095 * We allocate all the pending tables anyway, as we may have a
3096 * mix of RDs that have had LPIs enabled, and some that
3097 * don't. We'll free the unused ones as each CPU comes online.
3098 */
3099 for_each_possible_cpu(cpu) {
3100 struct page *pend_page;
3101
3102 pend_page = its_allocate_pending_table(GFP_NOWAIT);
3103 if (!pend_page) {
3104 pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
3105 return -ENOMEM;
3106 }
3107
3108 gic_data_rdist_cpu(cpu)->pend_page = pend_page;
3109 }
3110
3111 return 0;
3112 }
3113
3114 static u64 read_vpend_dirty_clear(void __iomem *vlpi_base)
3115 {
3116 u32 count = 1000000; /* 1s! */
3117 bool clean;
3118 u64 val;
3119
3120 do {
3121 val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
3122 clean = !(val & GICR_VPENDBASER_Dirty);
3123 if (!clean) {
3124 count--;
3125 cpu_relax();
3126 udelay(1);
3127 }
3128 } while (!clean && count);
3129
3130 if (unlikely(!clean))
3131 pr_err_ratelimited("ITS virtual pending table not cleaning\n");
3132
3133 return val;
3134 }
3135
3136 static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
3137 {
3138 u64 val;
3139
3140 /* Make sure we wait until the RD is done with the initial scan */
3141 val = read_vpend_dirty_clear(vlpi_base);
3142 val &= ~GICR_VPENDBASER_Valid;
3143 val &= ~clr;
3144 val |= set;
3145 gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
3146
3147 val = read_vpend_dirty_clear(vlpi_base);
3148 if (unlikely(val & GICR_VPENDBASER_Dirty))
3149 val |= GICR_VPENDBASER_PendingLast;
3150
3151 return val;
3152 }
3153
3154 static void its_cpu_init_lpis(void)
3155 {
3156 void __iomem *rbase = gic_data_rdist_rd_base();
3157 struct page *pend_page;
3158 phys_addr_t paddr;
3159 u64 val, tmp;
3160
3161 if (gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED)
3162 return;
3163
3164 val = readl_relaxed(rbase + GICR_CTLR);
3165 if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
3166 (val & GICR_CTLR_ENABLE_LPIS)) {
3167 /*
3168 * Check that we get the same property table on all
3169 * RDs. If we don't, this is hopeless.
3170 */
3171 paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
3172 paddr &= GENMASK_ULL(51, 12);
3173 if (WARN_ON(gic_rdists->prop_table_pa != paddr))
3174 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
3175
3176 paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
3177 paddr &= GENMASK_ULL(51, 16);
3178
3179 WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
3180 gic_data_rdist()->flags |= RD_LOCAL_PENDTABLE_PREALLOCATED;
3181
3182 goto out;
3183 }
3184
3185 pend_page = gic_data_rdist()->pend_page;
3186 paddr = page_to_phys(pend_page);
3187
3188 /* set PROPBASE */
3189 val = (gic_rdists->prop_table_pa |
3190 GICR_PROPBASER_InnerShareable |
3191 GICR_PROPBASER_RaWaWb |
3192 ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
3193
3194 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
3195 tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
3196
3197 if (!rdists_support_shareable())
3198 tmp &= ~GICR_PROPBASER_SHAREABILITY_MASK;
3199
3200 if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
3201 if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
3202 /*
3203 * The HW reports non-shareable, so we must
3204 * remove the cacheability attributes as
3205 * well.
3206 */
3207 val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
3208 GICR_PROPBASER_CACHEABILITY_MASK);
3209 val |= GICR_PROPBASER_nC;
3210 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
3211 }
3212 pr_info_once("GIC: using cache flushing for LPI property table\n");
3213 gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
3214 }
3215
3216 /* set PENDBASE */
3217 val = (page_to_phys(pend_page) |
3218 GICR_PENDBASER_InnerShareable |
3219 GICR_PENDBASER_RaWaWb);
3220
3221 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
3222 tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
3223
3224 if (!rdists_support_shareable())
3225 tmp &= ~GICR_PENDBASER_SHAREABILITY_MASK;
3226
3227 if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
3228 /*
3229 * The HW reports non-shareable, so we must remove the
3230 * cacheability attributes as well.
3231 */
3232 val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
3233 GICR_PENDBASER_CACHEABILITY_MASK);
3234 val |= GICR_PENDBASER_nC;
3235 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
3236 }
3237
3238 /* Enable LPIs */
3239 val = readl_relaxed(rbase + GICR_CTLR);
3240 val |= GICR_CTLR_ENABLE_LPIS;
3241 writel_relaxed(val, rbase + GICR_CTLR);
3242
3243 out:
3244 if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
3245 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3246
3247 /*
3248 * It's possible for a CPU to receive VLPIs before it is
3249 * scheduled as a vPE, especially for the first CPU, and a
3250 * VLPI with an INTID larger than 2^(IDbits+1) will be considered
3251 * out of range and dropped by the GIC.
3252 * So we initialize IDbits to a known value to avoid VLPI drops.
3253 */
3254 val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
3255 pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
3256 smp_processor_id(), val);
3257 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
3258
3259 /*
3260 * Also clear Valid bit of GICR_VPENDBASER, in case some
3261 * ancient programming gets left in and has possibility of
3262 * corrupting memory.
3263 */
3264 val = its_clear_vpend_valid(vlpi_base, 0, 0);
3265 }
3266
3267 if (allocate_vpe_l1_table()) {
3268 /*
3269 * If the allocation has failed, we're in massive trouble.
3270 * Disable direct injection, and pray that no VM was
3271 * already running...
3272 */
3273 gic_rdists->has_rvpeid = false;
3274 gic_rdists->has_vlpis = false;
3275 }
3276
3277 /* Make sure the GIC has seen the above */
3278 dsb(sy);
3279 gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED;
3280 pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
3281 smp_processor_id(),
3282 gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED ?
3283 "reserved" : "allocated",
3284 &paddr);
3285 }
3286
3287 static void its_cpu_init_collection(struct its_node *its)
3288 {
3289 int cpu = smp_processor_id();
3290 u64 target;
3291
3292 /* avoid cross-node collections and their mapping */
3293 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
3294 struct device_node *cpu_node;
3295
3296 cpu_node = of_get_cpu_node(cpu, NULL);
3297 if (its->numa_node != NUMA_NO_NODE &&
3298 its->numa_node != of_node_to_nid(cpu_node))
3299 return;
3300 }
3301
3302 /*
3303 * We now have to bind each collection to its target
3304 * redistributor.
3305 */
3306 if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
3307 /*
3308 * This ITS wants the physical address of the
3309 * redistributor.
3310 */
3311 target = gic_data_rdist()->phys_base;
3312 } else {
3313 /* This ITS wants a linear CPU number. */
3314 target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
3315 target = GICR_TYPER_CPU_NUMBER(target) << 16;
3316 }
3317
3318 /* Perform collection mapping */
3319 its->collections[cpu].target_address = target;
3320 its->collections[cpu].col_id = cpu;
3321
3322 its_send_mapc(its, &its->collections[cpu], 1);
3323 its_send_invall(its, &its->collections[cpu]);
3324 }
3325
3326 static void its_cpu_init_collections(void)
3327 {
3328 struct its_node *its;
3329
3330 raw_spin_lock(&its_lock);
3331
3332 list_for_each_entry(its, &its_nodes, entry)
3333 its_cpu_init_collection(its);
3334
3335 raw_spin_unlock(&its_lock);
3336 }
3337
3338 static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
3339 {
3340 struct its_device *its_dev = NULL, *tmp;
3341 unsigned long flags;
3342
3343 raw_spin_lock_irqsave(&its->lock, flags);
3344
3345 list_for_each_entry(tmp, &its->its_device_list, entry) {
3346 if (tmp->device_id == dev_id) {
3347 its_dev = tmp;
3348 break;
3349 }
3350 }
3351
3352 raw_spin_unlock_irqrestore(&its->lock, flags);
3353
3354 return its_dev;
3355 }
3356
3357 static struct its_baser *its_get_baser(struct its_node *its, u32 type)
3358 {
3359 int i;
3360
3361 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
3362 if (GITS_BASER_TYPE(its->tables[i].val) == type)
3363 return &its->tables[i];
3364 }
3365
3366 return NULL;
3367 }
3368
3369 static bool its_alloc_table_entry(struct its_node *its,
3370 struct its_baser *baser, u32 id)
3371 {
3372 struct page *page;
3373 u32 esz, idx;
3374 __le64 *table;
3375
3376 /* Don't allow device id that exceeds single, flat table limit */
3377 esz = GITS_BASER_ENTRY_SIZE(baser->val);
3378 if (!(baser->val & GITS_BASER_INDIRECT))
3379 return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
3380
3381 /* Compute 1st level table index & check if that exceeds table limit */
3382 idx = id >> ilog2(baser->psz / esz);
3383 if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
3384 return false;
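/*
 * Worked example of the index computation above (illustrative numbers):
 * with a 64K ITS page and an 8-byte device table entry, one lvl2 page
 * covers 8192 DeviceIDs. DeviceID 0x10007 therefore selects lvl1 slot
 * 0x10007 >> 13 = 8, whose lvl2 page is allocated on demand below and
 * then serves DeviceIDs 0x10000-0x11fff.
 */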
3385
3386 table = baser->base;
3387
3388 /* Allocate memory for 2nd level table */
3389 if (!table[idx]) {
3390 page = its_alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
3391 get_order(baser->psz));
3392 if (!page)
3393 return false;
3394
3395 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
3396 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
3397 gic_flush_dcache_to_poc(page_address(page), baser->psz);
3398
3399 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
3400
3401 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
3402 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
3403 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
3404
3405 /* Ensure updated table contents are visible to ITS hardware */
3406 dsb(sy);
3407 }
3408
3409 return true;
3410 }
3411
3412 static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
3413 {
3414 struct its_baser *baser;
3415
3416 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
3417
3418 /* Don't allow device id that exceeds ITS hardware limit */
3419 if (!baser)
3420 return (ilog2(dev_id) < device_ids(its));
3421
3422 return its_alloc_table_entry(its, baser, dev_id);
3423 }
3424
3425 static bool its_alloc_vpe_table(u32 vpe_id)
3426 {
3427 struct its_node *its;
3428 int cpu;
3429
3430 /*
3431 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
3432 * could try and only do it on ITSs corresponding to devices
3433 * that have interrupts targeted at this VPE, but the
3434 * complexity becomes crazy (and you have tons of memory
3435 * anyway, right?).
3436 */
3437 list_for_each_entry(its, &its_nodes, entry) {
3438 struct its_baser *baser;
3439
3440 if (!is_v4(its))
3441 continue;
3442
3443 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
3444 if (!baser)
3445 return false;
3446
3447 if (!its_alloc_table_entry(its, baser, vpe_id))
3448 return false;
3449 }
3450
3451 /* Non-v4.1? No need to iterate the RDs, we can bail out early. */
3452 if (!gic_rdists->has_rvpeid)
3453 return true;
3454
3455 /*
3456 * Make sure the L2 tables are allocated for all copies of
3457 * the L1 table on *all* v4.1 RDs.
3458 */
3459 for_each_possible_cpu(cpu) {
3460 if (!allocate_vpe_l2_table(cpu, vpe_id))
3461 return false;
3462 }
3463
3464 return true;
3465 }
3466
3467 static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
3468 int nvecs, bool alloc_lpis)
3469 {
3470 struct its_device *dev;
3471 unsigned long *lpi_map = NULL;
3472 unsigned long flags;
3473 u16 *col_map = NULL;
3474 void *itt;
3475 int lpi_base;
3476 int nr_lpis;
3477 int nr_ites;
3478 int sz;
3479
3480 if (!its_alloc_device_table(its, dev_id))
3481 return NULL;
3482
3483 if (WARN_ON(!is_power_of_2(nvecs)))
3484 nvecs = roundup_pow_of_two(nvecs);
3485
3486 /*
3487 * Even if the device wants a single LPI, the ITT must be
3488 * sized as a power of two (and you need at least one bit...).
3489 */
3490 nr_ites = max(2, nvecs);
3491 sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
3492 sz = max(sz, ITS_ITT_ALIGN);
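/*
 * Illustrative sizing example (assuming an 8-byte ITT entry as reported
 * by GITS_TYPER): a request for 30 vectors is rounded up to nvecs = 32
 * above, so nr_ites = 32 and sz = 32 * 8 = 256 bytes, which already
 * satisfies the ITS_ITT_ALIGN minimum.
 */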
3493
3494 itt = itt_alloc_pool(its->numa_node, sz);
3495
3496 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
3497
3498 if (alloc_lpis) {
3499 lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
3500 if (lpi_map)
3501 col_map = kcalloc(nr_lpis, sizeof(*col_map),
3502 GFP_KERNEL);
3503 } else {
3504 col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
3505 nr_lpis = 0;
3506 lpi_base = 0;
3507 }
3508
3509 if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
3510 kfree(dev);
3511 itt_free_pool(itt, sz);
3512 bitmap_free(lpi_map);
3513 kfree(col_map);
3514 return NULL;
3515 }
3516
3517 gic_flush_dcache_to_poc(itt, sz);
3518
3519 dev->its = its;
3520 dev->itt = itt;
3521 dev->itt_sz = sz;
3522 dev->nr_ites = nr_ites;
3523 dev->event_map.lpi_map = lpi_map;
3524 dev->event_map.col_map = col_map;
3525 dev->event_map.lpi_base = lpi_base;
3526 dev->event_map.nr_lpis = nr_lpis;
3527 raw_spin_lock_init(&dev->event_map.vlpi_lock);
3528 dev->device_id = dev_id;
3529 INIT_LIST_HEAD(&dev->entry);
3530
3531 raw_spin_lock_irqsave(&its->lock, flags);
3532 list_add(&dev->entry, &its->its_device_list);
3533 raw_spin_unlock_irqrestore(&its->lock, flags);
3534
3535 /* Map device to its ITT */
3536 its_send_mapd(dev, 1);
3537
3538 return dev;
3539 }
3540
3541 static void its_free_device(struct its_device *its_dev)
3542 {
3543 unsigned long flags;
3544
3545 raw_spin_lock_irqsave(&its_dev->its->lock, flags);
3546 list_del(&its_dev->entry);
3547 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
3548 kfree(its_dev->event_map.col_map);
3549 itt_free_pool(its_dev->itt, its_dev->itt_sz);
3550 kfree(its_dev);
3551 }
3552
3553 static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
3554 {
3555 int idx;
3556
3557 /* Find a free LPI region in lpi_map and allocate it. */
3558 idx = bitmap_find_free_region(dev->event_map.lpi_map,
3559 dev->event_map.nr_lpis,
3560 get_count_order(nvecs));
3561 if (idx < 0)
3562 return -ENOSPC;
3563
3564 *hwirq = dev->event_map.lpi_base + idx;
3565
3566 return 0;
3567 }
3568
3569 static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
3570 int nvec, msi_alloc_info_t *info)
3571 {
3572 struct its_node *its;
3573 struct its_device *its_dev;
3574 struct msi_domain_info *msi_info;
3575 u32 dev_id;
3576 int err = 0;
3577
3578 /*
3579 * We ignore "dev" entirely, and rely on the dev_id that has
3580 * been passed via the scratchpad. This limits this domain's
3581 * usefulness to upper layers that definitely know that they
3582 * are built on top of the ITS.
3583 */
3584 dev_id = info->scratchpad[0].ul;
3585
3586 msi_info = msi_get_domain_info(domain);
3587 its = msi_info->data;
3588
3589 if (!gic_rdists->has_direct_lpi &&
3590 vpe_proxy.dev &&
3591 vpe_proxy.dev->its == its &&
3592 dev_id == vpe_proxy.dev->device_id) {
3593 /* Bad luck. Get yourself a better implementation */
3594 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
3595 dev_id);
3596 return -EINVAL;
3597 }
3598
3599 mutex_lock(&its->dev_alloc_lock);
3600 its_dev = its_find_device(its, dev_id);
3601 if (its_dev) {
3602 /*
3603 * We have already seen this ID, probably through
3604 * another alias (PCI bridge of some sort). No need to
3605 * create the device.
3606 */
3607 its_dev->shared = true;
3608 pr_debug("Reusing ITT for devID %x\n", dev_id);
3609 goto out;
3610 }
3611
3612 its_dev = its_create_device(its, dev_id, nvec, true);
3613 if (!its_dev) {
3614 err = -ENOMEM;
3615 goto out;
3616 }
3617
3618 if (info->flags & MSI_ALLOC_FLAGS_PROXY_DEVICE)
3619 its_dev->shared = true;
3620
3621 pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
3622 out:
3623 mutex_unlock(&its->dev_alloc_lock);
3624 info->scratchpad[0].ptr = its_dev;
3625 return err;
3626 }
3627
3628 static void its_msi_teardown(struct irq_domain *domain, msi_alloc_info_t *info)
3629 {
3630 struct its_device *its_dev = info->scratchpad[0].ptr;
3631
3632 guard(mutex)(&its_dev->its->dev_alloc_lock);
3633
3634 /* If the device is shared, keep everything around */
3635 if (its_dev->shared)
3636 return;
3637
3638 /* LPIs should have been already unmapped at this stage */
3639 if (WARN_ON_ONCE(!bitmap_empty(its_dev->event_map.lpi_map,
3640 its_dev->event_map.nr_lpis)))
3641 return;
3642
3643 its_lpi_free(its_dev->event_map.lpi_map,
3644 its_dev->event_map.lpi_base,
3645 its_dev->event_map.nr_lpis);
3646
3647 /* Unmap device/itt, and get rid of the tracking */
3648 its_send_mapd(its_dev, 0);
3649 its_free_device(its_dev);
3650 }
3651
3652 static struct msi_domain_ops its_msi_domain_ops = {
3653 .msi_prepare = its_msi_prepare,
3654 .msi_teardown = its_msi_teardown,
3655 };
3656
3657 static int its_irq_gic_domain_alloc(struct irq_domain *domain,
3658 unsigned int virq,
3659 irq_hw_number_t hwirq)
3660 {
3661 struct irq_fwspec fwspec;
3662
3663 if (irq_domain_get_of_node(domain->parent)) {
3664 fwspec.fwnode = domain->parent->fwnode;
3665 fwspec.param_count = 3;
3666 fwspec.param[0] = GIC_IRQ_TYPE_LPI;
3667 fwspec.param[1] = hwirq;
3668 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
3669 } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
3670 fwspec.fwnode = domain->parent->fwnode;
3671 fwspec.param_count = 2;
3672 fwspec.param[0] = hwirq;
3673 fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
3674 } else {
3675 return -EINVAL;
3676 }
3677
3678 return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
3679 }
3680
3681 static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
3682 unsigned int nr_irqs, void *args)
3683 {
3684 msi_alloc_info_t *info = args;
3685 struct its_device *its_dev = info->scratchpad[0].ptr;
3686 struct its_node *its = its_dev->its;
3687 struct irq_data *irqd;
3688 irq_hw_number_t hwirq;
3689 int err;
3690 int i;
3691
3692 err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
3693 if (err)
3694 return err;
3695
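	/* Let the IOMMU layer map the MSI doorbell for this device */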
3696 err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
3697 if (err)
3698 return err;
3699
3700 for (i = 0; i < nr_irqs; i++) {
3701 err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
3702 if (err)
3703 return err;
3704
3705 irq_domain_set_hwirq_and_chip(domain, virq + i,
3706 hwirq + i, &its_irq_chip, its_dev);
3707 irqd = irq_get_irq_data(virq + i);
3708 irqd_set_single_target(irqd);
3709 irqd_set_affinity_on_activate(irqd);
3710 irqd_set_resend_when_in_progress(irqd);
3711 pr_debug("ID:%d pID:%d vID:%d\n",
3712 (int)(hwirq + i - its_dev->event_map.lpi_base),
3713 (int)(hwirq + i), virq + i);
3714 }
3715
3716 return 0;
3717 }
3718
3719 static int its_irq_domain_activate(struct irq_domain *domain,
3720 struct irq_data *d, bool reserve)
3721 {
3722 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3723 u32 event = its_get_event_id(d);
3724 int cpu;
3725
3726 cpu = its_select_cpu(d, cpu_online_mask);
3727 if (cpu < 0 || cpu >= nr_cpu_ids)
3728 return -EINVAL;
3729
3730 its_inc_lpi_count(d, cpu);
3731 its_dev->event_map.col_map[event] = cpu;
3732 irq_data_update_effective_affinity(d, cpumask_of(cpu));
3733
3734 /* Map the GIC IRQ and event to the device */
3735 its_send_mapti(its_dev, d->hwirq, event);
3736 return 0;
3737 }
3738
3739 static void its_irq_domain_deactivate(struct irq_domain *domain,
3740 struct irq_data *d)
3741 {
3742 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3743 u32 event = its_get_event_id(d);
3744
3745 its_dec_lpi_count(d, its_dev->event_map.col_map[event]);
3746 /* Stop the delivery of interrupts */
3747 its_send_discard(its_dev, event);
3748 }
3749
3750 static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
3751 unsigned int nr_irqs)
3752 {
3753 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
3754 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3755 int i;
3756
3757 bitmap_release_region(its_dev->event_map.lpi_map,
3758 its_get_event_id(irq_domain_get_irq_data(domain, virq)),
3759 get_count_order(nr_irqs));
3760
3761 for (i = 0; i < nr_irqs; i++) {
3762 struct irq_data *data = irq_domain_get_irq_data(domain,
3763 virq + i);
3764 /* Nuke the entry in the domain */
3765 irq_domain_reset_irq_data(data);
3766 }
3767
3768 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
3769 }
3770
3771 static const struct irq_domain_ops its_domain_ops = {
3772 .select = msi_lib_irq_domain_select,
3773 .alloc = its_irq_domain_alloc,
3774 .free = its_irq_domain_free,
3775 .activate = its_irq_domain_activate,
3776 .deactivate = its_irq_domain_deactivate,
3777 };
3778
3779 /*
3780 * This is insane.
3781 *
3782 * If a GICv4.0 doesn't implement Direct LPIs (which is extremely
3783 * likely), the only way to perform an invalidate is to use a fake
3784 * device to issue an INV command, implying that the LPI has first
3785 * been mapped to some event on that device. Since this is not exactly
3786 * cheap, we try to keep that mapping around as long as possible, and
3787 * only issue an UNMAP if we're short on available slots.
3788 *
3789 * Broken by design(tm).
3790 *
3791 * GICv4.1, on the other hand, mandates that we're able to invalidate
3792 * by writing to a MMIO register. It doesn't implement the whole of
3793 * DirectLPI, but that's good enough. And most of the time, we don't
3794 * even have to invalidate anything, as the redistributor can be told
3795 * whether to generate a doorbell or not (we thus leave it enabled,
3796 * always).
3797 */
3798 static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
3799 {
3800 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3801 if (gic_rdists->has_rvpeid)
3802 return;
3803
3804 /* Already unmapped? */
3805 if (vpe->vpe_proxy_event == -1)
3806 return;
3807
3808 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
3809 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
3810
3811 /*
3812 * We don't track empty slots at all, so let's move the
3813 * next_victim pointer if we can quickly reuse that slot
3814 * instead of nuking an existing entry. Not clear that this is
3815 * always a win though, and this might just generate a ripple
3816 * effect... Let's just hope VPEs don't migrate too often.
3817 */
3818 if (vpe_proxy.vpes[vpe_proxy.next_victim])
3819 vpe_proxy.next_victim = vpe->vpe_proxy_event;
3820
3821 vpe->vpe_proxy_event = -1;
3822 }
3823
3824 static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
3825 {
3826 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3827 if (gic_rdists->has_rvpeid)
3828 return;
3829
3830 if (!gic_rdists->has_direct_lpi) {
3831 unsigned long flags;
3832
3833 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3834 its_vpe_db_proxy_unmap_locked(vpe);
3835 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3836 }
3837 }
3838
3839 static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
3840 {
3841 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3842 if (gic_rdists->has_rvpeid)
3843 return;
3844
3845 /* Already mapped? */
3846 if (vpe->vpe_proxy_event != -1)
3847 return;
3848
3849 /* This slot was already allocated. Kick the other VPE out. */
3850 if (vpe_proxy.vpes[vpe_proxy.next_victim])
3851 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
3852
3853 /* Map the new VPE instead */
3854 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
3855 vpe->vpe_proxy_event = vpe_proxy.next_victim;
3856 vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
3857
3858 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
3859 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
3860 }
3861
3862 static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
3863 {
3864 unsigned long flags;
3865 struct its_collection *target_col;
3866
3867 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3868 if (gic_rdists->has_rvpeid)
3869 return;
3870
3871 if (gic_rdists->has_direct_lpi) {
3872 void __iomem *rdbase;
3873
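		/*
		 * DirectLPI: clear any pending doorbell on the old
		 * redistributor and wait for the write to take effect.
		 */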
3874 rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
3875 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
3876 wait_for_syncr(rdbase);
3877
3878 return;
3879 }
3880
3881 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3882
3883 its_vpe_db_proxy_map_locked(vpe);
3884
3885 target_col = &vpe_proxy.dev->its->collections[to];
3886 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
3887 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
3888
3889 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3890 }
3891
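/*
 * Invalidate all vLPIs of a vPE by poking the redistributor's
 * GICR_INVALLR, serialised by the per-RD lock.
 */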
3892 static void its_vpe_4_1_invall_locked(int cpu, struct its_vpe *vpe)
3893 {
3894 void __iomem *rdbase;
3895 u64 val;
3896
3897 val = GICR_INVALLR_V;
3898 val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
3899
3900 guard(raw_spinlock)(&gic_data_rdist_cpu(cpu)->rd_lock);
3901 rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
3902 gic_write_lpir(val, rdbase + GICR_INVALLR);
3903 wait_for_syncr(rdbase);
3904 }
3905
3906 static int its_vpe_set_affinity(struct irq_data *d,
3907 const struct cpumask *mask_val,
3908 bool force)
3909 {
3910 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3911 unsigned int from, cpu = nr_cpu_ids;
3912 struct cpumask *table_mask;
3913 struct its_node *its;
3914 unsigned long flags;
3915
3916 /*
3917 * Check if we're racing against a VPE being destroyed, for
3918 * which we don't want to allow a VMOVP.
3919 */
3920 if (!atomic_read(&vpe->vmapp_count)) {
3921 if (gic_requires_eager_mapping())
3922 return -EINVAL;
3923
3924 /*
3925 * If we lazily map the VPEs, this isn't an error and
3926 * we can exit cleanly.
3927 */
3928 cpu = cpumask_first(mask_val);
3929 irq_data_update_effective_affinity(d, cpumask_of(cpu));
3930 return IRQ_SET_MASK_OK_DONE;
3931 }
3932
3933 /*
3934 * Changing affinity is mega expensive, so let's be as lazy as
3935 * we can and only do it if we really have to. Also, if mapped
3936 * into the proxy device, we need to move the doorbell
3937 * interrupt to its new location.
3938 *
3939 * Another thing is that changing the affinity of a vPE affects
3940 * *other interrupts* such as all the vLPIs that are routed to
3941 * this vPE. This means that the irq_desc lock is not enough to
3942 * protect us, and that we must ensure nobody samples vpe->col_idx
3943 * during the update, hence the lock below which must also be
3944 * taken on any vLPI handling path that evaluates vpe->col_idx.
3945 *
3946 * Finally, we must protect ourselves against concurrent updates of
3947 * the mapping state on this VM should the ITS list be in use (see
3948 * the shortcut in its_send_vmovp() otherwise).
3949 */
3950 if (its_list_map)
3951 raw_spin_lock(&vpe->its_vm->vmapp_lock);
3952
3953 from = vpe_to_cpuid_lock(vpe, &flags);
3954 table_mask = gic_data_rdist_cpu(from)->vpe_table_mask;
3955
3956 /*
3957 * If we are offered another CPU in the same GICv4.1 ITS
3958 * affinity, pick this one. Otherwise, any CPU will do.
3959 */
3960 if (table_mask)
3961 cpu = cpumask_any_and(mask_val, table_mask);
3962 if (cpu < nr_cpu_ids) {
3963 if (cpumask_test_cpu(from, mask_val) &&
3964 cpumask_test_cpu(from, table_mask))
3965 cpu = from;
3966 } else {
3967 cpu = cpumask_first(mask_val);
3968 }
3969
3970 if (from == cpu)
3971 goto out;
3972
3973 vpe->col_idx = cpu;
3974
3975 its_send_vmovp(vpe);
3976
3977 its = find_4_1_its();
3978 if (its && its->flags & ITS_FLAGS_WORKAROUND_HISILICON_162100801)
3979 its_vpe_4_1_invall_locked(cpu, vpe);
3980
3981 its_vpe_db_proxy_move(vpe, from, cpu);
3982
3983 out:
3984 irq_data_update_effective_affinity(d, cpumask_of(cpu));
3985 vpe_to_cpuid_unlock(vpe, flags);
3986
3987 if (its_list_map)
3988 raw_spin_unlock(&vpe->its_vm->vmapp_lock);
3989
3990 return IRQ_SET_MASK_OK_DONE;
3991 }
3992
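/*
 * Wait for the redistributor to finish parsing the virtual pending
 * table, i.e. for GICR_VPENDBASER.Dirty to clear (if supported).
 */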
3993 static void its_wait_vpt_parse_complete(void)
3994 {
3995 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3996 u64 val;
3997
3998 if (!gic_rdists->has_vpend_valid_dirty)
3999 return;
4000
4001 WARN_ON_ONCE(readq_relaxed_poll_timeout_atomic(vlpi_base + GICR_VPENDBASER,
4002 val,
4003 !(val & GICR_VPENDBASER_Dirty),
4004 1, 500));
4005 }
4006
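/*
 * Make the vPE resident on this CPU's redistributor by programming
 * GICR_VPROPBASER and GICR_VPENDBASER (GICv4.0 flavour).
 */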
4007 static void its_vpe_schedule(struct its_vpe *vpe)
4008 {
4009 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
4010 u64 val;
4011
4012 /* Schedule the VPE */
4013 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
4014 GENMASK_ULL(51, 12);
4015 val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
4016 if (rdists_support_shareable()) {
4017 val |= GICR_VPROPBASER_RaWb;
4018 val |= GICR_VPROPBASER_InnerShareable;
4019 }
4020 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
4021
4022 val = virt_to_phys(page_address(vpe->vpt_page)) &
4023 GENMASK_ULL(51, 16);
4024 if (rdists_support_shareable()) {
4025 val |= GICR_VPENDBASER_RaWaWb;
4026 val |= GICR_VPENDBASER_InnerShareable;
4027 }
4028 /*
4029 * There is no good way of finding out if the pending table is
4030 * empty as we can race against the doorbell interrupt very
4031 * easily. So in the end, vpe->pending_last is only an
4032 * indication that the vcpu has something pending, not one
4033 * that the pending table is empty. A good implementation
4034 * would be able to read its coarse map pretty quickly anyway,
4035 * making this a tolerable issue.
4036 */
4037 val |= GICR_VPENDBASER_PendingLast;
4038 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
4039 val |= GICR_VPENDBASER_Valid;
4040 gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
4041 }
4042
4043 static void its_vpe_deschedule(struct its_vpe *vpe)
4044 {
4045 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
4046 u64 val;
4047
4048 val = its_clear_vpend_valid(vlpi_base, 0, 0);
4049
4050 vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
4051 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
4052 }
4053
4054 static void its_vpe_invall(struct its_vpe *vpe)
4055 {
4056 struct its_node *its;
4057
4058 guard(raw_spinlock_irqsave)(&vpe->its_vm->vmapp_lock);
4059
4060 list_for_each_entry(its, &its_nodes, entry) {
4061 if (!is_v4(its))
4062 continue;
4063
4064 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
4065 continue;
4066
4067 /*
4068 * Sending a VINVALL to a single ITS is enough, as all
4069 * we need is to reach the redistributors.
4070 */
4071 its_send_vinvall(its, vpe);
4072 return;
4073 }
4074 }
4075
4076 static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
4077 {
4078 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4079 struct its_cmd_info *info = vcpu_info;
4080
4081 switch (info->cmd_type) {
4082 case SCHEDULE_VPE:
4083 its_vpe_schedule(vpe);
4084 return 0;
4085
4086 case DESCHEDULE_VPE:
4087 its_vpe_deschedule(vpe);
4088 return 0;
4089
4090 case COMMIT_VPE:
4091 its_wait_vpt_parse_complete();
4092 return 0;
4093
4094 case INVALL_VPE:
4095 its_vpe_invall(vpe);
4096 return 0;
4097
4098 default:
4099 return -EINVAL;
4100 }
4101 }
4102
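/*
 * Issue an ITS command (INT/CLEAR/INV) for the vPE doorbell through the
 * proxy device, mapping the doorbell LPI onto a proxy event on demand.
 */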
4103 static void its_vpe_send_cmd(struct its_vpe *vpe,
4104 void (*cmd)(struct its_device *, u32))
4105 {
4106 unsigned long flags;
4107
4108 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
4109
4110 its_vpe_db_proxy_map_locked(vpe);
4111 cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
4112
4113 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
4114 }
4115
4116 static void its_vpe_send_inv(struct irq_data *d)
4117 {
4118 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4119
4120 if (gic_rdists->has_direct_lpi)
4121 __direct_lpi_inv(d, d->parent_data->hwirq);
4122 else
4123 its_vpe_send_cmd(vpe, its_send_inv);
4124 }
4125
4126 static void its_vpe_mask_irq(struct irq_data *d)
4127 {
4128 /*
4129 * We need to mask the LPI, which is described by the parent
4130 * irq_data. Instead of calling into the parent (which won't
4131 * exactly do the right thing), let's simply use the
4132 * parent_data pointer. Yes, I'm naughty.
4133 */
4134 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
4135 its_vpe_send_inv(d);
4136 }
4137
4138 static void its_vpe_unmask_irq(struct irq_data *d)
4139 {
4140 /* Same hack as above... */
4141 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
4142 its_vpe_send_inv(d);
4143 }
4144
4145 static int its_vpe_set_irqchip_state(struct irq_data *d,
4146 enum irqchip_irq_state which,
4147 bool state)
4148 {
4149 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4150
4151 if (which != IRQCHIP_STATE_PENDING)
4152 return -EINVAL;
4153
4154 if (gic_rdists->has_direct_lpi) {
4155 void __iomem *rdbase;
4156
4157 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
4158 if (state) {
4159 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
4160 } else {
4161 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
4162 wait_for_syncr(rdbase);
4163 }
4164 } else {
4165 if (state)
4166 its_vpe_send_cmd(vpe, its_send_int);
4167 else
4168 its_vpe_send_cmd(vpe, its_send_clear);
4169 }
4170
4171 return 0;
4172 }
4173
4174 static int its_vpe_retrigger(struct irq_data *d)
4175 {
4176 return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
4177 }
4178
4179 static struct irq_chip its_vpe_irq_chip = {
4180 .name = "GICv4-vpe",
4181 .irq_mask = its_vpe_mask_irq,
4182 .irq_unmask = its_vpe_unmask_irq,
4183 .irq_eoi = irq_chip_eoi_parent,
4184 .irq_set_affinity = its_vpe_set_affinity,
4185 .irq_retrigger = its_vpe_retrigger,
4186 .irq_set_irqchip_state = its_vpe_set_irqchip_state,
4187 .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
4188 };
4189
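/* Return a GICv4.1-capable ITS, preferring the one cached for this CPU */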
4190 static struct its_node *find_4_1_its(void)
4191 {
4192 struct its_node *its = *this_cpu_ptr(&local_4_1_its);
4193
4194 if (!its) {
4195 list_for_each_entry(its, &its_nodes, entry) {
4196 if (is_v4_1(its))
4197 return its;
4198 }
4199
4200 /* Oops? */
4201 its = NULL;
4202 }
4203
4204 return its;
4205 }
4206
4207 static void its_vpe_4_1_send_inv(struct irq_data *d)
4208 {
4209 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4210 struct its_node *its;
4211
4212 /*
4213 * GICv4.1 wants doorbells to be invalidated using the
4214 * INVDB command in order to be broadcast to all RDs. Send
4215 * it to the first valid ITS, and let the HW do its magic.
4216 */
4217 its = find_4_1_its();
4218 if (its)
4219 its_send_invdb(its, vpe);
4220 }
4221
4222 static void its_vpe_4_1_mask_irq(struct irq_data *d)
4223 {
4224 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
4225 its_vpe_4_1_send_inv(d);
4226 }
4227
4228 static void its_vpe_4_1_unmask_irq(struct irq_data *d)
4229 {
4230 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
4231 its_vpe_4_1_send_inv(d);
4232 }
4233
4234 static void its_vpe_4_1_schedule(struct its_vpe *vpe,
4235 struct its_cmd_info *info)
4236 {
4237 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
4238 u64 val = 0;
4239
4240 /* Schedule the VPE */
4241 val |= GICR_VPENDBASER_Valid;
4242 val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0;
4243 val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0;
4244 val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
4245
4246 gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
4247 }
4248
4249 static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
4250 struct its_cmd_info *info)
4251 {
4252 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
4253 u64 val;
4254
4255 if (info->req_db) {
4256 unsigned long flags;
4257
4258 /*
4259 * vPE is going to block: make the vPE non-resident with
4260 * PendingLast clear and DB set. The GIC guarantees that if
4261 * we read-back PendingLast clear, then a doorbell will be
4262 * delivered when an interrupt comes.
4263 *
4264 * Note the locking to deal with the concurrent update of
4265 * pending_last from the doorbell interrupt handler that can
4266 * run concurrently.
4267 */
4268 raw_spin_lock_irqsave(&vpe->vpe_lock, flags);
4269 val = its_clear_vpend_valid(vlpi_base,
4270 GICR_VPENDBASER_PendingLast,
4271 GICR_VPENDBASER_4_1_DB);
4272 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
4273 raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
4274 } else {
4275 /*
4276 * We're not blocking, so just make the vPE non-resident
4277 * with PendingLast set, indicating that we'll be back.
4278 */
4279 val = its_clear_vpend_valid(vlpi_base,
4280 0,
4281 GICR_VPENDBASER_PendingLast);
4282 vpe->pending_last = true;
4283 }
4284 }
4285
4286 static void its_vpe_4_1_invall(struct its_vpe *vpe)
4287 {
4288 unsigned long flags;
4289 int cpu;
4290
4291 /* Target the redistributor this vPE is currently known on */
4292 cpu = vpe_to_cpuid_lock(vpe, &flags);
4293 its_vpe_4_1_invall_locked(cpu, vpe);
4294 vpe_to_cpuid_unlock(vpe, flags);
4295 }
4296
4297 static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
4298 {
4299 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4300 struct its_cmd_info *info = vcpu_info;
4301
4302 switch (info->cmd_type) {
4303 case SCHEDULE_VPE:
4304 its_vpe_4_1_schedule(vpe, info);
4305 return 0;
4306
4307 case DESCHEDULE_VPE:
4308 its_vpe_4_1_deschedule(vpe, info);
4309 return 0;
4310
4311 case COMMIT_VPE:
4312 its_wait_vpt_parse_complete();
4313 return 0;
4314
4315 case INVALL_VPE:
4316 its_vpe_4_1_invall(vpe);
4317 return 0;
4318
4319 default:
4320 return -EINVAL;
4321 }
4322 }
4323
4324 static struct irq_chip its_vpe_4_1_irq_chip = {
4325 .name = "GICv4.1-vpe",
4326 .irq_mask = its_vpe_4_1_mask_irq,
4327 .irq_unmask = its_vpe_4_1_unmask_irq,
4328 .irq_eoi = irq_chip_eoi_parent,
4329 .irq_set_affinity = its_vpe_set_affinity,
4330 .irq_set_vcpu_affinity = its_vpe_4_1_set_vcpu_affinity,
4331 };
4332
4333 static void its_configure_sgi(struct irq_data *d, bool clear)
4334 {
4335 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4336 struct its_cmd_desc desc;
4337
4338 desc.its_vsgi_cmd.vpe = vpe;
4339 desc.its_vsgi_cmd.sgi = d->hwirq;
4340 desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority;
4341 desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled;
4342 desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group;
4343 desc.its_vsgi_cmd.clear = clear;
4344
4345 /*
4346 * GICv4.1 allows us to send VSGI commands to any ITS as long as the
4347 * destination VPE is mapped there. Since we map them eagerly at
4348 * activation time, we're pretty sure the first GICv4.1 ITS will do.
4349 */
4350 its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc);
4351 }
4352
4353 static void its_sgi_mask_irq(struct irq_data *d)
4354 {
4355 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4356
4357 vpe->sgi_config[d->hwirq].enabled = false;
4358 its_configure_sgi(d, false);
4359 }
4360
4361 static void its_sgi_unmask_irq(struct irq_data *d)
4362 {
4363 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4364
4365 vpe->sgi_config[d->hwirq].enabled = true;
4366 its_configure_sgi(d, false);
4367 }
4368
4369 static int its_sgi_set_affinity(struct irq_data *d,
4370 const struct cpumask *mask_val,
4371 bool force)
4372 {
4373 /*
4374 * There is no notion of affinity for virtual SGIs, at least
4375 * not on the host (since they can only be targeting a vPE).
4376 * Tell the kernel we've done whatever it asked for.
4377 */
4378 irq_data_update_effective_affinity(d, mask_val);
4379 return IRQ_SET_MASK_OK;
4380 }
4381
4382 static int its_sgi_set_irqchip_state(struct irq_data *d,
4383 enum irqchip_irq_state which,
4384 bool state)
4385 {
4386 if (which != IRQCHIP_STATE_PENDING)
4387 return -EINVAL;
4388
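	/*
	 * Setting the pending state is done with a GITS_SGIR write;
	 * clearing it requires a VSGI command with CLEAR set.
	 */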
4389 if (state) {
4390 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4391 struct its_node *its = find_4_1_its();
4392 u64 val;
4393
4394 val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id);
4395 val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq);
4396 writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K);
4397 } else {
4398 its_configure_sgi(d, true);
4399 }
4400
4401 return 0;
4402 }
4403
4404 static int its_sgi_get_irqchip_state(struct irq_data *d,
4405 enum irqchip_irq_state which, bool *val)
4406 {
4407 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4408 void __iomem *base;
4409 unsigned long flags;
4410 u32 count = 1000000; /* 1s! */
4411 u32 status;
4412 int cpu;
4413
4414 if (which != IRQCHIP_STATE_PENDING)
4415 return -EINVAL;
4416
4417 /*
4418 * Locking galore! We can race against two different events:
4419 *
4420 * - Concurrent vPE affinity change: we must make sure it cannot
4421 * happen, or we'll talk to the wrong redistributor. This is
4422 * identical to what happens with vLPIs.
4423 *
4424 * - Concurrent VSGIPENDR access: As it involves accessing two
4425 * MMIO registers, this must be made atomic one way or another.
4426 */
4427 cpu = vpe_to_cpuid_lock(vpe, &flags);
4428 raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
4429 base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K;
4430 writel_relaxed(vpe->vpe_id, base + GICR_VSGIR);
4431 do {
4432 status = readl_relaxed(base + GICR_VSGIPENDR);
4433 if (!(status & GICR_VSGIPENDR_BUSY))
4434 goto out;
4435
4436 count--;
4437 if (!count) {
4438 pr_err_ratelimited("Unable to get SGI status\n");
4439 goto out;
4440 }
4441 cpu_relax();
4442 udelay(1);
4443 } while (count);
4444
4445 out:
4446 raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
4447 vpe_to_cpuid_unlock(vpe, flags);
4448
4449 if (!count)
4450 return -ENXIO;
4451
4452 *val = !!(status & (1 << d->hwirq));
4453
4454 return 0;
4455 }
4456
4457 static int its_sgi_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
4458 {
4459 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4460 struct its_cmd_info *info = vcpu_info;
4461
4462 switch (info->cmd_type) {
4463 case PROP_UPDATE_VSGI:
4464 vpe->sgi_config[d->hwirq].priority = info->priority;
4465 vpe->sgi_config[d->hwirq].group = info->group;
4466 its_configure_sgi(d, false);
4467 return 0;
4468
4469 default:
4470 return -EINVAL;
4471 }
4472 }
4473
4474 static struct irq_chip its_sgi_irq_chip = {
4475 .name = "GICv4.1-sgi",
4476 .irq_mask = its_sgi_mask_irq,
4477 .irq_unmask = its_sgi_unmask_irq,
4478 .irq_set_affinity = its_sgi_set_affinity,
4479 .irq_set_irqchip_state = its_sgi_set_irqchip_state,
4480 .irq_get_irqchip_state = its_sgi_get_irqchip_state,
4481 .irq_set_vcpu_affinity = its_sgi_set_vcpu_affinity,
4482 };
4483
4484 static int its_sgi_irq_domain_alloc(struct irq_domain *domain,
4485 unsigned int virq, unsigned int nr_irqs,
4486 void *args)
4487 {
4488 struct its_vpe *vpe = args;
4489 int i;
4490
4491 /* Yes, we do want 16 SGIs */
4492 WARN_ON(nr_irqs != 16);
4493
4494 for (i = 0; i < 16; i++) {
4495 vpe->sgi_config[i].priority = 0;
4496 vpe->sgi_config[i].enabled = false;
4497 vpe->sgi_config[i].group = false;
4498
4499 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
4500 &its_sgi_irq_chip, vpe);
4501 irq_set_status_flags(virq + i, IRQ_DISABLE_UNLAZY);
4502 }
4503
4504 return 0;
4505 }
4506
4507 static void its_sgi_irq_domain_free(struct irq_domain *domain,
4508 unsigned int virq,
4509 unsigned int nr_irqs)
4510 {
4511 /* Nothing to do */
4512 }
4513
4514 static int its_sgi_irq_domain_activate(struct irq_domain *domain,
4515 struct irq_data *d, bool reserve)
4516 {
4517 /* Write out the initial SGI configuration */
4518 its_configure_sgi(d, false);
4519 return 0;
4520 }
4521
4522 static void its_sgi_irq_domain_deactivate(struct irq_domain *domain,
4523 struct irq_data *d)
4524 {
4525 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4526
4527 /*
4528 * The VSGI command is awkward:
4529 *
4530 * - To change the configuration, CLEAR must be set to false,
4531 * leaving the pending bit unchanged.
4532 * - To clear the pending bit, CLEAR must be set to true, leaving
4533 * the configuration unchanged.
4534 *
4535 * You just can't do both at once, hence the two commands below.
4536 */
4537 vpe->sgi_config[d->hwirq].enabled = false;
4538 its_configure_sgi(d, false);
4539 its_configure_sgi(d, true);
4540 }
4541
4542 static const struct irq_domain_ops its_sgi_domain_ops = {
4543 .alloc = its_sgi_irq_domain_alloc,
4544 .free = its_sgi_irq_domain_free,
4545 .activate = its_sgi_irq_domain_activate,
4546 .deactivate = its_sgi_irq_domain_deactivate,
4547 };
4548
4549 static int its_vpe_id_alloc(void)
4550 {
4551 return ida_alloc_max(&its_vpeid_ida, ITS_MAX_VPEID - 1, GFP_KERNEL);
4552 }
4553
4554 static void its_vpe_id_free(u16 id)
4555 {
4556 ida_free(&its_vpeid_ida, id);
4557 }
4558
4559 static int its_vpe_init(struct its_vpe *vpe)
4560 {
4561 struct page *vpt_page;
4562 int vpe_id;
4563
4564 /* Allocate vpe_id */
4565 vpe_id = its_vpe_id_alloc();
4566 if (vpe_id < 0)
4567 return vpe_id;
4568
4569 /* Allocate VPT */
4570 vpt_page = its_allocate_pending_table(GFP_KERNEL);
4571 if (!vpt_page) {
4572 its_vpe_id_free(vpe_id);
4573 return -ENOMEM;
4574 }
4575
4576 if (!its_alloc_vpe_table(vpe_id)) {
4577 its_vpe_id_free(vpe_id);
4578 its_free_pending_table(vpt_page);
4579 return -ENOMEM;
4580 }
4581
4582 raw_spin_lock_init(&vpe->vpe_lock);
4583 vpe->vpe_id = vpe_id;
4584 vpe->vpt_page = vpt_page;
4585 atomic_set(&vpe->vmapp_count, 0);
4586 if (!gic_rdists->has_rvpeid)
4587 vpe->vpe_proxy_event = -1;
4588
4589 return 0;
4590 }
4591
4592 static void its_vpe_teardown(struct its_vpe *vpe)
4593 {
4594 its_vpe_db_proxy_unmap(vpe);
4595 its_vpe_id_free(vpe->vpe_id);
4596 its_free_pending_table(vpe->vpt_page);
4597 }
4598
4599 static void its_vpe_irq_domain_free(struct irq_domain *domain,
4600 unsigned int virq,
4601 unsigned int nr_irqs)
4602 {
4603 struct its_vm *vm = domain->host_data;
4604 int i;
4605
4606 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
4607
4608 for (i = 0; i < nr_irqs; i++) {
4609 struct irq_data *data = irq_domain_get_irq_data(domain,
4610 virq + i);
4611 struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
4612
4613 BUG_ON(vm != vpe->its_vm);
4614
4615 clear_bit(data->hwirq, vm->db_bitmap);
4616 its_vpe_teardown(vpe);
4617 irq_domain_reset_irq_data(data);
4618 }
4619
4620 if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
4621 its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
4622 its_free_prop_table(vm->vprop_page);
4623 }
4624 }
4625
4626 static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
4627 unsigned int nr_irqs, void *args)
4628 {
4629 struct irq_chip *irqchip = &its_vpe_irq_chip;
4630 struct its_vm *vm = args;
4631 unsigned long *bitmap;
4632 struct page *vprop_page;
4633 int base, nr_ids, i, err = 0;
4634
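	/* Allocate one doorbell LPI per vPE, plus a shared vPROP table */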
4635 bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
4636 if (!bitmap)
4637 return -ENOMEM;
4638
4639 if (nr_ids < nr_irqs) {
4640 its_lpi_free(bitmap, base, nr_ids);
4641 return -ENOMEM;
4642 }
4643
4644 vprop_page = its_allocate_prop_table(GFP_KERNEL);
4645 if (!vprop_page) {
4646 its_lpi_free(bitmap, base, nr_ids);
4647 return -ENOMEM;
4648 }
4649
4650 vm->db_bitmap = bitmap;
4651 vm->db_lpi_base = base;
4652 vm->nr_db_lpis = nr_ids;
4653 vm->vprop_page = vprop_page;
4654 raw_spin_lock_init(&vm->vmapp_lock);
4655
4656 if (gic_rdists->has_rvpeid)
4657 irqchip = &its_vpe_4_1_irq_chip;
4658
4659 for (i = 0; i < nr_irqs; i++) {
4660 vm->vpes[i]->vpe_db_lpi = base + i;
4661 err = its_vpe_init(vm->vpes[i]);
4662 if (err)
4663 break;
4664 err = its_irq_gic_domain_alloc(domain, virq + i,
4665 vm->vpes[i]->vpe_db_lpi);
4666 if (err)
4667 break;
4668 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
4669 irqchip, vm->vpes[i]);
4670 set_bit(i, bitmap);
4671 irqd_set_resend_when_in_progress(irq_get_irq_data(virq + i));
4672 }
4673
4674 if (err)
4675 its_vpe_irq_domain_free(domain, virq, i);
4676
4677 return err;
4678 }
4679
4680 static int its_vpe_irq_domain_activate(struct irq_domain *domain,
4681 struct irq_data *d, bool reserve)
4682 {
4683 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4684 struct its_node *its;
4685
4686 /* Map the VPE to the first possible CPU */
4687 vpe->col_idx = cpumask_first(cpu_online_mask);
4688 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
4689
4690 /*
4691 * If we use the list map, we issue VMAPP on demand... Unless
4692 * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
4693 * so that VSGIs can work.
4694 */
4695 if (!gic_requires_eager_mapping())
4696 return 0;
4697
4698 list_for_each_entry(its, &its_nodes, entry) {
4699 if (!is_v4(its))
4700 continue;
4701
4702 its_send_vmapp(its, vpe, true);
4703 its_send_vinvall(its, vpe);
4704 }
4705
4706 return 0;
4707 }
4708
4709 static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
4710 struct irq_data *d)
4711 {
4712 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4713 struct its_node *its;
4714
4715 /*
4716 * If we use the list map on GICv4.0, we unmap the VPE once no
4717 * VLPIs are associated with the VM.
4718 */
4719 if (!gic_requires_eager_mapping())
4720 return;
4721
4722 list_for_each_entry(its, &its_nodes, entry) {
4723 if (!is_v4(its))
4724 continue;
4725
4726 its_send_vmapp(its, vpe, false);
4727 }
4728
4729 /*
4730 * There may be a direct read to the VPT after unmapping the
4731 * vPE, to guarantee the validity of this, we make the VPT
4732 * memory coherent with the CPU caches here.
4733 */
4734 if (find_4_1_its() && !atomic_read(&vpe->vmapp_count))
4735 gic_flush_dcache_to_poc(page_address(vpe->vpt_page),
4736 LPI_PENDBASE_SZ);
4737 }
4738
4739 static const struct irq_domain_ops its_vpe_domain_ops = {
4740 .alloc = its_vpe_irq_domain_alloc,
4741 .free = its_vpe_irq_domain_free,
4742 .activate = its_vpe_irq_domain_activate,
4743 .deactivate = its_vpe_irq_domain_deactivate,
4744 };
4745
4746 static int its_force_quiescent(void __iomem *base)
4747 {
4748 u32 count = 1000000; /* 1s */
4749 u32 val;
4750
4751 val = readl_relaxed(base + GITS_CTLR);
4752 /*
4753 * GIC architecture specification requires the ITS to be both
4754 * disabled and quiescent for writes to GITS_BASER<n> or
4755 * GITS_CBASER to not have UNPREDICTABLE results.
4756 */
4757 if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
4758 return 0;
4759
4760 /* Disable the generation of all interrupts to this ITS */
4761 val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
4762 writel_relaxed(val, base + GITS_CTLR);
4763
4764 /* Poll GITS_CTLR and wait until ITS becomes quiescent */
4765 while (1) {
4766 val = readl_relaxed(base + GITS_CTLR);
4767 if (val & GITS_CTLR_QUIESCENT)
4768 return 0;
4769
4770 count--;
4771 if (!count)
4772 return -EBUSY;
4773
4774 cpu_relax();
4775 udelay(1);
4776 }
4777 }
4778
4779 static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
4780 {
4781 struct its_node *its = data;
4782
4783 /* erratum 22375: only alloc 8MB table size (20 bits) */
4784 its->typer &= ~GITS_TYPER_DEVBITS;
4785 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1);
4786 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
4787
4788 return true;
4789 }
4790
4791 static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
4792 {
4793 struct its_node *its = data;
4794
4795 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
4796
4797 return true;
4798 }
4799
4800 static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
4801 {
4802 struct its_node *its = data;
4803
4804 /* On QDF2400, the size of the ITE is 16Bytes */
4805 its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE;
4806 its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1);
4807
4808 return true;
4809 }
4810
4811 static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
4812 {
4813 struct its_node *its = its_dev->its;
4814
4815 /*
4816 * The Socionext Synquacer SoC has a so-called 'pre-ITS',
4817 * which maps 32-bit writes targeted at a separate window of
4818 * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
4819 * with device ID taken from bits [device_id_bits + 1:2] of
4820 * the window offset.
4821 */
4822 return its->pre_its_base + (its_dev->device_id << 2);
4823 }
4824
4825 static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
4826 {
4827 struct its_node *its = data;
4828 u32 pre_its_window[2];
4829 u32 ids;
4830
4831 if (!fwnode_property_read_u32_array(its->fwnode_handle,
4832 "socionext,synquacer-pre-its",
4833 pre_its_window,
4834 ARRAY_SIZE(pre_its_window))) {
4835
4836 its->pre_its_base = pre_its_window[0];
4837 its->get_msi_base = its_irq_get_msi_base_pre_its;
4838
4839 ids = ilog2(pre_its_window[1]) - 2;
4840 if (device_ids(its) > ids) {
4841 its->typer &= ~GITS_TYPER_DEVBITS;
4842 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1);
4843 }
4844
4845 /* the pre-ITS breaks isolation, so disable MSI remapping */
4846 its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_ISOLATED_MSI;
4847 return true;
4848 }
4849 return false;
4850 }
4851
4852 static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
4853 {
4854 struct its_node *its = data;
4855
4856 /*
4857 * Hip07 insists on using the wrong address for the VLPI
4858 * page. Trick it into doing the right thing...
4859 */
4860 its->vlpi_redist_offset = SZ_128K;
4861 return true;
4862 }
4863
4864 static bool __maybe_unused its_enable_rk3588001(void *data)
4865 {
4866 struct its_node *its = data;
4867
4868 if (!of_machine_is_compatible("rockchip,rk3588") &&
4869 !of_machine_is_compatible("rockchip,rk3588s"))
4870 return false;
4871
4872 its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
4873 gic_rdists->flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;
4874
4875 return true;
4876 }
4877
4878 static bool its_set_non_coherent(void *data)
4879 {
4880 struct its_node *its = data;
4881
4882 its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
4883 return true;
4884 }
4885
4886 static bool __maybe_unused its_enable_quirk_hip09_162100801(void *data)
4887 {
4888 struct its_node *its = data;
4889
4890 its->flags |= ITS_FLAGS_WORKAROUND_HISILICON_162100801;
4891 return true;
4892 }
4893
4894 static bool __maybe_unused its_enable_rk3568002(void *data)
4895 {
4896 if (!of_machine_is_compatible("rockchip,rk3566") &&
4897 !of_machine_is_compatible("rockchip,rk3568"))
4898 return false;
4899
4900 gfp_flags_quirk |= GFP_DMA32;
4901
4902 return true;
4903 }
4904
4905 static const struct gic_quirk its_quirks[] = {
4906 #ifdef CONFIG_CAVIUM_ERRATUM_22375
4907 {
4908 .desc = "ITS: Cavium errata 22375, 24313",
4909 .iidr = 0xa100034c, /* ThunderX pass 1.x */
4910 .mask = 0xffff0fff,
4911 .init = its_enable_quirk_cavium_22375,
4912 },
4913 #endif
4914 #ifdef CONFIG_CAVIUM_ERRATUM_23144
4915 {
4916 .desc = "ITS: Cavium erratum 23144",
4917 .iidr = 0xa100034c, /* ThunderX pass 1.x */
4918 .mask = 0xffff0fff,
4919 .init = its_enable_quirk_cavium_23144,
4920 },
4921 #endif
4922 #ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
4923 {
4924 .desc = "ITS: QDF2400 erratum 0065",
4925 .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */
4926 .mask = 0xffffffff,
4927 .init = its_enable_quirk_qdf2400_e0065,
4928 },
4929 #endif
4930 #ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
4931 {
4932 /*
4933 * The Socionext Synquacer SoC incorporates ARM's own GIC-500
4934 * implementation, but with a 'pre-ITS' added that requires
4935 * special handling in software.
4936 */
4937 .desc = "ITS: Socionext Synquacer pre-ITS",
4938 .iidr = 0x0001143b,
4939 .mask = 0xffffffff,
4940 .init = its_enable_quirk_socionext_synquacer,
4941 },
4942 #endif
4943 #ifdef CONFIG_HISILICON_ERRATUM_161600802
4944 {
4945 .desc = "ITS: Hip07 erratum 161600802",
4946 .iidr = 0x00000004,
4947 .mask = 0xffffffff,
4948 .init = its_enable_quirk_hip07_161600802,
4949 },
4950 #endif
4951 #ifdef CONFIG_HISILICON_ERRATUM_162100801
4952 {
4953 .desc = "ITS: Hip09 erratum 162100801",
4954 .iidr = 0x00051736,
4955 .mask = 0xffffffff,
4956 .init = its_enable_quirk_hip09_162100801,
4957 },
4958 #endif
4959 #ifdef CONFIG_ROCKCHIP_ERRATUM_3588001
4960 {
4961 .desc = "ITS: Rockchip erratum RK3588001",
4962 .iidr = 0x0201743b,
4963 .mask = 0xffffffff,
4964 .init = its_enable_rk3588001,
4965 },
4966 #endif
4967 {
4968 .desc = "ITS: non-coherent attribute",
4969 .property = "dma-noncoherent",
4970 .init = its_set_non_coherent,
4971 },
4972 #ifdef CONFIG_ROCKCHIP_ERRATUM_3568002
4973 {
4974 .desc = "ITS: Rockchip erratum RK3568002",
4975 .iidr = 0x0201743b,
4976 .mask = 0xffffffff,
4977 .init = its_enable_rk3568002,
4978 },
4979 #endif
4980 {
4981 }
4982 };
4983
4984 static void its_enable_quirks(struct its_node *its)
4985 {
4986 u32 iidr = readl_relaxed(its->base + GITS_IIDR);
4987
4988 gic_enable_quirks(iidr, its_quirks, its);
4989
4990 if (is_of_node(its->fwnode_handle))
4991 gic_enable_of_quirks(to_of_node(its->fwnode_handle),
4992 its_quirks, its);
4993 }
4994
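/*
 * syscore suspend hook: quiesce every ITS and save GITS_CTLR/GITS_CBASER
 * so they can be restored on resume.
 */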
4995 static int its_save_disable(void)
4996 {
4997 struct its_node *its;
4998 int err = 0;
4999
5000 raw_spin_lock(&its_lock);
5001 list_for_each_entry(its, &its_nodes, entry) {
5002 void __iomem *base;
5003
5004 base = its->base;
5005 its->ctlr_save = readl_relaxed(base + GITS_CTLR);
5006 err = its_force_quiescent(base);
5007 if (err) {
5008 pr_err("ITS@%pa: failed to quiesce: %d\n",
5009 &its->phys_base, err);
5010 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
5011 goto err;
5012 }
5013
5014 its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
5015 }
5016
5017 err:
5018 if (err) {
5019 list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
5020 void __iomem *base;
5021
5022 base = its->base;
5023 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
5024 }
5025 }
5026 raw_spin_unlock(&its_lock);
5027
5028 return err;
5029 }
5030
5031 static void its_restore_enable(void)
5032 {
5033 struct its_node *its;
5034 int ret;
5035
5036 raw_spin_lock(&its_lock);
5037 list_for_each_entry(its, &its_nodes, entry) {
5038 void __iomem *base;
5039 int i;
5040
5041 base = its->base;
5042
5043 /*
5044 * Make sure that the ITS is disabled. If it fails to quiesce,
5045 * don't restore it since writing to CBASER or BASER<n>
5046 * registers is undefined according to the GIC v3 ITS
5047 * Specification.
5048 *
5049 * Firmware resuming with the ITS enabled is terminally broken.
5050 */
5051 WARN_ON(readl_relaxed(base + GITS_CTLR) & GITS_CTLR_ENABLE);
5052 ret = its_force_quiescent(base);
5053 if (ret) {
5054 pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
5055 &its->phys_base, ret);
5056 continue;
5057 }
5058
5059 gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);
5060
5061 /*
5062 * Writing CBASER resets CREADR to 0, so make CWRITER and
5063 * cmd_write line up with it.
5064 */
5065 its->cmd_write = its->cmd_base;
5066 gits_write_cwriter(0, base + GITS_CWRITER);
5067
5068 /* Restore GITS_BASER from the value cache. */
5069 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
5070 struct its_baser *baser = &its->tables[i];
5071
5072 if (!(baser->val & GITS_BASER_VALID))
5073 continue;
5074
5075 its_write_baser(its, baser, baser->val);
5076 }
5077 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
5078
5079 /*
5080 * Reinit the collection if it's stored in the ITS. This is
5081 * indicated by the col_id being less than the HCC field.
5082 * CID < HCC as specified in the GIC v3 Documentation.
5083 */
5084 if (its->collections[smp_processor_id()].col_id <
5085 GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
5086 its_cpu_init_collection(its);
5087 }
5088 raw_spin_unlock(&its_lock);
5089 }
5090
5091 static struct syscore_ops its_syscore_ops = {
5092 .suspend = its_save_disable,
5093 .resume = its_restore_enable,
5094 };
5095
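/*
 * Map an ITS register frame, sanity-check the GITS_PIDR2 architecture
 * revision and force the ITS quiescent before handing it back.
 */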
5096 static void __init __iomem *its_map_one(struct resource *res, int *err)
5097 {
5098 void __iomem *its_base;
5099 u32 val;
5100
5101 its_base = ioremap(res->start, SZ_64K);
5102 if (!its_base) {
5103 pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
5104 *err = -ENOMEM;
5105 return NULL;
5106 }
5107
5108 val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
5109 if (val != 0x30 && val != 0x40) {
5110 pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
5111 *err = -ENODEV;
5112 goto out_unmap;
5113 }
5114
5115 *err = its_force_quiescent(its_base);
5116 if (*err) {
5117 pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
5118 goto out_unmap;
5119 }
5120
5121 return its_base;
5122
5123 out_unmap:
5124 iounmap(its_base);
5125 return NULL;
5126 }
5127
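/* Create the per-ITS MSI parent domain on top of the GIC LPI domain */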
5128 static int its_init_domain(struct its_node *its)
5129 {
5130 struct irq_domain_info dom_info = {
5131 .fwnode = its->fwnode_handle,
5132 .ops = &its_domain_ops,
5133 .domain_flags = its->msi_domain_flags,
5134 .parent = its_parent,
5135 };
5136 struct msi_domain_info *info;
5137
5138 info = kzalloc(sizeof(*info), GFP_KERNEL);
5139 if (!info)
5140 return -ENOMEM;
5141
5142 info->ops = &its_msi_domain_ops;
5143 info->data = its;
5144 dom_info.host_data = info;
5145
5146 if (!msi_create_parent_irq_domain(&dom_info, &gic_v3_its_msi_parent_ops)) {
5147 kfree(info);
5148 return -ENOMEM;
5149 }
5150 return 0;
5151 }
5152
5153 static int its_init_vpe_domain(void)
5154 {
5155 struct its_node *its;
5156 u32 devid;
5157 int entries;
5158
5159 if (gic_rdists->has_direct_lpi) {
5160 pr_info("ITS: Using DirectLPI for VPE invalidation\n");
5161 return 0;
5162 }
5163
5164 /* Any ITS will do, even if not v4 */
5165 its = list_first_entry(&its_nodes, struct its_node, entry);
5166
5167 entries = roundup_pow_of_two(nr_cpu_ids);
5168 vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
5169 GFP_KERNEL);
5170 if (!vpe_proxy.vpes)
5171 return -ENOMEM;
5172
5173 /* Use the last possible DevID */
5174 devid = GENMASK(device_ids(its) - 1, 0);
5175 vpe_proxy.dev = its_create_device(its, devid, entries, false);
5176 if (!vpe_proxy.dev) {
5177 kfree(vpe_proxy.vpes);
5178 pr_err("ITS: Can't allocate GICv4 proxy device\n");
5179 return -ENOMEM;
5180 }
5181
5182 BUG_ON(entries > vpe_proxy.dev->nr_ites);
5183
5184 raw_spin_lock_init(&vpe_proxy.lock);
5185 vpe_proxy.next_victim = 0;
5186 pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
5187 devid, vpe_proxy.dev->nr_ites);
5188
5189 return 0;
5190 }
5191
5192 static int __init its_compute_its_list_map(struct its_node *its)
5193 {
5194 int its_number;
5195 u32 ctlr;
5196
5197 /*
5198 * This is assumed to be done early enough that we're
5199 * guaranteed to be single-threaded, hence no
5200 * locking. Should this change, we should address
5201 * this.
5202 */
5203 its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
5204 if (its_number >= GICv4_ITS_LIST_MAX) {
5205 pr_err("ITS@%pa: No ITSList entry available!\n",
5206 &its->phys_base);
5207 return -EINVAL;
5208 }
5209
5210 ctlr = readl_relaxed(its->base + GITS_CTLR);
5211 ctlr &= ~GITS_CTLR_ITS_NUMBER;
5212 ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
5213 writel_relaxed(ctlr, its->base + GITS_CTLR);
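	/*
	 * ITS_NUMBER may be read-only (already set by firmware); if the
	 * write didn't stick, adopt whatever the ITS reports back.
	 */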
5214 ctlr = readl_relaxed(its->base + GITS_CTLR);
5215 if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
5216 its_number = ctlr & GITS_CTLR_ITS_NUMBER;
5217 its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
5218 }
5219
5220 if (test_and_set_bit(its_number, &its_list_map)) {
5221 pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
5222 &its->phys_base, its_number);
5223 return -EINVAL;
5224 }
5225
5226 return its_number;
5227 }
5228
5229 static int __init its_probe_one(struct its_node *its)
5230 {
5231 u64 baser, tmp;
5232 struct page *page;
5233 u32 ctlr;
5234 int err;
5235
5236 its_enable_quirks(its);
5237
5238 if (is_v4(its)) {
5239 if (!(its->typer & GITS_TYPER_VMOVP)) {
5240 err = its_compute_its_list_map(its);
5241 if (err < 0)
5242 goto out;
5243
5244 its->list_nr = err;
5245
5246 pr_info("ITS@%pa: Using ITS number %d\n",
5247 &its->phys_base, err);
5248 } else {
5249 pr_info("ITS@%pa: Single VMOVP capable\n", &its->phys_base);
5250 }
5251
5252 if (is_v4_1(its)) {
5253 u32 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
5254
5255 its->sgir_base = ioremap(its->phys_base + SZ_128K, SZ_64K);
5256 if (!its->sgir_base) {
5257 err = -ENOMEM;
5258 goto out;
5259 }
5260
5261 its->mpidr = readl_relaxed(its->base + GITS_MPIDR);
5262
5263 pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n",
5264 &its->phys_base, its->mpidr, svpet);
5265 }
5266 }
5267
5268 page = its_alloc_pages_node(its->numa_node,
5269 GFP_KERNEL | __GFP_ZERO,
5270 get_order(ITS_CMD_QUEUE_SZ));
5271 if (!page) {
5272 err = -ENOMEM;
5273 goto out_unmap_sgir;
5274 }
5275 its->cmd_base = (void *)page_address(page);
5276 its->cmd_write = its->cmd_base;
5277
5278 err = its_alloc_tables(its);
5279 if (err)
5280 goto out_free_cmd;
5281
5282 err = its_alloc_collections(its);
5283 if (err)
5284 goto out_free_tables;
5285
5286 baser = (virt_to_phys(its->cmd_base) |
5287 GITS_CBASER_RaWaWb |
5288 GITS_CBASER_InnerShareable |
5289 (ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
5290 GITS_CBASER_VALID);
5291
5292 gits_write_cbaser(baser, its->base + GITS_CBASER);
5293 tmp = gits_read_cbaser(its->base + GITS_CBASER);
5294
5295 if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE)
5296 tmp &= ~GITS_CBASER_SHAREABILITY_MASK;
5297
5298 if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
5299 if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
5300 /*
5301 * The HW reports non-shareable, we must
5302 * remove the cacheability attributes as
5303 * well.
5304 */
5305 baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
5306 GITS_CBASER_CACHEABILITY_MASK);
5307 baser |= GITS_CBASER_nC;
5308 gits_write_cbaser(baser, its->base + GITS_CBASER);
5309 }
5310 pr_info("ITS: using cache flushing for cmd queue\n");
5311 its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
5312 }
5313
5314 gits_write_cwriter(0, its->base + GITS_CWRITER);
5315 ctlr = readl_relaxed(its->base + GITS_CTLR);
5316 ctlr |= GITS_CTLR_ENABLE;
5317 if (is_v4(its))
5318 ctlr |= GITS_CTLR_ImDe;
5319 writel_relaxed(ctlr, its->base + GITS_CTLR);
5320
5321 err = its_init_domain(its);
5322 if (err)
5323 goto out_free_tables;
5324
5325 raw_spin_lock(&its_lock);
5326 list_add(&its->entry, &its_nodes);
5327 raw_spin_unlock(&its_lock);
5328
5329 return 0;
5330
5331 out_free_tables:
5332 its_free_tables(its);
5333 out_free_cmd:
5334 its_free_pages(its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
5335 out_unmap_sgir:
5336 if (its->sgir_base)
5337 iounmap(its->sgir_base);
5338 out:
5339 pr_err("ITS@%pa: failed probing (%d)\n", &its->phys_base, err);
5340 return err;
5341 }
5342
5343 static bool gic_rdists_supports_plpis(void)
5344 {
5345 return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
5346 }
5347
5348 static int redist_disable_lpis(void)
5349 {
5350 void __iomem *rbase = gic_data_rdist_rd_base();
5351 u64 timeout = USEC_PER_SEC;
5352 u64 val;
5353
5354 if (!gic_rdists_supports_plpis()) {
5355 pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
5356 return -ENXIO;
5357 }
5358
5359 val = readl_relaxed(rbase + GICR_CTLR);
5360 if (!(val & GICR_CTLR_ENABLE_LPIS))
5361 return 0;
5362
5363 /*
5364 * If coming via a CPU hotplug event, we don't need to disable
5365 * LPIs before trying to re-enable them. They are already
5366 * configured and all is well in the world.
5367 *
5368 * If running with preallocated tables, there is nothing to do.
5369 */
5370 if ((gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) ||
5371 (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
5372 return 0;
5373
5374 /*
5375 * From that point on, we only try to do some damage control.
5376 */
5377 pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
5378 smp_processor_id());
5379 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
5380
5381 /* Disable LPIs */
5382 val &= ~GICR_CTLR_ENABLE_LPIS;
5383 writel_relaxed(val, rbase + GICR_CTLR);
5384
5385 /* Make sure any change to GICR_CTLR is observable by the GIC */
5386 dsb(sy);
5387
5388 /*
5389 * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
5390 * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
5391 * Error out if we time out waiting for RWP to clear.
5392 */
5393 while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
5394 if (!timeout) {
5395 pr_err("CPU%d: Timeout while disabling LPIs\n",
5396 smp_processor_id());
5397 return -ETIMEDOUT;
5398 }
5399 udelay(1);
5400 timeout--;
5401 }
5402
5403 /*
5404 * After it has been written to 1, it is IMPLEMENTATION
5405 * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be
5406 * cleared to 0. Error out if clearing the bit failed.
5407 */
5408 if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
5409 pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
5410 return -EBUSY;
5411 }
5412
5413 return 0;
5414 }
5415
5416 int its_cpu_init(void)
5417 {
5418 if (!list_empty(&its_nodes)) {
5419 int ret;
5420
5421 ret = redist_disable_lpis();
5422 if (ret)
5423 return ret;
5424
5425 its_cpu_init_lpis();
5426 its_cpu_init_collections();
5427 }
5428
5429 return 0;
5430 }
5431
5432 static void rdist_memreserve_cpuhp_cleanup_workfn(struct work_struct *work)
5433 {
5434 cpuhp_remove_state_nocalls(gic_rdists->cpuhp_memreserve_state);
5435 gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
5436 }
5437
5438 static DECLARE_WORK(rdist_memreserve_cpuhp_cleanup_work,
5439 rdist_memreserve_cpuhp_cleanup_workfn);
5440
5441 static int its_cpu_memreserve_lpi(unsigned int cpu)
5442 {
5443 struct page *pend_page;
5444 int ret = 0;
5445
5446 /* This gets to run exactly once per CPU */
5447 if (gic_data_rdist()->flags & RD_LOCAL_MEMRESERVE_DONE)
5448 return 0;
5449
5450 pend_page = gic_data_rdist()->pend_page;
5451 if (WARN_ON(!pend_page)) {
5452 ret = -ENOMEM;
5453 goto out;
5454 }
5455 /*
5456 * If the pending table was pre-programmed, free the memory we
5457 * preemptively allocated. Otherwise, reserve that memory for
5458 * later kexecs.
5459 */
5460 if (gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED) {
5461 its_free_pending_table(pend_page);
5462 gic_data_rdist()->pend_page = NULL;
5463 } else {
5464 phys_addr_t paddr = page_to_phys(pend_page);
5465 WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
5466 }
5467
5468 out:
5469 /* Last CPU being brought up gets to issue the cleanup */
5470 if (!IS_ENABLED(CONFIG_SMP) ||
5471 cpumask_equal(&cpus_booted_once_mask, cpu_possible_mask))
5472 schedule_work(&rdist_memreserve_cpuhp_cleanup_work);
5473
5474 gic_data_rdist()->flags |= RD_LOCAL_MEMRESERVE_DONE;
5475 return ret;
5476 }
5477
5478 /* Mark all the BASER registers as invalid before they get reprogrammed */
5479 static int __init its_reset_one(struct resource *res)
5480 {
5481 void __iomem *its_base;
5482 int err, i;
5483
5484 its_base = its_map_one(res, &err);
5485 if (!its_base)
5486 return err;
5487
5488 for (i = 0; i < GITS_BASER_NR_REGS; i++)
5489 gits_write_baser(0, its_base + GITS_BASER + (i << 3));
5490
5491 iounmap(its_base);
5492 return 0;
5493 }
5494
5495 static const struct of_device_id its_device_id[] = {
5496 { .compatible = "arm,gic-v3-its", },
5497 {},
5498 };
5499
5500 static struct its_node __init *its_node_init(struct resource *res,
5501 struct fwnode_handle *handle, int numa_node)
5502 {
5503 void __iomem *its_base;
5504 struct its_node *its;
5505 int err;
5506
5507 its_base = its_map_one(res, &err);
5508 if (!its_base)
5509 return NULL;
5510
5511 pr_info("ITS %pR\n", res);
5512
5513 its = kzalloc(sizeof(*its), GFP_KERNEL);
5514 if (!its)
5515 goto out_unmap;
5516
5517 raw_spin_lock_init(&its->lock);
5518 mutex_init(&its->dev_alloc_lock);
5519 INIT_LIST_HEAD(&its->entry);
5520 INIT_LIST_HEAD(&its->its_device_list);
5521
5522 its->typer = gic_read_typer(its_base + GITS_TYPER);
5523 its->base = its_base;
5524 its->phys_base = res->start;
5525 its->get_msi_base = its_irq_get_msi_base;
5526 its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI | IRQ_DOMAIN_FLAG_MSI_IMMUTABLE;
5527
5528 its->numa_node = numa_node;
5529 its->fwnode_handle = handle;
5530
5531 return its;
5532
5533 out_unmap:
5534 iounmap(its_base);
5535 return NULL;
5536 }
5537
its_node_destroy(struct its_node * its)5538 static void its_node_destroy(struct its_node *its)
5539 {
5540 iounmap(its->base);
5541 kfree(its);
5542 }
5543
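/*
 * Discover the ITSes described in the device tree: reset every matching
 * node first, then allocate and probe an its_node for each usable one.
 */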
static int __init its_of_probe(struct device_node *node)
{
	struct device_node *np;
	struct resource res;
	int err;

	/*
	 * Make sure *all* the ITS are reset before we probe any, as
	 * they may be sharing memory. If any of the ITS fails to
	 * reset, don't even try to go any further, as this could
	 * result in something even worse.
	 */
	for (np = of_find_matching_node(node, its_device_id); np;
	     np = of_find_matching_node(np, its_device_id)) {
		if (!of_device_is_available(np) ||
		    !of_property_read_bool(np, "msi-controller") ||
		    of_address_to_resource(np, 0, &res))
			continue;

		err = its_reset_one(&res);
		if (err)
			return err;
	}

	for (np = of_find_matching_node(node, its_device_id); np;
	     np = of_find_matching_node(np, its_device_id)) {
		struct its_node *its;

		if (!of_device_is_available(np))
			continue;
		if (!of_property_read_bool(np, "msi-controller")) {
			pr_warn("%pOF: no msi-controller property, ITS ignored\n",
				np);
			continue;
		}

		if (of_address_to_resource(np, 0, &res)) {
			pr_warn("%pOF: no regs?\n", np);
			continue;
		}

		its = its_node_init(&res, &np->fwnode, of_node_to_nid(np));
		if (!its)
			return -ENOMEM;

		err = its_probe_one(its);
		if (err) {
			its_node_destroy(its);
			return err;
		}
	}
	return 0;
}

#ifdef CONFIG_ACPI

#define ACPI_GICV3_ITS_MEM_SIZE		(SZ_128K)

#ifdef CONFIG_ACPI_NUMA
struct its_srat_map {
	/* numa node id */
	u32	numa_node;
	/* GIC ITS ID */
	u32	its_id;
};

static struct its_srat_map *its_srat_maps __initdata;
static int its_in_srat __initdata;

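/* Look up the NUMA node that the SRAT associated with a given ITS ID */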
static int __init acpi_get_its_numa_node(u32 its_id)
{
	int i;

	for (i = 0; i < its_in_srat; i++) {
		if (its_id == its_srat_maps[i].its_id)
			return its_srat_maps[i].numa_node;
	}
	return NUMA_NO_NODE;
}

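/*
 * Dummy match callback: only used to count the ITS affinity entries in
 * the SRAT so that its_srat_maps can be sized accordingly.
 */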
static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	return 0;
}

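/* Record the ITS ID to NUMA node mapping described by one SRAT ITS affinity entry */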
static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	int node;
	struct acpi_srat_gic_its_affinity *its_affinity;

	its_affinity = (struct acpi_srat_gic_its_affinity *)header;
	if (!its_affinity)
		return -EINVAL;

	if (its_affinity->header.length < sizeof(*its_affinity)) {
		pr_err("SRAT: Invalid header length %d in ITS affinity\n",
			its_affinity->header.length);
		return -EINVAL;
	}

	/*
	 * Note that in theory a new proximity node could be created by this
	 * entry as it is an SRAT resource allocation structure.
	 * We do not currently support doing so.
	 */
	node = pxm_to_node(its_affinity->proximity_domain);

	if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
		pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
		return 0;
	}

	its_srat_maps[its_in_srat].numa_node = node;
	its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
	its_in_srat++;
	pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
		its_affinity->proximity_domain, its_affinity->its_id, node);

	return 0;
}

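/*
 * Count the ITS affinity entries in the SRAT, allocate its_srat_maps to
 * match, and populate it with the ITS ID to NUMA node mappings.
 */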
static void __init acpi_table_parse_srat_its(void)
{
	int count;

	count = acpi_table_parse_entries(ACPI_SIG_SRAT,
			sizeof(struct acpi_table_srat),
			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
			gic_acpi_match_srat_its, 0);
	if (count <= 0)
		return;

	its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
				      GFP_KERNEL);
	if (!its_srat_maps)
		return;

	acpi_table_parse_entries(ACPI_SIG_SRAT,
			sizeof(struct acpi_table_srat),
			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
			gic_acpi_parse_srat_its, 0);
}

/* free the its_srat_maps after ITS probing */
static void __init acpi_its_srat_maps_free(void)
{
	kfree(its_srat_maps);
}
#else
static void __init acpi_table_parse_srat_its(void) { }
static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
static void __init acpi_its_srat_maps_free(void) { }
#endif

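/*
 * MADT callback for one GIC translator entry: register an IORT domain
 * token for the ITS, then allocate and probe its its_node.
 */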
static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	struct acpi_madt_generic_translator *its_entry;
	struct fwnode_handle *dom_handle;
	struct its_node *its;
	struct resource res;
	int err;

	its_entry = (struct acpi_madt_generic_translator *)header;
	memset(&res, 0, sizeof(res));
	res.start = its_entry->base_address;
	res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
	res.flags = IORESOURCE_MEM;

	dom_handle = irq_domain_alloc_fwnode(&res.start);
	if (!dom_handle) {
		pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
		       &res.start);
		return -ENOMEM;
	}

	err = iort_register_domain_token(its_entry->translation_id, res.start,
					 dom_handle);
	if (err) {
		pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
		       &res.start, its_entry->translation_id);
		goto dom_err;
	}

	its = its_node_init(&res, dom_handle,
			    acpi_get_its_numa_node(its_entry->translation_id));
	if (!its) {
		err = -ENOMEM;
		goto node_err;
	}

	if (acpi_get_madt_revision() >= 7 &&
	    (its_entry->flags & ACPI_MADT_ITS_NON_COHERENT))
		its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;

	err = its_probe_one(its);
	if (!err)
		return 0;

node_err:
	iort_deregister_domain_token(its_entry->translation_id);
dom_err:
	irq_domain_free_fwnode(dom_handle);
	return err;
}

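/* MADT callback wrapping its_reset_one() for a single GIC translator entry */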
static int __init its_acpi_reset(union acpi_subtable_headers *header,
				 const unsigned long end)
{
	struct acpi_madt_generic_translator *its_entry;
	struct resource res;

	its_entry = (struct acpi_madt_generic_translator *)header;
	res = (struct resource) {
		.start	= its_entry->base_address,
		.end	= its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	};

	return its_reset_one(&res);
}

static void __init its_acpi_probe(void)
{
	acpi_table_parse_srat_its();
	/*
	 * Make sure *all* the ITS are reset before we probe any, as
	 * they may be sharing memory. If any of the ITS fails to
	 * reset, don't even try to go any further, as this could
	 * result in something even worse.
	 */
	if (acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
				  its_acpi_reset, 0) > 0)
		acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
				      gic_acpi_parse_madt_its, 0);
	acpi_its_srat_maps_free();
}
#else
static void __init its_acpi_probe(void) { }
#endif

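/*
 * Register the CPU hotplug callback that either frees or reserves each
 * CPU's pending table. Nothing to do if there is no EFI configuration
 * table or no ITS at all.
 */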
int __init its_lpi_memreserve_init(void)
{
	int state;

	if (!efi_enabled(EFI_CONFIG_TABLES))
		return 0;

	if (list_empty(&its_nodes))
		return 0;

	gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				  "irqchip/arm/gicv3/memreserve:online",
				  its_cpu_memreserve_lpi,
				  NULL);
	if (state < 0)
		return state;

	gic_rdists->cpuhp_memreserve_state = state;

	return 0;
}

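/*
 * Main entry point: probe all ITSes (via DT or ACPI), allocate the LPI
 * tables, and set up GICv4/GICv4.1 VLPI support when both the ITSes and
 * the redistributors advertise it.
 */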
int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
		    struct irq_domain *parent_domain, u8 irq_prio)
{
	struct device_node *of_node;
	struct its_node *its;
	bool has_v4 = false;
	bool has_v4_1 = false;
	int err;

	itt_pool = gen_pool_create(get_order(ITS_ITT_ALIGN), -1);
	if (!itt_pool)
		return -ENOMEM;

	gic_rdists = rdists;

	lpi_prop_prio = irq_prio;
	its_parent = parent_domain;
	of_node = to_of_node(handle);
	if (of_node)
		its_of_probe(of_node);
	else
		its_acpi_probe();

	if (list_empty(&its_nodes)) {
		pr_warn("ITS: No ITS available, not enabling LPIs\n");
		return -ENXIO;
	}

	err = allocate_lpi_tables();
	if (err)
		return err;

	list_for_each_entry(its, &its_nodes, entry) {
		has_v4 |= is_v4(its);
		has_v4_1 |= is_v4_1(its);
	}

	/* Don't bother with inconsistent systems */
	if (WARN_ON(!has_v4_1 && rdists->has_rvpeid))
		rdists->has_rvpeid = false;

	if (has_v4 & rdists->has_vlpis) {
		const struct irq_domain_ops *sgi_ops;

		if (has_v4_1)
			sgi_ops = &its_sgi_domain_ops;
		else
			sgi_ops = NULL;

		if (its_init_vpe_domain() ||
		    its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) {
			rdists->has_vlpis = false;
			pr_err("ITS: Disabling GICv4 support\n");
		}
	}

	register_syscore_ops(&its_syscore_ops);

	return 0;
}