xref: /linux/drivers/irqchip/irq-gic-v3-its.c (revision afa844360bc527d2a7e9e2542b5bd59b361cfb7c)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
4  * Author: Marc Zyngier <marc.zyngier@arm.com>
5  */
6 
7 #include <linux/acpi.h>
8 #include <linux/acpi_iort.h>
9 #include <linux/bitfield.h>
10 #include <linux/bitmap.h>
11 #include <linux/cpu.h>
12 #include <linux/crash_dump.h>
13 #include <linux/delay.h>
14 #include <linux/efi.h>
15 #include <linux/genalloc.h>
16 #include <linux/interrupt.h>
17 #include <linux/iommu.h>
18 #include <linux/iopoll.h>
19 #include <linux/irqdomain.h>
20 #include <linux/list.h>
21 #include <linux/log2.h>
22 #include <linux/mem_encrypt.h>
23 #include <linux/memblock.h>
24 #include <linux/mm.h>
25 #include <linux/msi.h>
26 #include <linux/of.h>
27 #include <linux/of_address.h>
28 #include <linux/of_irq.h>
29 #include <linux/of_pci.h>
30 #include <linux/of_platform.h>
31 #include <linux/percpu.h>
32 #include <linux/set_memory.h>
33 #include <linux/slab.h>
34 #include <linux/syscore_ops.h>
35 
36 #include <linux/irqchip.h>
37 #include <linux/irqchip/arm-gic-v3.h>
38 #include <linux/irqchip/arm-gic-v4.h>
39 
40 #include <asm/cputype.h>
41 #include <asm/exception.h>
42 
43 #include "irq-gic-common.h"
44 #include "irq-gic-its-msi-parent.h"
45 #include <linux/irqchip/irq-msi-lib.h>
46 
47 #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
48 #define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
49 #define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)
50 #define ITS_FLAGS_FORCE_NON_SHAREABLE		(1ULL << 3)
51 #define ITS_FLAGS_WORKAROUND_HISILICON_162100801	(1ULL << 4)
52 
53 #define RD_LOCAL_LPI_ENABLED                    BIT(0)
54 #define RD_LOCAL_PENDTABLE_PREALLOCATED         BIT(1)
55 #define RD_LOCAL_MEMRESERVE_DONE                BIT(2)
56 
57 static u32 lpi_id_bits;
58 
59 /*
60  * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
61  * deal with (one configuration byte per interrupt). PENDBASE has to
62  * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
63  */
64 #define LPI_NRBITS		lpi_id_bits
65 #define LPI_PROPBASE_SZ		ALIGN(BIT(LPI_NRBITS), SZ_64K)
66 #define LPI_PENDBASE_SZ		ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
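/*
 * Worked example: with lpi_id_bits == 16, BIT(16) gives 64kB of property
 * bytes and BIT(16) / 8 gives 8kB of pending bits, so both tables round
 * up to a single 64kB allocation.
 */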
67 
68 static u8 __ro_after_init lpi_prop_prio;
69 static struct its_node *find_4_1_its(void);
70 
71 /*
72  * Collection structure - just an ID, and a redistributor address to
73  * ping. We use one per CPU as a bag of interrupts assigned to this
74  * CPU.
75  */
76 struct its_collection {
77 	u64			target_address;
78 	u16			col_id;
79 };
80 
81 /*
82  * The ITS_BASER structure - contains memory information, cached
83  * value of BASER register configuration and ITS page size.
84  */
85 struct its_baser {
86 	void		*base;
87 	u64		val;
88 	u32		order;
89 	u32		psz;
90 };
91 
92 struct its_device;
93 
94 /*
95  * The ITS structure - contains most of the infrastructure, with the
96  * top-level MSI domain, the command queue, the collections, and the
97  * list of devices writing to it.
98  *
99  * dev_alloc_lock has to be taken for device allocations, while the
100  * spinlock must be taken to parse data structures such as the device
101  * list.
102  */
103 struct its_node {
104 	raw_spinlock_t		lock;
105 	struct mutex		dev_alloc_lock;
106 	struct list_head	entry;
107 	void __iomem		*base;
108 	void __iomem		*sgir_base;
109 	phys_addr_t		phys_base;
110 	struct its_cmd_block	*cmd_base;
111 	struct its_cmd_block	*cmd_write;
112 	struct its_baser	tables[GITS_BASER_NR_REGS];
113 	struct its_collection	*collections;
114 	struct fwnode_handle	*fwnode_handle;
115 	u64			(*get_msi_base)(struct its_device *its_dev);
116 	u64			typer;
117 	u64			cbaser_save;
118 	u32			ctlr_save;
119 	u32			mpidr;
120 	struct list_head	its_device_list;
121 	u64			flags;
122 	unsigned long		list_nr;
123 	int			numa_node;
124 	unsigned int		msi_domain_flags;
125 	u32			pre_its_base; /* for Socionext Synquacer */
126 	int			vlpi_redist_offset;
127 };
128 
129 static DEFINE_PER_CPU(struct its_node *, local_4_1_its);
130 
131 #define is_v4(its)		(!!((its)->typer & GITS_TYPER_VLPIS))
132 #define is_v4_1(its)		(!!((its)->typer & GITS_TYPER_VMAPP))
133 #define device_ids(its)		(FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)
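/* Number of DeviceID bits: GITS_TYPER.Devbits encodes (bits - 1), hence the +1 */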
134 
135 #define ITS_ITT_ALIGN		SZ_256
136 
137 /* The maximum number of VPEID bits supported by VLPI commands */
138 #define ITS_MAX_VPEID_BITS						\
139 	({								\
140 		int nvpeid = 16;					\
141 		if (gic_rdists->has_rvpeid &&				\
142 		    gic_rdists->gicd_typer2 & GICD_TYPER2_VIL)		\
143 			nvpeid = 1 + (gic_rdists->gicd_typer2 &		\
144 				      GICD_TYPER2_VID);			\
145 									\
146 		nvpeid;							\
147 	})
148 #define ITS_MAX_VPEID		(1 << (ITS_MAX_VPEID_BITS))
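/*
 * Without GICv4.1 RVPEID support the VPEID space is fixed at 16 bits;
 * with it, the width is GICD_TYPER2.VID + 1, as computed above.
 */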
149 
150 /* Convert page order to size in bytes */
151 #define PAGE_ORDER_TO_SIZE(o)	(PAGE_SIZE << (o))
152 
153 struct event_lpi_map {
154 	unsigned long		*lpi_map;
155 	u16			*col_map;
156 	irq_hw_number_t		lpi_base;
157 	int			nr_lpis;
158 	raw_spinlock_t		vlpi_lock;
159 	struct its_vm		*vm;
160 	struct its_vlpi_map	*vlpi_maps;
161 	int			nr_vlpis;
162 };
163 
164 /*
165  * The ITS view of a device - belongs to an ITS, owns an interrupt
166  * translation table, and a list of interrupts.  If some of its
167  * LPIs are injected into a guest (GICv4), the event_map.vm field
168  * indicates which one.
169  */
170 struct its_device {
171 	struct list_head	entry;
172 	struct its_node		*its;
173 	struct event_lpi_map	event_map;
174 	void			*itt;
175 	u32			itt_sz;
176 	u32			nr_ites;
177 	u32			device_id;
178 	bool			shared;
179 };
180 
181 static struct {
182 	raw_spinlock_t		lock;
183 	struct its_device	*dev;
184 	struct its_vpe		**vpes;
185 	int			next_victim;
186 } vpe_proxy;
187 
188 struct cpu_lpi_count {
189 	atomic_t	managed;
190 	atomic_t	unmanaged;
191 };
192 
193 static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count);
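/*
 * Per-CPU count of LPIs routed to each CPU, split between managed and
 * unmanaged affinities; used by cpumask_pick_least_loaded() below to
 * spread interrupts across targets.
 */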
194 
195 static LIST_HEAD(its_nodes);
196 static DEFINE_RAW_SPINLOCK(its_lock);
197 static struct rdists *gic_rdists;
198 static struct irq_domain *its_parent;
199 
200 static unsigned long its_list_map;
201 static u16 vmovp_seq_num;
202 static DEFINE_RAW_SPINLOCK(vmovp_lock);
203 
204 static DEFINE_IDA(its_vpeid_ida);
205 
206 #define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
207 #define gic_data_rdist_cpu(cpu)		(per_cpu_ptr(gic_rdists->rdist, cpu))
208 #define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
209 #define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)
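/*
 * On GICv4, the VLPI register frame sits two 64kB pages (the RD and SGI
 * frames) above RD_base, hence the fixed SZ_128K offset above.
 */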
210 
211 static gfp_t gfp_flags_quirk;
212 
213 static struct page *its_alloc_pages_node(int node, gfp_t gfp,
214 					 unsigned int order)
215 {
216 	struct page *page;
217 	int ret = 0;
218 
219 	page = alloc_pages_node(node, gfp | gfp_flags_quirk, order);
220 
221 	if (!page)
222 		return NULL;
223 
224 	ret = set_memory_decrypted((unsigned long)page_address(page),
225 				   1 << order);
226 	/*
227 	 * If set_memory_decrypted() fails then we don't know what state the
228 	 * page is in, so we can't free it. Instead we leak it.
229 	 * set_memory_decrypted() will already have WARNed.
230 	 */
231 	if (ret)
232 		return NULL;
233 
234 	return page;
235 }
236 
237 static struct page *its_alloc_pages(gfp_t gfp, unsigned int order)
238 {
239 	return its_alloc_pages_node(NUMA_NO_NODE, gfp, order);
240 }
241 
242 static void its_free_pages(void *addr, unsigned int order)
243 {
244 	/*
245 	 * If the memory cannot be encrypted again then we must leak the pages.
246 	 * set_memory_encrypted() will already have WARNed.
247 	 */
248 	if (set_memory_encrypted((unsigned long)addr, 1 << order))
249 		return;
250 	free_pages((unsigned long)addr, order);
251 }
252 
253 static struct gen_pool *itt_pool;
254 
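/*
 * ITT allocation policy: requests of a page or more go straight to the
 * page allocator, while sub-page ITTs are carved out of a gen_pool that
 * is grown one (decrypted) page at a time.
 */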
255 static void *itt_alloc_pool(int node, int size)
256 {
257 	unsigned long addr;
258 	struct page *page;
259 
260 	if (size >= PAGE_SIZE) {
261 		page = its_alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, get_order(size));
262 
263 		return page ? page_address(page) : NULL;
264 	}
265 
266 	do {
267 		addr = gen_pool_alloc(itt_pool, size);
268 		if (addr)
269 			break;
270 
271 		page = its_alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
272 		if (!page)
273 			break;
274 
275 		gen_pool_add(itt_pool, (unsigned long)page_address(page), PAGE_SIZE, node);
276 	} while (!addr);
277 
278 	return (void *)addr;
279 }
280 
281 static void itt_free_pool(void *addr, int size)
282 {
283 	if (!addr)
284 		return;
285 
286 	if (size >= PAGE_SIZE) {
287 		its_free_pages(addr, get_order(size));
288 		return;
289 	}
290 
291 	gen_pool_free(itt_pool, (unsigned long)addr, size);
292 }
293 
294 /*
295  * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we
296  * always have vSGIs mapped.
297  */
298 static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)
299 {
300 	return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);
301 }
302 
303 static bool rdists_support_shareable(void)
304 {
305 	return !(gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE);
306 }
307 
308 static u16 get_its_list(struct its_vm *vm)
309 {
310 	struct its_node *its;
311 	unsigned long its_list = 0;
312 
313 	list_for_each_entry(its, &its_nodes, entry) {
314 		if (!is_v4(its))
315 			continue;
316 
317 		if (require_its_list_vmovp(vm, its))
318 			__set_bit(its->list_nr, &its_list);
319 	}
320 
321 	return (u16)its_list;
322 }
323 
324 static inline u32 its_get_event_id(struct irq_data *d)
325 {
326 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
327 	return d->hwirq - its_dev->event_map.lpi_base;
328 }
329 
330 static struct its_collection *dev_event_to_col(struct its_device *its_dev,
331 					       u32 event)
332 {
333 	struct its_node *its = its_dev->its;
334 
335 	return its->collections + its_dev->event_map.col_map[event];
336 }
337 
338 static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
339 					       u32 event)
340 {
341 	if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis))
342 		return NULL;
343 
344 	return &its_dev->event_map.vlpi_maps[event];
345 }
346 
347 static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
348 {
349 	if (irqd_is_forwarded_to_vcpu(d)) {
350 		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
351 		u32 event = its_get_event_id(d);
352 
353 		return dev_event_to_vlpi_map(its_dev, event);
354 	}
355 
356 	return NULL;
357 }
358 
359 static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)
360 {
361 	raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
362 	return vpe->col_idx;
363 }
364 
365 static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
366 {
367 	raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
368 }
369 
370 static struct irq_chip its_vpe_irq_chip;
371 
372 static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
373 {
374 	struct its_vpe *vpe = NULL;
375 	int cpu;
376 
377 	if (d->chip == &its_vpe_irq_chip) {
378 		vpe = irq_data_get_irq_chip_data(d);
379 	} else {
380 		struct its_vlpi_map *map = get_vlpi_map(d);
381 		if (map)
382 			vpe = map->vpe;
383 	}
384 
385 	if (vpe) {
386 		cpu = vpe_to_cpuid_lock(vpe, flags);
387 	} else {
388 		/* Physical LPIs are already locked via the irq_desc lock */
389 		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
390 		cpu = its_dev->event_map.col_map[its_get_event_id(d)];
391 		/* Keep GCC quiet... */
392 		*flags = 0;
393 	}
394 
395 	return cpu;
396 }
397 
398 static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags)
399 {
400 	struct its_vpe *vpe = NULL;
401 
402 	if (d->chip == &its_vpe_irq_chip) {
403 		vpe = irq_data_get_irq_chip_data(d);
404 	} else {
405 		struct its_vlpi_map *map = get_vlpi_map(d);
406 		if (map)
407 			vpe = map->vpe;
408 	}
409 
410 	if (vpe)
411 		vpe_to_cpuid_unlock(vpe, flags);
412 }
413 
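/*
 * Collection targets are 64kB-aligned redistributor addresses, so any
 * non-zero low 16 bits indicate a corrupted or unmapped collection.
 */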
414 static struct its_collection *valid_col(struct its_collection *col)
415 {
416 	if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
417 		return NULL;
418 
419 	return col;
420 }
421 
422 static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
423 {
424 	if (valid_col(its->collections + vpe->col_idx))
425 		return vpe;
426 
427 	return NULL;
428 }
429 
430 /*
431  * ITS command descriptors - parameters to be encoded in a command
432  * block.
433  */
434 struct its_cmd_desc {
435 	union {
436 		struct {
437 			struct its_device *dev;
438 			u32 event_id;
439 		} its_inv_cmd;
440 
441 		struct {
442 			struct its_device *dev;
443 			u32 event_id;
444 		} its_clear_cmd;
445 
446 		struct {
447 			struct its_device *dev;
448 			u32 event_id;
449 		} its_int_cmd;
450 
451 		struct {
452 			struct its_device *dev;
453 			int valid;
454 		} its_mapd_cmd;
455 
456 		struct {
457 			struct its_collection *col;
458 			int valid;
459 		} its_mapc_cmd;
460 
461 		struct {
462 			struct its_device *dev;
463 			u32 phys_id;
464 			u32 event_id;
465 		} its_mapti_cmd;
466 
467 		struct {
468 			struct its_device *dev;
469 			struct its_collection *col;
470 			u32 event_id;
471 		} its_movi_cmd;
472 
473 		struct {
474 			struct its_device *dev;
475 			u32 event_id;
476 		} its_discard_cmd;
477 
478 		struct {
479 			struct its_collection *col;
480 		} its_invall_cmd;
481 
482 		struct {
483 			struct its_vpe *vpe;
484 		} its_vinvall_cmd;
485 
486 		struct {
487 			struct its_vpe *vpe;
488 			struct its_collection *col;
489 			bool valid;
490 		} its_vmapp_cmd;
491 
492 		struct {
493 			struct its_vpe *vpe;
494 			struct its_device *dev;
495 			u32 virt_id;
496 			u32 event_id;
497 			bool db_enabled;
498 		} its_vmapti_cmd;
499 
500 		struct {
501 			struct its_vpe *vpe;
502 			struct its_device *dev;
503 			u32 event_id;
504 			bool db_enabled;
505 		} its_vmovi_cmd;
506 
507 		struct {
508 			struct its_vpe *vpe;
509 			struct its_collection *col;
510 			u16 seq_num;
511 			u16 its_list;
512 		} its_vmovp_cmd;
513 
514 		struct {
515 			struct its_vpe *vpe;
516 		} its_invdb_cmd;
517 
518 		struct {
519 			struct its_vpe *vpe;
520 			u8 sgi;
521 			u8 priority;
522 			bool enable;
523 			bool group;
524 			bool clear;
525 		} its_vsgi_cmd;
526 	};
527 };
528 
529 /*
530  * The ITS command block, which is what the ITS actually parses.
531  */
532 struct its_cmd_block {
533 	union {
534 		u64	raw_cmd[4];
535 		__le64	raw_cmd_le[4];
536 	};
537 };
538 
539 #define ITS_CMD_QUEUE_SZ		SZ_64K
540 #define ITS_CMD_QUEUE_NR_ENTRIES	(ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
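/* Each command is four 64-bit words (32 bytes), so a 64kB queue holds 2048 entries */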
541 
542 typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
543 						    struct its_cmd_block *,
544 						    struct its_cmd_desc *);
545 
546 typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
547 					      struct its_cmd_block *,
548 					      struct its_cmd_desc *);
549 
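/*
 * Field encoding helper: clear bits [h:l] of *raw_cmd and insert val there.
 * Each its_encode_*() wrapper below maps one field of the ITS command
 * format, e.g. its_encode_devid() places the DeviceID in bits [63:32] of
 * the first doubleword.
 */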
550 static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
551 {
552 	u64 mask = GENMASK_ULL(h, l);
553 	*raw_cmd &= ~mask;
554 	*raw_cmd |= (val << l) & mask;
555 }
556 
557 static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
558 {
559 	its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
560 }
561 
562 static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
563 {
564 	its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
565 }
566 
567 static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
568 {
569 	its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
570 }
571 
572 static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
573 {
574 	its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
575 }
576 
577 static void its_encode_size(struct its_cmd_block *cmd, u8 size)
578 {
579 	its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
580 }
581 
582 static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
583 {
584 	its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
585 }
586 
587 static void its_encode_valid(struct its_cmd_block *cmd, int valid)
588 {
589 	its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
590 }
591 
592 static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
593 {
594 	its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
595 }
596 
597 static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
598 {
599 	its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
600 }
601 
602 static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
603 {
604 	its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
605 }
606 
607 static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
608 {
609 	its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
610 }
611 
612 static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
613 {
614 	its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
615 }
616 
617 static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
618 {
619 	its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
620 }
621 
622 static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
623 {
624 	its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
625 }
626 
627 static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
628 {
629 	its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
630 }
631 
632 static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
633 {
634 	its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
635 }
636 
637 static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
638 {
639 	its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
640 }
641 
642 static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa)
643 {
644 	its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16);
645 }
646 
647 static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc)
648 {
649 	its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8);
650 }
651 
652 static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz)
653 {
654 	its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9);
655 }
656 
657 static void its_encode_vmapp_default_db(struct its_cmd_block *cmd,
658 					u32 vpe_db_lpi)
659 {
660 	its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0);
661 }
662 
663 static void its_encode_vmovp_default_db(struct its_cmd_block *cmd,
664 					u32 vpe_db_lpi)
665 {
666 	its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0);
667 }
668 
669 static void its_encode_db(struct its_cmd_block *cmd, bool db)
670 {
671 	its_mask_encode(&cmd->raw_cmd[2], db, 63, 63);
672 }
673 
674 static void its_encode_sgi_intid(struct its_cmd_block *cmd, u8 sgi)
675 {
676 	its_mask_encode(&cmd->raw_cmd[0], sgi, 35, 32);
677 }
678 
679 static void its_encode_sgi_priority(struct its_cmd_block *cmd, u8 prio)
680 {
681 	its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20);
682 }
683 
684 static void its_encode_sgi_group(struct its_cmd_block *cmd, bool grp)
685 {
686 	its_mask_encode(&cmd->raw_cmd[0], grp, 10, 10);
687 }
688 
689 static void its_encode_sgi_clear(struct its_cmd_block *cmd, bool clr)
690 {
691 	its_mask_encode(&cmd->raw_cmd[0], clr, 9, 9);
692 }
693 
694 static void its_encode_sgi_enable(struct its_cmd_block *cmd, bool en)
695 {
696 	its_mask_encode(&cmd->raw_cmd[0], en, 8, 8);
697 }
698 
699 static inline void its_fixup_cmd(struct its_cmd_block *cmd)
700 {
701 	/* Let's fixup BE commands */
702 	cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]);
703 	cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]);
704 	cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]);
705 	cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]);
706 }
707 
708 static struct its_collection *its_build_mapd_cmd(struct its_node *its,
709 						 struct its_cmd_block *cmd,
710 						 struct its_cmd_desc *desc)
711 {
712 	phys_addr_t itt_addr;
713 	u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);
714 
715 	itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
716 
717 	its_encode_cmd(cmd, GITS_CMD_MAPD);
718 	its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
719 	its_encode_size(cmd, size - 1);
720 	its_encode_itt(cmd, itt_addr);
721 	its_encode_valid(cmd, desc->its_mapd_cmd.valid);
722 
723 	its_fixup_cmd(cmd);
724 
725 	return NULL;
726 }
727 
728 static struct its_collection *its_build_mapc_cmd(struct its_node *its,
729 						 struct its_cmd_block *cmd,
730 						 struct its_cmd_desc *desc)
731 {
732 	its_encode_cmd(cmd, GITS_CMD_MAPC);
733 	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
734 	its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
735 	its_encode_valid(cmd, desc->its_mapc_cmd.valid);
736 
737 	its_fixup_cmd(cmd);
738 
739 	return desc->its_mapc_cmd.col;
740 }
741 
742 static struct its_collection *its_build_mapti_cmd(struct its_node *its,
743 						  struct its_cmd_block *cmd,
744 						  struct its_cmd_desc *desc)
745 {
746 	struct its_collection *col;
747 
748 	col = dev_event_to_col(desc->its_mapti_cmd.dev,
749 			       desc->its_mapti_cmd.event_id);
750 
751 	its_encode_cmd(cmd, GITS_CMD_MAPTI);
752 	its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
753 	its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
754 	its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
755 	its_encode_collection(cmd, col->col_id);
756 
757 	its_fixup_cmd(cmd);
758 
759 	return valid_col(col);
760 }
761 
762 static struct its_collection *its_build_movi_cmd(struct its_node *its,
763 						 struct its_cmd_block *cmd,
764 						 struct its_cmd_desc *desc)
765 {
766 	struct its_collection *col;
767 
768 	col = dev_event_to_col(desc->its_movi_cmd.dev,
769 			       desc->its_movi_cmd.event_id);
770 
771 	its_encode_cmd(cmd, GITS_CMD_MOVI);
772 	its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
773 	its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
774 	its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
775 
776 	its_fixup_cmd(cmd);
777 
778 	return valid_col(col);
779 }
780 
781 static struct its_collection *its_build_discard_cmd(struct its_node *its,
782 						    struct its_cmd_block *cmd,
783 						    struct its_cmd_desc *desc)
784 {
785 	struct its_collection *col;
786 
787 	col = dev_event_to_col(desc->its_discard_cmd.dev,
788 			       desc->its_discard_cmd.event_id);
789 
790 	its_encode_cmd(cmd, GITS_CMD_DISCARD);
791 	its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
792 	its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
793 
794 	its_fixup_cmd(cmd);
795 
796 	return valid_col(col);
797 }
798 
799 static struct its_collection *its_build_inv_cmd(struct its_node *its,
800 						struct its_cmd_block *cmd,
801 						struct its_cmd_desc *desc)
802 {
803 	struct its_collection *col;
804 
805 	col = dev_event_to_col(desc->its_inv_cmd.dev,
806 			       desc->its_inv_cmd.event_id);
807 
808 	its_encode_cmd(cmd, GITS_CMD_INV);
809 	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
810 	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
811 
812 	its_fixup_cmd(cmd);
813 
814 	return valid_col(col);
815 }
816 
817 static struct its_collection *its_build_int_cmd(struct its_node *its,
818 						struct its_cmd_block *cmd,
819 						struct its_cmd_desc *desc)
820 {
821 	struct its_collection *col;
822 
823 	col = dev_event_to_col(desc->its_int_cmd.dev,
824 			       desc->its_int_cmd.event_id);
825 
826 	its_encode_cmd(cmd, GITS_CMD_INT);
827 	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
828 	its_encode_event_id(cmd, desc->its_int_cmd.event_id);
829 
830 	its_fixup_cmd(cmd);
831 
832 	return valid_col(col);
833 }
834 
835 static struct its_collection *its_build_clear_cmd(struct its_node *its,
836 						  struct its_cmd_block *cmd,
837 						  struct its_cmd_desc *desc)
838 {
839 	struct its_collection *col;
840 
841 	col = dev_event_to_col(desc->its_clear_cmd.dev,
842 			       desc->its_clear_cmd.event_id);
843 
844 	its_encode_cmd(cmd, GITS_CMD_CLEAR);
845 	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
846 	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
847 
848 	its_fixup_cmd(cmd);
849 
850 	return valid_col(col);
851 }
852 
853 static struct its_collection *its_build_invall_cmd(struct its_node *its,
854 						   struct its_cmd_block *cmd,
855 						   struct its_cmd_desc *desc)
856 {
857 	its_encode_cmd(cmd, GITS_CMD_INVALL);
858 	its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);
859 
860 	its_fixup_cmd(cmd);
861 
862 	return desc->its_invall_cmd.col;
863 }
864 
865 static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
866 					     struct its_cmd_block *cmd,
867 					     struct its_cmd_desc *desc)
868 {
869 	its_encode_cmd(cmd, GITS_CMD_VINVALL);
870 	its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
871 
872 	its_fixup_cmd(cmd);
873 
874 	return valid_vpe(its, desc->its_vinvall_cmd.vpe);
875 }
876 
877 static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
878 					   struct its_cmd_block *cmd,
879 					   struct its_cmd_desc *desc)
880 {
881 	struct its_vpe *vpe = valid_vpe(its, desc->its_vmapp_cmd.vpe);
882 	phys_addr_t vpt_addr, vconf_addr;
883 	u64 target;
884 	bool alloc;
885 
886 	its_encode_cmd(cmd, GITS_CMD_VMAPP);
887 	its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
888 	its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
889 
890 	if (!desc->its_vmapp_cmd.valid) {
891 		alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
892 		if (is_v4_1(its)) {
893 			its_encode_alloc(cmd, alloc);
894 			/*
895 			 * Unmapping a VPE is self-synchronizing on GICv4.1,
896 			 * no need to issue a VSYNC.
897 			 */
898 			vpe = NULL;
899 		}
900 
901 		goto out;
902 	}
903 
904 	vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
905 	target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
906 
907 	its_encode_target(cmd, target);
908 	its_encode_vpt_addr(cmd, vpt_addr);
909 	its_encode_vpt_size(cmd, LPI_NRBITS - 1);
910 
911 	alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
912 
913 	if (!is_v4_1(its))
914 		goto out;
915 
916 	vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));
917 
918 	its_encode_alloc(cmd, alloc);
919 
920 	/*
921 	 * GICv4.1 provides a way to get the VLPI state, which needs the vPE
922 	 * to be unmapped first, and in this case, we may remap the vPE
923 	 * back while the VPT is not empty. So we can't assume that the
924 	 * VPT is empty on map. This is why we never advertise PTZ.
925 	 */
926 	its_encode_ptz(cmd, false);
927 	its_encode_vconf_addr(cmd, vconf_addr);
928 	its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);
929 
930 out:
931 	its_fixup_cmd(cmd);
932 
933 	return vpe;
934 }
935 
936 static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
937 					    struct its_cmd_block *cmd,
938 					    struct its_cmd_desc *desc)
939 {
940 	u32 db;
941 
942 	if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled)
943 		db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
944 	else
945 		db = 1023;
946 
947 	its_encode_cmd(cmd, GITS_CMD_VMAPTI);
948 	its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
949 	its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
950 	its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
951 	its_encode_db_phys_id(cmd, db);
952 	its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);
953 
954 	its_fixup_cmd(cmd);
955 
956 	return valid_vpe(its, desc->its_vmapti_cmd.vpe);
957 }
958 
959 static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
960 					   struct its_cmd_block *cmd,
961 					   struct its_cmd_desc *desc)
962 {
963 	u32 db;
964 
965 	if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled)
966 		db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
967 	else
968 		db = 1023;
969 
970 	its_encode_cmd(cmd, GITS_CMD_VMOVI);
971 	its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
972 	its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
973 	its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
974 	its_encode_db_phys_id(cmd, db);
975 	its_encode_db_valid(cmd, true);
976 
977 	its_fixup_cmd(cmd);
978 
979 	return valid_vpe(its, desc->its_vmovi_cmd.vpe);
980 }
981 
982 static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
983 					   struct its_cmd_block *cmd,
984 					   struct its_cmd_desc *desc)
985 {
986 	u64 target;
987 
988 	target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
989 	its_encode_cmd(cmd, GITS_CMD_VMOVP);
990 	its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
991 	its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
992 	its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
993 	its_encode_target(cmd, target);
994 
995 	if (is_v4_1(its)) {
996 		its_encode_db(cmd, true);
997 		its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi);
998 	}
999 
1000 	its_fixup_cmd(cmd);
1001 
1002 	return valid_vpe(its, desc->its_vmovp_cmd.vpe);
1003 }
1004 
1005 static struct its_vpe *its_build_vinv_cmd(struct its_node *its,
1006 					  struct its_cmd_block *cmd,
1007 					  struct its_cmd_desc *desc)
1008 {
1009 	struct its_vlpi_map *map;
1010 
1011 	map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev,
1012 				    desc->its_inv_cmd.event_id);
1013 
1014 	its_encode_cmd(cmd, GITS_CMD_INV);
1015 	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
1016 	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
1017 
1018 	its_fixup_cmd(cmd);
1019 
1020 	return valid_vpe(its, map->vpe);
1021 }
1022 
1023 static struct its_vpe *its_build_vint_cmd(struct its_node *its,
1024 					  struct its_cmd_block *cmd,
1025 					  struct its_cmd_desc *desc)
1026 {
1027 	struct its_vlpi_map *map;
1028 
1029 	map = dev_event_to_vlpi_map(desc->its_int_cmd.dev,
1030 				    desc->its_int_cmd.event_id);
1031 
1032 	its_encode_cmd(cmd, GITS_CMD_INT);
1033 	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
1034 	its_encode_event_id(cmd, desc->its_int_cmd.event_id);
1035 
1036 	its_fixup_cmd(cmd);
1037 
1038 	return valid_vpe(its, map->vpe);
1039 }
1040 
1041 static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
1042 					    struct its_cmd_block *cmd,
1043 					    struct its_cmd_desc *desc)
1044 {
1045 	struct its_vlpi_map *map;
1046 
1047 	map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev,
1048 				    desc->its_clear_cmd.event_id);
1049 
1050 	its_encode_cmd(cmd, GITS_CMD_CLEAR);
1051 	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
1052 	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
1053 
1054 	its_fixup_cmd(cmd);
1055 
1056 	return valid_vpe(its, map->vpe);
1057 }
1058 
1059 static struct its_vpe *its_build_invdb_cmd(struct its_node *its,
1060 					   struct its_cmd_block *cmd,
1061 					   struct its_cmd_desc *desc)
1062 {
1063 	if (WARN_ON(!is_v4_1(its)))
1064 		return NULL;
1065 
1066 	its_encode_cmd(cmd, GITS_CMD_INVDB);
1067 	its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id);
1068 
1069 	its_fixup_cmd(cmd);
1070 
1071 	return valid_vpe(its, desc->its_invdb_cmd.vpe);
1072 }
1073 
1074 static struct its_vpe *its_build_vsgi_cmd(struct its_node *its,
1075 					  struct its_cmd_block *cmd,
1076 					  struct its_cmd_desc *desc)
1077 {
1078 	if (WARN_ON(!is_v4_1(its)))
1079 		return NULL;
1080 
1081 	its_encode_cmd(cmd, GITS_CMD_VSGI);
1082 	its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id);
1083 	its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi);
1084 	its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority);
1085 	its_encode_sgi_group(cmd, desc->its_vsgi_cmd.group);
1086 	its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear);
1087 	its_encode_sgi_enable(cmd, desc->its_vsgi_cmd.enable);
1088 
1089 	its_fixup_cmd(cmd);
1090 
1091 	return valid_vpe(its, desc->its_vsgi_cmd.vpe);
1092 }
1093 
1094 static u64 its_cmd_ptr_to_offset(struct its_node *its,
1095 				 struct its_cmd_block *ptr)
1096 {
1097 	return (ptr - its->cmd_base) * sizeof(*ptr);
1098 }
1099 
1100 static int its_queue_full(struct its_node *its)
1101 {
1102 	int widx;
1103 	int ridx;
1104 
1105 	widx = its->cmd_write - its->cmd_base;
1106 	ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);
1107 
1108 	/* This is incredibly unlikely to happen, unless the ITS locks up. */
1109 	if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
1110 		return 1;
1111 
1112 	return 0;
1113 }
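/*
 * Note the classic ring-buffer convention above: one slot is always left
 * free, so a full queue is detected when the write index sits one entry
 * behind the read index.
 */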
1114 
1115 static struct its_cmd_block *its_allocate_entry(struct its_node *its)
1116 {
1117 	struct its_cmd_block *cmd;
1118 	u32 count = 1000000;	/* 1s! */
1119 
1120 	while (its_queue_full(its)) {
1121 		count--;
1122 		if (!count) {
1123 			pr_err_ratelimited("ITS queue not draining\n");
1124 			return NULL;
1125 		}
1126 		cpu_relax();
1127 		udelay(1);
1128 	}
1129 
1130 	cmd = its->cmd_write++;
1131 
1132 	/* Handle queue wrapping */
1133 	if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
1134 		its->cmd_write = its->cmd_base;
1135 
1136 	/* Clear command  */
1137 	cmd->raw_cmd[0] = 0;
1138 	cmd->raw_cmd[1] = 0;
1139 	cmd->raw_cmd[2] = 0;
1140 	cmd->raw_cmd[3] = 0;
1141 
1142 	return cmd;
1143 }
1144 
1145 static struct its_cmd_block *its_post_commands(struct its_node *its)
1146 {
1147 	u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
1148 
1149 	writel_relaxed(wr, its->base + GITS_CWRITER);
1150 
1151 	return its->cmd_write;
1152 }
1153 
1154 static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
1155 {
1156 	/*
1157 	 * Make sure the commands written to memory are observable by
1158 	 * the ITS.
1159 	 */
1160 	if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
1161 		gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
1162 	else
1163 		dsb(ishst);
1164 }
1165 
1166 static int its_wait_for_range_completion(struct its_node *its,
1167 					 u64	prev_idx,
1168 					 struct its_cmd_block *to)
1169 {
1170 	u64 rd_idx, to_idx, linear_idx;
1171 	u32 count = 1000000;	/* 1s! */
1172 
1173 	/* Linearize to_idx if the command set has wrapped around */
1174 	to_idx = its_cmd_ptr_to_offset(its, to);
1175 	if (to_idx < prev_idx)
1176 		to_idx += ITS_CMD_QUEUE_SZ;
1177 
1178 	linear_idx = prev_idx;
1179 
1180 	while (1) {
1181 		s64 delta;
1182 
1183 		rd_idx = readl_relaxed(its->base + GITS_CREADR);
1184 
1185 		/*
1186 		 * Compute the read pointer progress, taking the
1187 		 * potential wrap-around into account.
1188 		 */
1189 		delta = rd_idx - prev_idx;
1190 		if (rd_idx < prev_idx)
1191 			delta += ITS_CMD_QUEUE_SZ;
1192 
1193 		linear_idx += delta;
1194 		if (linear_idx >= to_idx)
1195 			break;
1196 
1197 		count--;
1198 		if (!count) {
1199 			pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
1200 					   to_idx, linear_idx);
1201 			return -1;
1202 		}
1203 		prev_idx = rd_idx;
1204 		cpu_relax();
1205 		udelay(1);
1206 	}
1207 
1208 	return 0;
1209 }
1210 
1211 /* Warning, macro hell follows */
1212 #define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn)	\
1213 void name(struct its_node *its,						\
1214 	  buildtype builder,						\
1215 	  struct its_cmd_desc *desc)					\
1216 {									\
1217 	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;		\
1218 	synctype *sync_obj;						\
1219 	unsigned long flags;						\
1220 	u64 rd_idx;							\
1221 									\
1222 	raw_spin_lock_irqsave(&its->lock, flags);			\
1223 									\
1224 	cmd = its_allocate_entry(its);					\
1225 	if (!cmd) {		/* We're soooooo screwed... */		\
1226 		raw_spin_unlock_irqrestore(&its->lock, flags);		\
1227 		return;							\
1228 	}								\
1229 	sync_obj = builder(its, cmd, desc);				\
1230 	its_flush_cmd(its, cmd);					\
1231 									\
1232 	if (sync_obj) {							\
1233 		sync_cmd = its_allocate_entry(its);			\
1234 		if (!sync_cmd)						\
1235 			goto post;					\
1236 									\
1237 		buildfn(its, sync_cmd, sync_obj);			\
1238 		its_flush_cmd(its, sync_cmd);				\
1239 	}								\
1240 									\
1241 post:									\
1242 	rd_idx = readl_relaxed(its->base + GITS_CREADR);		\
1243 	next_cmd = its_post_commands(its);				\
1244 	raw_spin_unlock_irqrestore(&its->lock, flags);			\
1245 									\
1246 	if (its_wait_for_range_completion(its, rd_idx, next_cmd))	\
1247 		pr_err_ratelimited("ITS cmd %ps failed\n", builder);	\
1248 }
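/*
 * The macro above expands to its_send_single_command() and
 * its_send_single_vcommand() below: allocate a slot, build the command,
 * chain a SYNC/VSYNC for the object returned by the builder, publish the
 * new CWRITER value and wait for CREADR to move past it.
 */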
1249 
1250 static void its_build_sync_cmd(struct its_node *its,
1251 			       struct its_cmd_block *sync_cmd,
1252 			       struct its_collection *sync_col)
1253 {
1254 	its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
1255 	its_encode_target(sync_cmd, sync_col->target_address);
1256 
1257 	its_fixup_cmd(sync_cmd);
1258 }
1259 
1260 static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
1261 			     struct its_collection, its_build_sync_cmd)
1262 
1263 static void its_build_vsync_cmd(struct its_node *its,
1264 				struct its_cmd_block *sync_cmd,
1265 				struct its_vpe *sync_vpe)
1266 {
1267 	its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
1268 	its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
1269 
1270 	its_fixup_cmd(sync_cmd);
1271 }
1272 
1273 static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
1274 			     struct its_vpe, its_build_vsync_cmd)
1275 
1276 static void its_send_int(struct its_device *dev, u32 event_id)
1277 {
1278 	struct its_cmd_desc desc;
1279 
1280 	desc.its_int_cmd.dev = dev;
1281 	desc.its_int_cmd.event_id = event_id;
1282 
1283 	its_send_single_command(dev->its, its_build_int_cmd, &desc);
1284 }
1285 
1286 static void its_send_clear(struct its_device *dev, u32 event_id)
1287 {
1288 	struct its_cmd_desc desc;
1289 
1290 	desc.its_clear_cmd.dev = dev;
1291 	desc.its_clear_cmd.event_id = event_id;
1292 
1293 	its_send_single_command(dev->its, its_build_clear_cmd, &desc);
1294 }
1295 
1296 static void its_send_inv(struct its_device *dev, u32 event_id)
1297 {
1298 	struct its_cmd_desc desc;
1299 
1300 	desc.its_inv_cmd.dev = dev;
1301 	desc.its_inv_cmd.event_id = event_id;
1302 
1303 	its_send_single_command(dev->its, its_build_inv_cmd, &desc);
1304 }
1305 
1306 static void its_send_mapd(struct its_device *dev, int valid)
1307 {
1308 	struct its_cmd_desc desc;
1309 
1310 	desc.its_mapd_cmd.dev = dev;
1311 	desc.its_mapd_cmd.valid = !!valid;
1312 
1313 	its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
1314 }
1315 
1316 static void its_send_mapc(struct its_node *its, struct its_collection *col,
1317 			  int valid)
1318 {
1319 	struct its_cmd_desc desc;
1320 
1321 	desc.its_mapc_cmd.col = col;
1322 	desc.its_mapc_cmd.valid = !!valid;
1323 
1324 	its_send_single_command(its, its_build_mapc_cmd, &desc);
1325 }
1326 
1327 static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
1328 {
1329 	struct its_cmd_desc desc;
1330 
1331 	desc.its_mapti_cmd.dev = dev;
1332 	desc.its_mapti_cmd.phys_id = irq_id;
1333 	desc.its_mapti_cmd.event_id = id;
1334 
1335 	its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
1336 }
1337 
1338 static void its_send_movi(struct its_device *dev,
1339 			  struct its_collection *col, u32 id)
1340 {
1341 	struct its_cmd_desc desc;
1342 
1343 	desc.its_movi_cmd.dev = dev;
1344 	desc.its_movi_cmd.col = col;
1345 	desc.its_movi_cmd.event_id = id;
1346 
1347 	its_send_single_command(dev->its, its_build_movi_cmd, &desc);
1348 }
1349 
1350 static void its_send_discard(struct its_device *dev, u32 id)
1351 {
1352 	struct its_cmd_desc desc;
1353 
1354 	desc.its_discard_cmd.dev = dev;
1355 	desc.its_discard_cmd.event_id = id;
1356 
1357 	its_send_single_command(dev->its, its_build_discard_cmd, &desc);
1358 }
1359 
1360 static void its_send_invall(struct its_node *its, struct its_collection *col)
1361 {
1362 	struct its_cmd_desc desc;
1363 
1364 	desc.its_invall_cmd.col = col;
1365 
1366 	its_send_single_command(its, its_build_invall_cmd, &desc);
1367 }
1368 
1369 static void its_send_vmapti(struct its_device *dev, u32 id)
1370 {
1371 	struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
1372 	struct its_cmd_desc desc;
1373 
1374 	desc.its_vmapti_cmd.vpe = map->vpe;
1375 	desc.its_vmapti_cmd.dev = dev;
1376 	desc.its_vmapti_cmd.virt_id = map->vintid;
1377 	desc.its_vmapti_cmd.event_id = id;
1378 	desc.its_vmapti_cmd.db_enabled = map->db_enabled;
1379 
1380 	its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
1381 }
1382 
1383 static void its_send_vmovi(struct its_device *dev, u32 id)
1384 {
1385 	struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
1386 	struct its_cmd_desc desc;
1387 
1388 	desc.its_vmovi_cmd.vpe = map->vpe;
1389 	desc.its_vmovi_cmd.dev = dev;
1390 	desc.its_vmovi_cmd.event_id = id;
1391 	desc.its_vmovi_cmd.db_enabled = map->db_enabled;
1392 
1393 	its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
1394 }
1395 
1396 static void its_send_vmapp(struct its_node *its,
1397 			   struct its_vpe *vpe, bool valid)
1398 {
1399 	struct its_cmd_desc desc;
1400 
1401 	desc.its_vmapp_cmd.vpe = vpe;
1402 	desc.its_vmapp_cmd.valid = valid;
1403 	desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
1404 
1405 	its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
1406 }
1407 
1408 static void its_send_vmovp(struct its_vpe *vpe)
1409 {
1410 	struct its_cmd_desc desc = {};
1411 	struct its_node *its;
1412 	int col_id = vpe->col_idx;
1413 
1414 	desc.its_vmovp_cmd.vpe = vpe;
1415 
1416 	if (!its_list_map) {
1417 		its = list_first_entry(&its_nodes, struct its_node, entry);
1418 		desc.its_vmovp_cmd.col = &its->collections[col_id];
1419 		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1420 		return;
1421 	}
1422 
1423 	/*
1424 	 * Yet another marvel of the architecture. If using the
1425 	 * its_list "feature", we need to make sure that all ITSs
1426 	 * receive all VMOVP commands in the same order. The only way
1427 	 * to guarantee this is to make vmovp a serialization point.
1428 	 *
1429 	 * Wall <-- Head.
1430 	 */
1431 	guard(raw_spinlock)(&vmovp_lock);
1432 	desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
1433 	desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);
1434 
1435 	/* Emit VMOVPs */
1436 	list_for_each_entry(its, &its_nodes, entry) {
1437 		if (!is_v4(its))
1438 			continue;
1439 
1440 		if (!require_its_list_vmovp(vpe->its_vm, its))
1441 			continue;
1442 
1443 		desc.its_vmovp_cmd.col = &its->collections[col_id];
1444 		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1445 	}
1446 }
1447 
1448 static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
1449 {
1450 	struct its_cmd_desc desc;
1451 
1452 	desc.its_vinvall_cmd.vpe = vpe;
1453 	its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
1454 }
1455 
1456 static void its_send_vinv(struct its_device *dev, u32 event_id)
1457 {
1458 	struct its_cmd_desc desc;
1459 
1460 	/*
1461 	 * There is no real VINV command. This is just a normal INV,
1462 	 * with a VSYNC instead of a SYNC.
1463 	 */
1464 	desc.its_inv_cmd.dev = dev;
1465 	desc.its_inv_cmd.event_id = event_id;
1466 
1467 	its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc);
1468 }
1469 
1470 static void its_send_vint(struct its_device *dev, u32 event_id)
1471 {
1472 	struct its_cmd_desc desc;
1473 
1474 	/*
1475 	 * There is no real VINT command. This is just a normal INT,
1476 	 * with a VSYNC instead of a SYNC.
1477 	 */
1478 	desc.its_int_cmd.dev = dev;
1479 	desc.its_int_cmd.event_id = event_id;
1480 
1481 	its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc);
1482 }
1483 
1484 static void its_send_vclear(struct its_device *dev, u32 event_id)
1485 {
1486 	struct its_cmd_desc desc;
1487 
1488 	/*
1489 	 * There is no real VCLEAR command. This is just a normal CLEAR,
1490 	 * with a VSYNC instead of a SYNC.
1491 	 */
1492 	desc.its_clear_cmd.dev = dev;
1493 	desc.its_clear_cmd.event_id = event_id;
1494 
1495 	its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
1496 }
1497 
1498 static void its_send_invdb(struct its_node *its, struct its_vpe *vpe)
1499 {
1500 	struct its_cmd_desc desc;
1501 
1502 	desc.its_invdb_cmd.vpe = vpe;
1503 	its_send_single_vcommand(its, its_build_invdb_cmd, &desc);
1504 }
1505 
1506 /*
1507  * irqchip functions - assumes MSI, mostly.
1508  */
1509 static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
1510 {
1511 	struct its_vlpi_map *map = get_vlpi_map(d);
1512 	irq_hw_number_t hwirq;
1513 	void *va;
1514 	u8 *cfg;
1515 
1516 	if (map) {
1517 		va = page_address(map->vm->vprop_page);
1518 		hwirq = map->vintid;
1519 
1520 		/* Remember the updated property */
1521 		map->properties &= ~clr;
1522 		map->properties |= set | LPI_PROP_GROUP1;
1523 	} else {
1524 		va = gic_rdists->prop_table_va;
1525 		hwirq = d->hwirq;
1526 	}
1527 
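	/* LPI INTIDs start at 8192; the property table is indexed from there */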
1528 	cfg = va + hwirq - 8192;
1529 	*cfg &= ~clr;
1530 	*cfg |= set | LPI_PROP_GROUP1;
1531 
1532 	/*
1533 	 * Make the above write visible to the redistributors.
1534 	 * And yes, we're flushing exactly: One. Single. Byte.
1535 	 * Humpf...
1536 	 */
1537 	if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
1538 		gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
1539 	else
1540 		dsb(ishst);
1541 }
1542 
1543 static void wait_for_syncr(void __iomem *rdbase)
1544 {
1545 	while (readl_relaxed(rdbase + GICR_SYNCR) & 1)
1546 		cpu_relax();
1547 }
1548 
1549 static void __direct_lpi_inv(struct irq_data *d, u64 val)
1550 {
1551 	void __iomem *rdbase;
1552 	unsigned long flags;
1553 	int cpu;
1554 
1555 	/* Target the redistributor this LPI is currently routed to */
1556 	cpu = irq_to_cpuid_lock(d, &flags);
1557 	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
1558 
1559 	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
1560 	gic_write_lpir(val, rdbase + GICR_INVLPIR);
1561 	wait_for_syncr(rdbase);
1562 
1563 	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
1564 	irq_to_cpuid_unlock(d, flags);
1565 }
1566 
1567 static void direct_lpi_inv(struct irq_data *d)
1568 {
1569 	struct its_vlpi_map *map = get_vlpi_map(d);
1570 	u64 val;
1571 
1572 	if (map) {
1573 		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1574 
1575 		WARN_ON(!is_v4_1(its_dev->its));
1576 
1577 		val  = GICR_INVLPIR_V;
1578 		val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id);
1579 		val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid);
1580 	} else {
1581 		val = d->hwirq;
1582 	}
1583 
1584 	__direct_lpi_inv(d, val);
1585 }
1586 
1587 static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
1588 {
1589 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1590 
1591 	lpi_write_config(d, clr, set);
1592 	if (gic_rdists->has_direct_lpi &&
1593 	    (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d)))
1594 		direct_lpi_inv(d);
1595 	else if (!irqd_is_forwarded_to_vcpu(d))
1596 		its_send_inv(its_dev, its_get_event_id(d));
1597 	else
1598 		its_send_vinv(its_dev, its_get_event_id(d));
1599 }
1600 
1601 static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
1602 {
1603 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1604 	u32 event = its_get_event_id(d);
1605 	struct its_vlpi_map *map;
1606 
1607 	/*
1608 	 * GICv4.1 does away with the per-LPI nonsense, nothing to do
1609 	 * here.
1610 	 */
1611 	if (is_v4_1(its_dev->its))
1612 		return;
1613 
1614 	map = dev_event_to_vlpi_map(its_dev, event);
1615 
1616 	if (map->db_enabled == enable)
1617 		return;
1618 
1619 	map->db_enabled = enable;
1620 
1621 	/*
1622 	 * More fun with the architecture:
1623 	 *
1624 	 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
1625 	 * value or to 1023, depending on the enable bit. But that
1626 	 * would be issuing a mapping for an /existing/ DevID+EventID
1627 	 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
1628 	 * to the /same/ vPE, using this opportunity to adjust the
1629 	 * doorbell. Mouahahahaha. We loves it, Precious.
1630 	 */
1631 	its_send_vmovi(its_dev, event);
1632 }
1633 
1634 static void its_mask_irq(struct irq_data *d)
1635 {
1636 	if (irqd_is_forwarded_to_vcpu(d))
1637 		its_vlpi_set_doorbell(d, false);
1638 
1639 	lpi_update_config(d, LPI_PROP_ENABLED, 0);
1640 }
1641 
1642 static void its_unmask_irq(struct irq_data *d)
1643 {
1644 	if (irqd_is_forwarded_to_vcpu(d))
1645 		its_vlpi_set_doorbell(d, true);
1646 
1647 	lpi_update_config(d, 0, LPI_PROP_ENABLED);
1648 }
1649 
1650 static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu)
1651 {
1652 	if (irqd_affinity_is_managed(d))
1653 		return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
1654 
1655 	return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
1656 }
1657 
1658 static void its_inc_lpi_count(struct irq_data *d, int cpu)
1659 {
1660 	if (irqd_affinity_is_managed(d))
1661 		atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
1662 	else
1663 		atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
1664 }
1665 
1666 static void its_dec_lpi_count(struct irq_data *d, int cpu)
1667 {
1668 	if (irqd_affinity_is_managed(d))
1669 		atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
1670 	else
1671 		atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
1672 }
1673 
1674 static unsigned int cpumask_pick_least_loaded(struct irq_data *d,
1675 					      const struct cpumask *cpu_mask)
1676 {
1677 	unsigned int cpu = nr_cpu_ids, tmp;
1678 	int count = S32_MAX;
1679 
1680 	for_each_cpu(tmp, cpu_mask) {
1681 		int this_count = its_read_lpi_count(d, tmp);
1682 		if (this_count < count) {
1683 			cpu = tmp;
1684 			count = this_count;
1685 		}
1686 	}
1687 
1688 	return cpu;
1689 }
1690 
1691 /*
1692  * As suggested by Thomas Gleixner in:
1693  * https://lore.kernel.org/r/87h80q2aoc.fsf@nanos.tec.linutronix.de
1694  */
1695 static int its_select_cpu(struct irq_data *d,
1696 			  const struct cpumask *aff_mask)
1697 {
1698 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1699 	static DEFINE_RAW_SPINLOCK(tmpmask_lock);
1700 	static struct cpumask __tmpmask;
1701 	struct cpumask *tmpmask;
1702 	unsigned long flags;
1703 	int cpu, node;
1704 	node = its_dev->its->numa_node;
1705 	tmpmask = &__tmpmask;
1706 
1707 	raw_spin_lock_irqsave(&tmpmask_lock, flags);
1708 
1709 	if (!irqd_affinity_is_managed(d)) {
1710 		/* First try the NUMA node */
1711 		if (node != NUMA_NO_NODE) {
1712 			/*
1713 			 * Try the intersection of the affinity mask and the
1714 			 * node mask (and the online mask, just to be safe).
1715 			 */
1716 			cpumask_and(tmpmask, cpumask_of_node(node), aff_mask);
1717 			cpumask_and(tmpmask, tmpmask, cpu_online_mask);
1718 
1719 			/*
1720 			 * Ideally, we would check if the mask is empty, and
1721 			 * try again on the full node here.
1722 			 *
1723 			 * But it turns out that the way ACPI describes the
1724 			 * affinity for ITSs only deals with memory, and
1725 			 * not target CPUs, so it cannot describe a single
1726 			 * ITS placed next to two NUMA nodes.
1727 			 *
1728 			 * Instead, just fall back on the online mask. This
1729 			 * diverges from Thomas' suggestion above.
1730 			 */
1731 			cpu = cpumask_pick_least_loaded(d, tmpmask);
1732 			if (cpu < nr_cpu_ids)
1733 				goto out;
1734 
1735 			/* If we can't cross sockets, give up */
1736 			if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144))
1737 				goto out;
1738 
1739 			/* If the above failed, expand the search */
1740 		}
1741 
1742 		/* Try the intersection of the affinity and online masks */
1743 		cpumask_and(tmpmask, aff_mask, cpu_online_mask);
1744 
1745 		/* If that doesn't fly, the online mask is the last resort */
1746 		if (cpumask_empty(tmpmask))
1747 			cpumask_copy(tmpmask, cpu_online_mask);
1748 
1749 		cpu = cpumask_pick_least_loaded(d, tmpmask);
1750 	} else {
1751 		cpumask_copy(tmpmask, aff_mask);
1752 
1753 		/* If we cannot cross sockets, limit the search to that node */
1754 		if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) &&
1755 		    node != NUMA_NO_NODE)
1756 			cpumask_and(tmpmask, tmpmask, cpumask_of_node(node));
1757 
1758 		cpu = cpumask_pick_least_loaded(d, tmpmask);
1759 	}
1760 out:
1761 	raw_spin_unlock_irqrestore(&tmpmask_lock, flags);
1762 
1763 	pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu);
1764 	return cpu;
1765 }
1766 
1767 static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
1768 			    bool force)
1769 {
1770 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1771 	struct its_collection *target_col;
1772 	u32 id = its_get_event_id(d);
1773 	int cpu, prev_cpu;
1774 
1775 	/* A forwarded interrupt should use irq_set_vcpu_affinity */
1776 	if (irqd_is_forwarded_to_vcpu(d))
1777 		return -EINVAL;
1778 
1779 	prev_cpu = its_dev->event_map.col_map[id];
1780 	its_dec_lpi_count(d, prev_cpu);
1781 
1782 	if (!force)
1783 		cpu = its_select_cpu(d, mask_val);
1784 	else
1785 		cpu = cpumask_pick_least_loaded(d, mask_val);
1786 
1787 	if (cpu < 0 || cpu >= nr_cpu_ids)
1788 		goto err;
1789 
1790 	/* don't set the affinity when the target cpu is the same as the current one */
1791 	if (cpu != prev_cpu) {
1792 		target_col = &its_dev->its->collections[cpu];
1793 		its_send_movi(its_dev, target_col, id);
1794 		its_dev->event_map.col_map[id] = cpu;
1795 		irq_data_update_effective_affinity(d, cpumask_of(cpu));
1796 	}
1797 
1798 	its_inc_lpi_count(d, cpu);
1799 
1800 	return IRQ_SET_MASK_OK_DONE;
1801 
1802 err:
1803 	its_inc_lpi_count(d, prev_cpu);
1804 	return -EINVAL;
1805 }
1806 
1807 static u64 its_irq_get_msi_base(struct its_device *its_dev)
1808 {
1809 	struct its_node *its = its_dev->its;
1810 
1811 	return its->phys_base + GITS_TRANSLATER;
1812 }
1813 
1814 static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
1815 {
1816 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1817 
1818 	msg->data = its_get_event_id(d);
1819 	msi_msg_set_addr(irq_data_get_msi_desc(d), msg,
1820 			 its_dev->its->get_msi_base(its_dev));
1821 }
1822 
1823 static int its_irq_set_irqchip_state(struct irq_data *d,
1824 				     enum irqchip_irq_state which,
1825 				     bool state)
1826 {
1827 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1828 	u32 event = its_get_event_id(d);
1829 
1830 	if (which != IRQCHIP_STATE_PENDING)
1831 		return -EINVAL;
1832 
1833 	if (irqd_is_forwarded_to_vcpu(d)) {
1834 		if (state)
1835 			its_send_vint(its_dev, event);
1836 		else
1837 			its_send_vclear(its_dev, event);
1838 	} else {
1839 		if (state)
1840 			its_send_int(its_dev, event);
1841 		else
1842 			its_send_clear(its_dev, event);
1843 	}
1844 
1845 	return 0;
1846 }
1847 
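/*
 * Retriggering is simply "make it pending again": reuse the irqchip-state
 * helper above to issue an INT (or VINT for a forwarded interrupt).
 */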
1848 static int its_irq_retrigger(struct irq_data *d)
1849 {
1850 	return !its_irq_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
1851 }
1852 
1853 /*
1854  * Two favourable cases:
1855  *
1856  * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times
1857  *     for vSGI delivery
1858  *
1859  * (b) Or the ITSs do not use a list map, meaning that VMOVP is cheap enough
1860  *     and we're better off mapping all VPEs always
1861  *
1862  * If neither (a) nor (b) is true, then we map vPEs on demand.
1863  *
1864  */
1865 static bool gic_requires_eager_mapping(void)
1866 {
1867 	if (!its_list_map || gic_rdists->has_rvpeid)
1868 		return true;
1869 
1870 	return false;
1871 }
1872 
1873 static void its_map_vm(struct its_node *its, struct its_vm *vm)
1874 {
1875 	if (gic_requires_eager_mapping())
1876 		return;
1877 
1878 	guard(raw_spinlock_irqsave)(&vm->vmapp_lock);
1879 
1880 	/*
1881 	 * If the VM wasn't mapped yet, iterate over the vpes and get
1882 	 * them mapped now.
1883 	 */
1884 	vm->vlpi_count[its->list_nr]++;
1885 
1886 	if (vm->vlpi_count[its->list_nr] == 1) {
1887 		int i;
1888 
1889 		for (i = 0; i < vm->nr_vpes; i++) {
1890 			struct its_vpe *vpe = vm->vpes[i];
1891 
1892 			scoped_guard(raw_spinlock, &vpe->vpe_lock)
1893 				its_send_vmapp(its, vpe, true);
1894 
1895 			its_send_vinvall(its, vpe);
1896 		}
1897 	}
1898 }
1899 
1900 static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
1901 {
1902 	/* Not using the ITS list? Everything is always mapped. */
1903 	if (gic_requires_eager_mapping())
1904 		return;
1905 
1906 	guard(raw_spinlock_irqsave)(&vm->vmapp_lock);
1907 
1908 	if (!--vm->vlpi_count[its->list_nr]) {
1909 		int i;
1910 
1911 		for (i = 0; i < vm->nr_vpes; i++) {
1912 			guard(raw_spinlock)(&vm->vpes[i]->vpe_lock);
1913 			its_send_vmapp(its, vm->vpes[i], false);
1914 		}
1915 	}
1916 }
1917 
1918 static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
1919 {
1920 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1921 	u32 event = its_get_event_id(d);
1922 
1923 	if (!info->map)
1924 		return -EINVAL;
1925 
1926 	if (!its_dev->event_map.vm) {
1927 		struct its_vlpi_map *maps;
1928 
1929 		maps = kzalloc_objs(*maps, its_dev->event_map.nr_lpis,
1930 				    GFP_ATOMIC);
1931 		if (!maps)
1932 			return -ENOMEM;
1933 
1934 		its_dev->event_map.vm = info->map->vm;
1935 		its_dev->event_map.vlpi_maps = maps;
1936 	} else if (its_dev->event_map.vm != info->map->vm) {
1937 		return -EINVAL;
1938 	}
1939 
1940 	/* Get our private copy of the mapping information */
1941 	its_dev->event_map.vlpi_maps[event] = *info->map;
1942 
1943 	if (irqd_is_forwarded_to_vcpu(d)) {
1944 		/* Already mapped, move it around */
1945 		its_send_vmovi(its_dev, event);
1946 	} else {
1947 		/* Ensure all the VPEs are mapped on this ITS */
1948 		its_map_vm(its_dev->its, info->map->vm);
1949 
1950 		/*
1951 		 * Flag the interrupt as forwarded so that we can
1952 		 * start poking the virtual property table.
1953 		 */
1954 		irqd_set_forwarded_to_vcpu(d);
1955 
1956 		/* Write out the property to the prop table */
1957 		lpi_write_config(d, 0xff, info->map->properties);
1958 
1959 		/* Drop the physical mapping */
1960 		its_send_discard(its_dev, event);
1961 
1962 		/* and install the virtual one */
1963 		its_send_vmapti(its_dev, event);
1964 
1965 		/* Increment the number of VLPIs */
1966 		its_dev->event_map.nr_vlpis++;
1967 	}
1968 
1969 	return 0;
1970 }
1971 
1972 static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
1973 {
1974 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1975 	struct its_vlpi_map *map;
1976 
1977 	map = get_vlpi_map(d);
1978 
1979 	if (!its_dev->event_map.vm || !map)
1980 		return -EINVAL;
1981 
1982 	/* Copy our mapping information to the incoming request */
1983 	*info->map = *map;
1984 
1985 	return 0;
1986 }
1987 
1988 static int its_vlpi_unmap(struct irq_data *d)
1989 {
1990 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1991 	u32 event = its_get_event_id(d);
1992 
1993 	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
1994 		return -EINVAL;
1995 
1996 	/* Drop the virtual mapping */
1997 	its_send_discard(its_dev, event);
1998 
1999 	/* and restore the physical one */
2000 	irqd_clr_forwarded_to_vcpu(d);
2001 	its_send_mapti(its_dev, d->hwirq, event);
2002 	lpi_update_config(d, 0xff, (lpi_prop_prio |
2003 				    LPI_PROP_ENABLED |
2004 				    LPI_PROP_GROUP1));
2005 
2006 	/* Potentially unmap the VM from this ITS */
2007 	its_unmap_vm(its_dev->its, its_dev->event_map.vm);
2008 
2009 	/*
2010 	 * Drop the refcount and make the device available again if
2011 	 * this was the last VLPI.
2012 	 */
2013 	if (!--its_dev->event_map.nr_vlpis) {
2014 		its_dev->event_map.vm = NULL;
2015 		kfree(its_dev->event_map.vlpi_maps);
2016 	}
2017 
2018 	return 0;
2019 }
2020 
2021 static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
2022 {
2023 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2024 
2025 	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
2026 		return -EINVAL;
2027 
2028 	if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
2029 		lpi_update_config(d, 0xff, info->config);
2030 	else
2031 		lpi_write_config(d, 0xff, info->config);
2032 	its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
2033 
2034 	return 0;
2035 }
2036 
2037 static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
2038 {
2039 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2040 	struct its_cmd_info *info = vcpu_info;
2041 
2042 	/* Need a v4 ITS */
2043 	if (!is_v4(its_dev->its))
2044 		return -EINVAL;
2045 
2046 	guard(raw_spinlock)(&its_dev->event_map.vlpi_lock);
2047 
2048 	/* Unmap request? */
2049 	if (!info)
2050 		return its_vlpi_unmap(d);
2051 
2052 	switch (info->cmd_type) {
2053 	case MAP_VLPI:
2054 		return its_vlpi_map(d, info);
2055 
2056 	case GET_VLPI:
2057 		return its_vlpi_get(d, info);
2058 
2059 	case PROP_UPDATE_VLPI:
2060 	case PROP_UPDATE_AND_INV_VLPI:
2061 		return its_vlpi_prop_update(d, info);
2062 
2063 	default:
2064 		return -EINVAL;
2065 	}
2066 }
2067 
2068 static struct irq_chip its_irq_chip = {
2069 	.name			= "ITS",
2070 	.irq_mask		= its_mask_irq,
2071 	.irq_unmask		= its_unmask_irq,
2072 	.irq_eoi		= irq_chip_eoi_parent,
2073 	.irq_set_affinity	= its_set_affinity,
2074 	.irq_compose_msi_msg	= its_irq_compose_msi_msg,
2075 	.irq_set_irqchip_state	= its_irq_set_irqchip_state,
2076 	.irq_retrigger		= its_irq_retrigger,
2077 	.irq_set_vcpu_affinity	= its_irq_set_vcpu_affinity,
2078 };
2079 
2080 
2081 /*
2082  * How we allocate LPIs:
2083  *
2084  * lpi_range_list contains ranges of LPIs that are available to
2085  * allocate from. To allocate LPIs, just pick the first range that
2086  * fits the required allocation, and reduce it by the required
2087  * amount. Once empty, remove the range from the list.
2088  *
2089  * To free a range of LPIs, add a free range to the list, sort it and
2090  * merge the result if the new range happens to be adjacent to an
2091  * already free block.
2092  *
2093  * The consequence of the above is that allocation cost is low, but
2094  * freeing is expensive. We assume that freeing rarely occurs.
2095  */
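/*
 * Illustrative example (made-up numbers): starting from a single free range
 * [8192, 8192 + 65536), alloc_lpi_range(32, &base) returns base = 8192 and
 * shrinks the range to start at 8224. A later free_lpi_range(8192, 32)
 * inserts a new range in front of it, and merge_lpi_ranges() folds the two
 * adjacent ranges back into one.
 */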
2096 #define ITS_MAX_LPI_NRBITS	16 /* 64K LPIs */
2097 
2098 static DEFINE_MUTEX(lpi_range_lock);
2099 static LIST_HEAD(lpi_range_list);
2100 
2101 struct lpi_range {
2102 	struct list_head	entry;
2103 	u32			base_id;
2104 	u32			span;
2105 };
2106 
2107 static struct lpi_range *mk_lpi_range(u32 base, u32 span)
2108 {
2109 	struct lpi_range *range;
2110 
2111 	range = kmalloc_obj(*range);
2112 	if (range) {
2113 		range->base_id = base;
2114 		range->span = span;
2115 	}
2116 
2117 	return range;
2118 }
2119 
2120 static int alloc_lpi_range(u32 nr_lpis, u32 *base)
2121 {
2122 	struct lpi_range *range, *tmp;
2123 	int err = -ENOSPC;
2124 
2125 	mutex_lock(&lpi_range_lock);
2126 
2127 	list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
2128 		if (range->span >= nr_lpis) {
2129 			*base = range->base_id;
2130 			range->base_id += nr_lpis;
2131 			range->span -= nr_lpis;
2132 
2133 			if (range->span == 0) {
2134 				list_del(&range->entry);
2135 				kfree(range);
2136 			}
2137 
2138 			err = 0;
2139 			break;
2140 		}
2141 	}
2142 
2143 	mutex_unlock(&lpi_range_lock);
2144 
2145 	pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
2146 	return err;
2147 }
2148 
2149 static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b)
2150 {
2151 	if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list)
2152 		return;
2153 	if (a->base_id + a->span != b->base_id)
2154 		return;
2155 	b->base_id = a->base_id;
2156 	b->span += a->span;
2157 	list_del(&a->entry);
2158 	kfree(a);
2159 }
2160 
2161 static int free_lpi_range(u32 base, u32 nr_lpis)
2162 {
2163 	struct lpi_range *new, *old;
2164 
2165 	new = mk_lpi_range(base, nr_lpis);
2166 	if (!new)
2167 		return -ENOMEM;
2168 
2169 	mutex_lock(&lpi_range_lock);
2170 
2171 	list_for_each_entry_reverse(old, &lpi_range_list, entry) {
2172 		if (old->base_id < base)
2173 			break;
2174 	}
2175 	/*
2176 	 * old is the last element with ->base_id smaller than base,
2177 	 * so new goes right after it. If there are no elements with
2178 	 * ->base_id smaller than base, &old->entry ends up pointing
2179 	 * at the head of the list, and inserting new at the start of
2180 	 * the list is the right thing to do in that case as well.
2181 	 */
2182 	list_add(&new->entry, &old->entry);
2183 	/*
2184 	 * Now check if we can merge with the preceding and/or
2185 	 * following ranges.
2186 	 */
2187 	merge_lpi_ranges(old, new);
2188 	merge_lpi_ranges(new, list_next_entry(new, entry));
2189 
2190 	mutex_unlock(&lpi_range_lock);
2191 	return 0;
2192 }
2193 
2194 static int __init its_lpi_init(u32 id_bits)
2195 {
2196 	u32 lpis = (1UL << id_bits) - 8192;
2197 	u32 numlpis;
2198 	int err;
2199 
2200 	numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
2201 
2202 	if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
2203 		lpis = numlpis;
2204 		pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
2205 			lpis);
2206 	}
2207 
2208 	/*
2209 	 * Initializing the allocator is just the same as freeing the
2210 	 * full range of LPIs.
2211 	 */
2212 	err = free_lpi_range(8192, lpis);
2213 	pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
2214 	return err;
2215 }
2216 
2217 static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
2218 {
2219 	unsigned long *bitmap = NULL;
2220 	int err = 0;
2221 
2222 	do {
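	/*
	 * Ask for a contiguous block of nr_irqs LPIs; if the free list cannot
	 * satisfy it, keep halving the request until something fits (or until
	 * there is nothing left to ask for).
	 */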
2223 		err = alloc_lpi_range(nr_irqs, base);
2224 		if (!err)
2225 			break;
2226 
2227 		nr_irqs /= 2;
2228 	} while (nr_irqs > 0);
2229 
2230 	if (!nr_irqs)
2231 		err = -ENOSPC;
2232 
2233 	if (err)
2234 		goto out;
2235 
2236 	bitmap = bitmap_zalloc(nr_irqs, GFP_ATOMIC);
2237 	if (!bitmap)
2238 		goto out;
2239 
2240 	*nr_ids = nr_irqs;
2241 
2242 out:
2243 	if (!bitmap)
2244 		*base = *nr_ids = 0;
2245 
2246 	return bitmap;
2247 }
2248 
2249 static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
2250 {
2251 	WARN_ON(free_lpi_range(base, nr_ids));
2252 	bitmap_free(bitmap);
2253 }
2254 
2255 static void gic_reset_prop_table(void *va)
2256 {
2257 	/* Regular IRQ priority, Group-1, disabled */
2258 	memset(va, lpi_prop_prio | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);
2259 
2260 	/* Make sure the GIC will observe the written configuration */
2261 	gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
2262 }
2263 
2264 static struct page *its_allocate_prop_table(gfp_t gfp_flags)
2265 {
2266 	struct page *prop_page;
2267 
2268 	prop_page = its_alloc_pages(gfp_flags,
2269 				    get_order(LPI_PROPBASE_SZ));
2270 	if (!prop_page)
2271 		return NULL;
2272 
2273 	gic_reset_prop_table(page_address(prop_page));
2274 
2275 	return prop_page;
2276 }
2277 
2278 static void its_free_prop_table(struct page *prop_page)
2279 {
2280 	its_free_pages(page_address(prop_page), get_order(LPI_PROPBASE_SZ));
2281 }
2282 
2283 static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
2284 {
2285 	phys_addr_t start, end, addr_end;
2286 	u64 i;
2287 
2288 	/*
2289 	 * We don't bother checking for a kdump kernel as by
2290 	 * construction, the LPI tables are out of this kernel's
2291 	 * memory map.
2292 	 */
2293 	if (is_kdump_kernel())
2294 		return true;
2295 
2296 	addr_end = addr + size - 1;
2297 
2298 	for_each_reserved_mem_range(i, &start, &end) {
2299 		if (addr >= start && addr_end <= end)
2300 			return true;
2301 	}
2302 
2303 	/* Not found, not a good sign... */
2304 	pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n",
2305 		&addr, &addr_end);
2306 	add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
2307 	return false;
2308 }
2309 
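/*
 * When EFI is available, the reservation is made persistent so that a later
 * kexec'd kernel still sees the LPI tables as reserved memory, which is what
 * gic_check_reserved_range() above relies on.
 */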
2310 static int gic_reserve_range(phys_addr_t addr, unsigned long size)
2311 {
2312 	if (efi_enabled(EFI_CONFIG_TABLES))
2313 		return efi_mem_reserve_persistent(addr, size);
2314 
2315 	return 0;
2316 }
2317 
2318 static int __init its_setup_lpi_prop_table(void)
2319 {
2320 	if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) {
2321 		u64 val;
2322 
2323 		val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
2324 		lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1;
2325 
2326 		gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12);
2327 		gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa,
2328 						     LPI_PROPBASE_SZ,
2329 						     MEMREMAP_WB);
2330 		gic_reset_prop_table(gic_rdists->prop_table_va);
2331 	} else {
2332 		struct page *page;
2333 
2334 		lpi_id_bits = min_t(u32,
2335 				    GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
2336 				    ITS_MAX_LPI_NRBITS);
2337 		page = its_allocate_prop_table(GFP_NOWAIT);
2338 		if (!page) {
2339 			pr_err("Failed to allocate PROPBASE\n");
2340 			return -ENOMEM;
2341 		}
2342 
2343 		gic_rdists->prop_table_pa = page_to_phys(page);
2344 		gic_rdists->prop_table_va = page_address(page);
2345 		WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa,
2346 					  LPI_PROPBASE_SZ));
2347 	}
2348 
2349 	pr_info("GICv3: using LPI property table @%pa\n",
2350 		&gic_rdists->prop_table_pa);
2351 
2352 	return its_lpi_init(lpi_id_bits);
2353 }
2354 
2355 static const char *its_base_type_string[] = {
2356 	[GITS_BASER_TYPE_DEVICE]	= "Devices",
2357 	[GITS_BASER_TYPE_VCPU]		= "Virtual CPUs",
2358 	[GITS_BASER_TYPE_RESERVED3]	= "Reserved (3)",
2359 	[GITS_BASER_TYPE_COLLECTION]	= "Interrupt Collections",
2360 	[GITS_BASER_TYPE_RESERVED5] 	= "Reserved (5)",
2361 	[GITS_BASER_TYPE_RESERVED6] 	= "Reserved (6)",
2362 	[GITS_BASER_TYPE_RESERVED7] 	= "Reserved (7)",
2363 };
2364 
2365 static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
2366 {
2367 	u32 idx = baser - its->tables;
2368 
2369 	return gits_read_baser(its->base + GITS_BASER + (idx << 3));
2370 }
2371 
2372 static void its_write_baser(struct its_node *its, struct its_baser *baser,
2373 			    u64 val)
2374 {
2375 	u32 idx = baser - its->tables;
2376 
2377 	gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
2378 	baser->val = its_read_baser(its, baser);
2379 }
2380 
2381 static int its_setup_baser(struct its_node *its, struct its_baser *baser,
2382 			   u64 cache, u64 shr, u32 order, bool indirect)
2383 {
2384 	u64 val = its_read_baser(its, baser);
2385 	u64 esz = GITS_BASER_ENTRY_SIZE(val);
2386 	u64 type = GITS_BASER_TYPE(val);
2387 	u64 baser_phys, tmp;
2388 	u32 alloc_pages, psz;
2389 	struct page *page;
2390 	void *base;
2391 
2392 	psz = baser->psz;
2393 	alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
2394 	if (alloc_pages > GITS_BASER_PAGES_MAX) {
2395 		pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
2396 			&its->phys_base, its_base_type_string[type],
2397 			alloc_pages, GITS_BASER_PAGES_MAX);
2398 		alloc_pages = GITS_BASER_PAGES_MAX;
2399 		order = get_order(GITS_BASER_PAGES_MAX * psz);
2400 	}
2401 
2402 	page = its_alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
2403 	if (!page)
2404 		return -ENOMEM;
2405 
2406 	base = (void *)page_address(page);
2407 	baser_phys = virt_to_phys(base);
2408 
2409 	/* Check if the physical address of the memory is above 48bits */
2410 	if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
2411 
2412 		/* 52bit PA is supported only when PageSize=64K */
2413 		if (psz != SZ_64K) {
2414 			pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
2415 			its_free_pages(base, order);
2416 			return -ENXIO;
2417 		}
2418 
2419 		/* Convert 52bit PA to 48bit field */
2420 		baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
2421 	}
2422 
2423 retry_baser:
2424 	val = (baser_phys					 |
2425 		(type << GITS_BASER_TYPE_SHIFT)			 |
2426 		((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT)	 |
2427 		((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT)	 |
2428 		cache						 |
2429 		shr						 |
2430 		GITS_BASER_VALID);
2431 
2432 	val |=	indirect ? GITS_BASER_INDIRECT : 0x0;
2433 
2434 	switch (psz) {
2435 	case SZ_4K:
2436 		val |= GITS_BASER_PAGE_SIZE_4K;
2437 		break;
2438 	case SZ_16K:
2439 		val |= GITS_BASER_PAGE_SIZE_16K;
2440 		break;
2441 	case SZ_64K:
2442 		val |= GITS_BASER_PAGE_SIZE_64K;
2443 		break;
2444 	}
2445 
2446 	if (!shr)
2447 		gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
2448 
2449 	its_write_baser(its, baser, val);
2450 	tmp = baser->val;
2451 
2452 	if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
2453 		/*
2454 		 * Shareability didn't stick. Just use
2455 		 * whatever the read reported, which is likely
2456 		 * to be the only thing this redistributor
2457 		 * supports. If that's zero, make it
2458 		 * non-cacheable as well.
2459 		 */
2460 		shr = tmp & GITS_BASER_SHAREABILITY_MASK;
2461 		if (!shr)
2462 			cache = GITS_BASER_nC;
2463 
2464 		goto retry_baser;
2465 	}
2466 
2467 	if (val != tmp) {
2468 		pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
2469 		       &its->phys_base, its_base_type_string[type],
2470 		       val, tmp);
2471 		its_free_pages(base, order);
2472 		return -ENXIO;
2473 	}
2474 
2475 	baser->order = order;
2476 	baser->base = base;
2477 	baser->psz = psz;
2478 	tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
2479 
2480 	pr_info("ITS@%pa: allocated %d %s @%llx (%s, esz %d, psz %dK, shr %d)\n",
2481 		&its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
2482 		its_base_type_string[type],
2483 		(u64)virt_to_phys(base),
2484 		indirect ? "indirect" : "flat", (int)esz,
2485 		psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
2486 
2487 	return 0;
2488 }
2489 
2490 static bool its_parse_indirect_baser(struct its_node *its,
2491 				     struct its_baser *baser,
2492 				     u32 *order, u32 ids)
2493 {
2494 	u64 tmp = its_read_baser(its, baser);
2495 	u64 type = GITS_BASER_TYPE(tmp);
2496 	u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
2497 	u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
2498 	u32 new_order = *order;
2499 	u32 psz = baser->psz;
2500 	bool indirect = false;
2501 
2502 	/* No need to enable Indirection if memory requirement < (psz*2)bytes */
2503 	if ((esz << ids) > (psz * 2)) {
2504 		/*
2505 		 * Find out whether hw supports a single or two-level table
2506 		 * by reading bit at offset '62' after writing '1' to it.
2507 		 */
2508 		its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
2509 		indirect = !!(baser->val & GITS_BASER_INDIRECT);
2510 
2511 		if (indirect) {
2512 			/*
2513 			 * The size of the lvl2 table is equal to ITS page size
2514 			 * which is 'psz'. To compute the lvl1 table size,
2515 			 * subtract the ID bits resolved by a lvl2 table from
2516 			 * 'ids' (as reported by the ITS hardware); the lvl1
2517 			 * table then needs one entry per remaining ID.
2518 			 */
2519 			ids -= ilog2(psz / (int)esz);
2520 			esz = GITS_LVL1_ENTRY_SIZE;
2521 		}
2522 	}
2523 
2524 	/*
2525 	 * Allocate as many entries as required to fit the
2526 	 * range of device IDs that the ITS can grok... The ID
2527 	 * space being incredibly sparse, this results in a
2528 	 * massive waste of memory if the two-level device table
2529 	 * feature is not supported by hardware.
2530 	 */
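	/*
	 * Illustrative example (made-up sizes): 8-byte device table entries
	 * and 20 DeviceID bits would need 8MB flat. With 64kB ITS pages, a
	 * lvl2 page resolves 13 ID bits, so the lvl1 table only needs 2^7
	 * eight-byte entries (1kB), and lvl2 pages are allocated on demand
	 * in its_alloc_table_entry().
	 */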
2531 	new_order = max_t(u32, get_order(esz << ids), new_order);
2532 	if (new_order > MAX_PAGE_ORDER) {
2533 		new_order = MAX_PAGE_ORDER;
2534 		ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
2535 		pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n",
2536 			&its->phys_base, its_base_type_string[type],
2537 			device_ids(its), ids);
2538 	}
2539 
2540 	*order = new_order;
2541 
2542 	return indirect;
2543 }
2544 
2545 static u32 compute_common_aff(u64 val)
2546 {
2547 	u32 aff, clpiaff;
2548 
2549 	aff = FIELD_GET(GICR_TYPER_AFFINITY, val);
2550 	clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val);
2551 
2552 	return aff & ~(GENMASK(31, 0) >> (clpiaff * 8));
2553 }
2554 
2555 static u32 compute_its_aff(struct its_node *its)
2556 {
2557 	u64 val;
2558 	u32 svpet;
2559 
2560 	/*
2561 	 * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute
2562 	 * the resulting affinity. We then use that to see if it matches
2563 	 * our own affinity.
2564 	 */
2565 	svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
2566 	val  = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet);
2567 	val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr);
2568 	return compute_common_aff(val);
2569 }
2570 
2571 static struct its_node *find_sibling_its(struct its_node *cur_its)
2572 {
2573 	struct its_node *its;
2574 	u32 aff;
2575 
2576 	if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer))
2577 		return NULL;
2578 
2579 	aff = compute_its_aff(cur_its);
2580 
2581 	list_for_each_entry(its, &its_nodes, entry) {
2582 		u64 baser;
2583 
2584 		if (!is_v4_1(its) || its == cur_its)
2585 			continue;
2586 
2587 		if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2588 			continue;
2589 
2590 		if (aff != compute_its_aff(its))
2591 			continue;
2592 
2593 		/* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
2594 		baser = its->tables[2].val;
2595 		if (!(baser & GITS_BASER_VALID))
2596 			continue;
2597 
2598 		return its;
2599 	}
2600 
2601 	return NULL;
2602 }
2603 
2604 static void its_free_tables(struct its_node *its)
2605 {
2606 	int i;
2607 
2608 	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2609 		if (its->tables[i].base) {
2610 			its_free_pages(its->tables[i].base, its->tables[i].order);
2611 			its->tables[i].base = NULL;
2612 		}
2613 	}
2614 }
2615 
2616 static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser)
2617 {
2618 	u64 psz = SZ_64K;
2619 
2620 	while (psz) {
2621 		u64 val, gpsz;
2622 
2623 		val = its_read_baser(its, baser);
2624 		val &= ~GITS_BASER_PAGE_SIZE_MASK;
2625 
2626 		switch (psz) {
2627 		case SZ_64K:
2628 			gpsz = GITS_BASER_PAGE_SIZE_64K;
2629 			break;
2630 		case SZ_16K:
2631 			gpsz = GITS_BASER_PAGE_SIZE_16K;
2632 			break;
2633 		case SZ_4K:
2634 		default:
2635 			gpsz = GITS_BASER_PAGE_SIZE_4K;
2636 			break;
2637 		}
2638 
2639 		gpsz >>= GITS_BASER_PAGE_SIZE_SHIFT;
2640 
2641 		val |= FIELD_PREP(GITS_BASER_PAGE_SIZE_MASK, gpsz);
2642 		its_write_baser(its, baser, val);
2643 
2644 		if (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser->val) == gpsz)
2645 			break;
2646 
2647 		switch (psz) {
2648 		case SZ_64K:
2649 			psz = SZ_16K;
2650 			break;
2651 		case SZ_16K:
2652 			psz = SZ_4K;
2653 			break;
2654 		case SZ_4K:
2655 		default:
2656 			return -1;
2657 		}
2658 	}
2659 
2660 	baser->psz = psz;
2661 	return 0;
2662 }
2663 
2664 static int its_alloc_tables(struct its_node *its)
2665 {
2666 	u64 shr = GITS_BASER_InnerShareable;
2667 	u64 cache = GITS_BASER_RaWaWb;
2668 	int err, i;
2669 
2670 	if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
2671 		/* erratum 24313: ignore memory access type */
2672 		cache = GITS_BASER_nCnB;
2673 
2674 	if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE) {
2675 		cache = GITS_BASER_nC;
2676 		shr = 0;
2677 	}
2678 
2679 	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2680 		struct its_baser *baser = its->tables + i;
2681 		u64 val = its_read_baser(its, baser);
2682 		u64 type = GITS_BASER_TYPE(val);
2683 		bool indirect = false;
2684 		u32 order;
2685 
2686 		if (type == GITS_BASER_TYPE_NONE)
2687 			continue;
2688 
2689 		if (its_probe_baser_psz(its, baser)) {
2690 			its_free_tables(its);
2691 			return -ENXIO;
2692 		}
2693 
2694 		order = get_order(baser->psz);
2695 
2696 		switch (type) {
2697 		case GITS_BASER_TYPE_DEVICE:
2698 			indirect = its_parse_indirect_baser(its, baser, &order,
2699 							    device_ids(its));
2700 			break;
2701 
2702 		case GITS_BASER_TYPE_VCPU:
2703 			if (is_v4_1(its)) {
2704 				struct its_node *sibling;
2705 
2706 				WARN_ON(i != 2);
2707 				if ((sibling = find_sibling_its(its))) {
2708 					*baser = sibling->tables[2];
2709 					its_write_baser(its, baser, baser->val);
2710 					continue;
2711 				}
2712 			}
2713 
2714 			indirect = its_parse_indirect_baser(its, baser, &order,
2715 							    ITS_MAX_VPEID_BITS);
2716 			break;
2717 		}
2718 
2719 		err = its_setup_baser(its, baser, cache, shr, order, indirect);
2720 		if (err < 0) {
2721 			its_free_tables(its);
2722 			return err;
2723 		}
2724 
2725 		/* Update settings which will be used for next BASERn */
2726 		cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
2727 		shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
2728 	}
2729 
2730 	return 0;
2731 }
2732 
2733 static u64 inherit_vpe_l1_table_from_its(void)
2734 {
2735 	struct its_node *its;
2736 	u64 val;
2737 	u32 aff;
2738 
2739 	val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2740 	aff = compute_common_aff(val);
2741 
2742 	list_for_each_entry(its, &its_nodes, entry) {
2743 		u64 baser, addr;
2744 
2745 		if (!is_v4_1(its))
2746 			continue;
2747 
2748 		if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2749 			continue;
2750 
2751 		if (aff != compute_its_aff(its))
2752 			continue;
2753 
2754 		/* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
2755 		baser = its->tables[2].val;
2756 		if (!(baser & GITS_BASER_VALID))
2757 			continue;
2758 
2759 		/* We have a winner! */
2760 		gic_data_rdist()->vpe_l1_base = its->tables[2].base;
2761 
2762 		val  = GICR_VPROPBASER_4_1_VALID;
2763 		if (baser & GITS_BASER_INDIRECT)
2764 			val |= GICR_VPROPBASER_4_1_INDIRECT;
2765 		val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE,
2766 				  FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser));
2767 		switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) {
2768 		case GIC_PAGE_SIZE_64K:
2769 			addr = GITS_BASER_ADDR_48_to_52(baser);
2770 			break;
2771 		default:
2772 			addr = baser & GENMASK_ULL(47, 12);
2773 			break;
2774 		}
2775 		val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12);
2776 		if (rdists_support_shareable()) {
2777 			val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
2778 					  FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
2779 			val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
2780 					  FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
2781 		}
2782 		val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1);
2783 
2784 		*this_cpu_ptr(&local_4_1_its) = its;
2785 		return val;
2786 	}
2787 
2788 	return 0;
2789 }
2790 
2791 static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
2792 {
2793 	u32 aff;
2794 	u64 val;
2795 	int cpu;
2796 
2797 	val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2798 	aff = compute_common_aff(val);
2799 
2800 	for_each_possible_cpu(cpu) {
2801 		void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
2802 
2803 		if (!base || cpu == smp_processor_id())
2804 			continue;
2805 
2806 		val = gic_read_typer(base + GICR_TYPER);
2807 		if (aff != compute_common_aff(val))
2808 			continue;
2809 
2810 		/*
2811 		 * At this point, we have a victim. This particular CPU
2812 		 * has already booted, and has an affinity that matches
2813 		 * ours wrt CommonLPIAff. Let's use its own VPROPBASER.
2814 		 * Make sure we don't write the Z bit in that case.
2815 		 */
2816 		val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
2817 		val &= ~GICR_VPROPBASER_4_1_Z;
2818 
2819 		gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base;
2820 		*mask = gic_data_rdist_cpu(cpu)->vpe_table_mask;
2821 
2822 		*this_cpu_ptr(&local_4_1_its) = *per_cpu_ptr(&local_4_1_its, cpu);
2823 		return val;
2824 	}
2825 
2826 	return 0;
2827 }
2828 
2829 static bool allocate_vpe_l2_table(int cpu, u32 id)
2830 {
2831 	void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
2832 	unsigned int psz, esz, idx, npg, gpsz;
2833 	u64 val;
2834 	struct page *page;
2835 	__le64 *table;
2836 
2837 	if (!gic_rdists->has_rvpeid)
2838 		return true;
2839 
2840 	/* Skip non-present CPUs */
2841 	if (!base)
2842 		return true;
2843 
2844 	val  = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
2845 
2846 	esz  = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1;
2847 	gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
2848 	npg  = FIELD_GET(GICR_VPROPBASER_4_1_SIZE, val) + 1;
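	/* The entry size is expressed in 64-bit doublewords, hence the SZ_8 factors below. */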
2849 
2850 	switch (gpsz) {
2851 	default:
2852 		WARN_ON(1);
2853 		fallthrough;
2854 	case GIC_PAGE_SIZE_4K:
2855 		psz = SZ_4K;
2856 		break;
2857 	case GIC_PAGE_SIZE_16K:
2858 		psz = SZ_16K;
2859 		break;
2860 	case GIC_PAGE_SIZE_64K:
2861 		psz = SZ_64K;
2862 		break;
2863 	}
2864 
2865 	/* Don't allow vpe_id that exceeds single, flat table limit */
2866 	if (!(val & GICR_VPROPBASER_4_1_INDIRECT))
2867 		return (id < (npg * psz / (esz * SZ_8)));
2868 
2869 	/* Compute 1st level table index & check if that exceeds table limit */
2870 	idx = id >> ilog2(psz / (esz * SZ_8));
2871 	if (idx >= (npg * psz / GITS_LVL1_ENTRY_SIZE))
2872 		return false;
2873 
2874 	table = gic_data_rdist_cpu(cpu)->vpe_l1_base;
2875 
2876 	/* Allocate memory for 2nd level table */
2877 	if (!table[idx]) {
2878 		page = its_alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
2879 		if (!page)
2880 			return false;
2881 
2882 		/* Flush Lvl2 table to PoC if hw doesn't support coherency */
2883 		if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
2884 			gic_flush_dcache_to_poc(page_address(page), psz);
2885 
2886 		table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
2887 
2888 		/* Flush Lvl1 entry to PoC if hw doesn't support coherency */
2889 		if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
2890 			gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
2891 
2892 		/* Ensure updated table contents are visible to RD hardware */
2893 		dsb(sy);
2894 	}
2895 
2896 	return true;
2897 }
2898 
2899 static int allocate_vpe_l1_table(void)
2900 {
2901 	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2902 	u64 val, gpsz, npg, pa;
2903 	unsigned int psz = SZ_64K;
2904 	unsigned int np, epp, esz;
2905 	struct page *page;
2906 
2907 	if (!gic_rdists->has_rvpeid)
2908 		return 0;
2909 
2910 	/*
2911 	 * if VPENDBASER.Valid is set, disable any previously programmed
2912 	 * VPE by setting PendingLast while clearing Valid. This has the
2913 	 * effect of making sure no doorbell will be generated and we can
2914 	 * then safely clear VPROPBASER.Valid.
2915 	 */
2916 	if (gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid)
2917 		gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
2918 				      vlpi_base + GICR_VPENDBASER);
2919 
2920 	/*
2921 	 * If we can inherit the configuration from another RD, let's do
2922 	 * so. Otherwise, we have to go through the allocation process. We
2923 	 * assume that all RDs have the exact same requirements, as
2924 	 * nothing will work otherwise.
2925 	 */
2926 	val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask);
2927 	if (val & GICR_VPROPBASER_4_1_VALID)
2928 		goto out;
2929 
2930 	gic_data_rdist()->vpe_table_mask = kzalloc_obj(cpumask_t, GFP_ATOMIC);
2931 	if (!gic_data_rdist()->vpe_table_mask)
2932 		return -ENOMEM;
2933 
2934 	val = inherit_vpe_l1_table_from_its();
2935 	if (val & GICR_VPROPBASER_4_1_VALID)
2936 		goto out;
2937 
2938 	/* First probe the page size */
2939 	val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K);
2940 	gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2941 	val = gicr_read_vpropbaser(vlpi_base + GICR_VPROPBASER);
2942 	gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
2943 	esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val);
2944 
2945 	switch (gpsz) {
2946 	default:
2947 		gpsz = GIC_PAGE_SIZE_4K;
2948 		fallthrough;
2949 	case GIC_PAGE_SIZE_4K:
2950 		psz = SZ_4K;
2951 		break;
2952 	case GIC_PAGE_SIZE_16K:
2953 		psz = SZ_16K;
2954 		break;
2955 	case GIC_PAGE_SIZE_64K:
2956 		psz = SZ_64K;
2957 		break;
2958 	}
2959 
2960 	/*
2961 	 * Start populating the register from scratch, including RO fields
2962 	 * (which we want to print in debug cases...)
2963 	 */
2964 	val = 0;
2965 	val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz);
2966 	val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz);
2967 
2968 	/* How many entries per GIC page? */
2969 	esz++;
2970 	epp = psz / (esz * SZ_8);
2971 
2972 	/*
2973 	 * If we need more than just a single L1 page, flag the table
2974 	 * as indirect and compute the number of required L1 pages.
2975 	 */
2976 	if (epp < ITS_MAX_VPEID) {
2977 		int nl2;
2978 
2979 		val |= GICR_VPROPBASER_4_1_INDIRECT;
2980 
2981 		/* Number of L2 pages required to cover the VPEID space */
2982 		nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp);
2983 
2984 		/* Number of L1 pages to point to the L2 pages */
2985 		npg = DIV_ROUND_UP(nl2 * SZ_8, psz);
2986 	} else {
2987 		npg = 1;
2988 	}
2989 
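	/*
	 * Illustrative example (made-up sizes): with 64kB GIC pages and
	 * 16-byte (two doubleword) entries, epp = 4096; covering 64k vPE IDs
	 * then takes nl2 = 16 L2 pages, which a single L1 page can point to
	 * (npg = 1).
	 */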
2990 	val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg - 1);
2991 
2992 	/* Right, that's the number of CPU pages we need for L1 */
2993 	np = DIV_ROUND_UP(npg * psz, PAGE_SIZE);
2994 
2995 	pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n",
2996 		 np, npg, psz, epp, esz);
2997 	page = its_alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
2998 	if (!page)
2999 		return -ENOMEM;
3000 
3001 	gic_data_rdist()->vpe_l1_base = page_address(page);
3002 	pa = virt_to_phys(page_address(page));
3003 	WARN_ON(!IS_ALIGNED(pa, psz));
3004 
3005 	val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12);
3006 	if (rdists_support_shareable()) {
3007 		val |= GICR_VPROPBASER_RaWb;
3008 		val |= GICR_VPROPBASER_InnerShareable;
3009 	}
3010 	val |= GICR_VPROPBASER_4_1_Z;
3011 	val |= GICR_VPROPBASER_4_1_VALID;
3012 
3013 out:
3014 	gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
3015 	cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask);
3016 
3017 	pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n",
3018 		 smp_processor_id(), val,
3019 		 cpumask_pr_args(gic_data_rdist()->vpe_table_mask));
3020 
3021 	return 0;
3022 }
3023 
3024 static int its_alloc_collections(struct its_node *its)
3025 {
3026 	int i;
3027 
3028 	its->collections = kzalloc_objs(*its->collections, nr_cpu_ids);
3029 	if (!its->collections)
3030 		return -ENOMEM;
3031 
3032 	for (i = 0; i < nr_cpu_ids; i++)
3033 		its->collections[i].target_address = ~0ULL;
3034 
3035 	return 0;
3036 }
3037 
3038 static struct page *its_allocate_pending_table(gfp_t gfp_flags)
3039 {
3040 	struct page *pend_page;
3041 
3042 	pend_page = its_alloc_pages(gfp_flags | __GFP_ZERO, get_order(LPI_PENDBASE_SZ));
3043 	if (!pend_page)
3044 		return NULL;
3045 
3046 	/* Make sure the GIC will observe the zero-ed page */
3047 	gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
3048 
3049 	return pend_page;
3050 }
3051 
3052 static void its_free_pending_table(struct page *pt)
3053 {
3054 	its_free_pages(page_address(pt), get_order(LPI_PENDBASE_SZ));
3055 }
3056 
3057 /*
3058  * Booting with kdump and LPIs enabled is generally fine. Any other
3059  * case is wrong in the absence of firmware/EFI support.
3060  */
3061 static bool enabled_lpis_allowed(void)
3062 {
3063 	phys_addr_t addr;
3064 	u64 val;
3065 
3066 	/* Check whether the property table is in a reserved region */
3067 	val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
3068 	addr = val & GENMASK_ULL(51, 12);
3069 
3070 	return gic_check_reserved_range(addr, LPI_PROPBASE_SZ);
3071 }
3072 
3073 static int __init allocate_lpi_tables(void)
3074 {
3075 	u64 val;
3076 	int err, cpu;
3077 
3078 	/*
3079 	 * If LPIs are enabled while we run this from the boot CPU,
3080 	 * flag the RD tables as pre-allocated if the stars do align.
3081 	 */
3082 	val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR);
3083 	if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) {
3084 		gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED |
3085 				      RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING);
3086 		pr_info("GICv3: Using preallocated redistributor tables\n");
3087 	}
3088 
3089 	err = its_setup_lpi_prop_table();
3090 	if (err)
3091 		return err;
3092 
3093 	/*
3094 	 * We allocate all the pending tables anyway, as we may have a
3095 	 * mix of RDs that have had LPIs enabled, and some that
3096 	 * don't. We'll free the unused ones as each CPU comes online.
3097 	 */
3098 	for_each_possible_cpu(cpu) {
3099 		struct page *pend_page;
3100 
3101 		pend_page = its_allocate_pending_table(GFP_NOWAIT);
3102 		if (!pend_page) {
3103 			pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
3104 			return -ENOMEM;
3105 		}
3106 
3107 		gic_data_rdist_cpu(cpu)->pend_page = pend_page;
3108 	}
3109 
3110 	return 0;
3111 }
3112 
3113 static u64 read_vpend_dirty_clear(void __iomem *vlpi_base)
3114 {
3115 	u32 count = 1000000;	/* 1s! */
3116 	bool clean;
3117 	u64 val;
3118 
3119 	do {
3120 		val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
3121 		clean = !(val & GICR_VPENDBASER_Dirty);
3122 		if (!clean) {
3123 			count--;
3124 			cpu_relax();
3125 			udelay(1);
3126 		}
3127 	} while (!clean && count);
3128 
3129 	if (unlikely(!clean))
3130 		pr_err_ratelimited("ITS virtual pending table not cleaning\n");
3131 
3132 	return val;
3133 }
3134 
3135 static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
3136 {
3137 	u64 val;
3138 
3139 	/* Make sure we wait until the RD is done with the initial scan */
3140 	val = read_vpend_dirty_clear(vlpi_base);
3141 	val &= ~GICR_VPENDBASER_Valid;
3142 	val &= ~clr;
3143 	val |= set;
3144 	gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
3145 
3146 	val = read_vpend_dirty_clear(vlpi_base);
3147 	if (unlikely(val & GICR_VPENDBASER_Dirty))
3148 		val |= GICR_VPENDBASER_PendingLast;
3149 
3150 	return val;
3151 }
3152 
3153 static void its_cpu_init_lpis(void)
3154 {
3155 	void __iomem *rbase = gic_data_rdist_rd_base();
3156 	struct page *pend_page;
3157 	phys_addr_t paddr;
3158 	u64 val, tmp;
3159 
3160 	if (gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED)
3161 		return;
3162 
3163 	val = readl_relaxed(rbase + GICR_CTLR);
3164 	if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
3165 	    (val & GICR_CTLR_ENABLE_LPIS)) {
3166 		/*
3167 		 * Check that we get the same property table on all
3168 		 * RDs. If we don't, this is hopeless.
3169 		 */
3170 		paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
3171 		paddr &= GENMASK_ULL(51, 12);
3172 		if (WARN_ON(gic_rdists->prop_table_pa != paddr))
3173 			add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
3174 
3175 		paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
3176 		paddr &= GENMASK_ULL(51, 16);
3177 
3178 		WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
3179 		gic_data_rdist()->flags |= RD_LOCAL_PENDTABLE_PREALLOCATED;
3180 
3181 		goto out;
3182 	}
3183 
3184 	pend_page = gic_data_rdist()->pend_page;
3185 	paddr = page_to_phys(pend_page);
3186 
3187 	/* set PROPBASE */
3188 	val = (gic_rdists->prop_table_pa |
3189 	       GICR_PROPBASER_InnerShareable |
3190 	       GICR_PROPBASER_RaWaWb |
3191 	       ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
3192 
3193 	gicr_write_propbaser(val, rbase + GICR_PROPBASER);
3194 	tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
3195 
3196 	if (!rdists_support_shareable())
3197 		tmp &= ~GICR_PROPBASER_SHAREABILITY_MASK;
3198 
3199 	if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
3200 		if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
3201 			/*
3202 			 * The HW reports non-shareable, we must
3203 			 * remove the cacheability attributes as
3204 			 * well.
3205 			 */
3206 			val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
3207 				 GICR_PROPBASER_CACHEABILITY_MASK);
3208 			val |= GICR_PROPBASER_nC;
3209 			gicr_write_propbaser(val, rbase + GICR_PROPBASER);
3210 		}
3211 		pr_info_once("GIC: using cache flushing for LPI property table\n");
3212 		gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
3213 	}
3214 
3215 	/* set PENDBASE */
3216 	val = (page_to_phys(pend_page) |
3217 	       GICR_PENDBASER_InnerShareable |
3218 	       GICR_PENDBASER_RaWaWb);
3219 
3220 	gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
3221 	tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
3222 
3223 	if (!rdists_support_shareable())
3224 		tmp &= ~GICR_PENDBASER_SHAREABILITY_MASK;
3225 
3226 	if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
3227 		/*
3228 		 * The HW reports non-shareable, we must remove the
3229 		 * cacheability attributes as well.
3230 		 */
3231 		val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
3232 			 GICR_PENDBASER_CACHEABILITY_MASK);
3233 		val |= GICR_PENDBASER_nC;
3234 		gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
3235 	}
3236 
3237 	/* Enable LPIs */
3238 	val = readl_relaxed(rbase + GICR_CTLR);
3239 	val |= GICR_CTLR_ENABLE_LPIS;
3240 	writel_relaxed(val, rbase + GICR_CTLR);
3241 
3242 out:
3243 	if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
3244 		void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3245 
3246 		/*
3247 		 * It's possible for a CPU to receive VLPIs before it is
3248 		 * scheduled as a vPE, especially for the first CPU, and a
3249 		 * VLPI with an INTID larger than 2^(IDbits+1) will be
3250 		 * considered out of range and dropped by the GIC.
3251 		 * So we initialize IDbits to a known value to avoid VLPI drops.
3252 		 */
3253 		val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
3254 		pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
3255 			smp_processor_id(), val);
3256 		gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
3257 
3258 		/*
3259 		 * Also clear the Valid bit of GICR_VPENDBASER, in case some
3260 		 * ancient programming was left in place and could end up
3261 		 * corrupting memory.
3262 		 */
3263 		val = its_clear_vpend_valid(vlpi_base, 0, 0);
3264 	}
3265 
3266 	if (allocate_vpe_l1_table()) {
3267 		/*
3268 		 * If the allocation has failed, we're in massive trouble.
3269 		 * Disable direct injection, and pray that no VM was
3270 		 * already running...
3271 		 */
3272 		gic_rdists->has_rvpeid = false;
3273 		gic_rdists->has_vlpis = false;
3274 	}
3275 
3276 	/* Make sure the GIC has seen the above */
3277 	dsb(sy);
3278 	gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED;
3279 	pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
3280 		smp_processor_id(),
3281 		gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED ?
3282 		"reserved" : "allocated",
3283 		&paddr);
3284 }
3285 
3286 static void its_cpu_init_collection(struct its_node *its)
3287 {
3288 	int cpu = smp_processor_id();
3289 	u64 target;
3290 
3291 	/* avoid cross-node collections and their mapping */
3292 	if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
3293 		struct device_node *cpu_node;
3294 
3295 		cpu_node = of_get_cpu_node(cpu, NULL);
3296 		if (its->numa_node != NUMA_NO_NODE &&
3297 			its->numa_node != of_node_to_nid(cpu_node))
3298 			return;
3299 	}
3300 
3301 	/*
3302 	 * We now have to bind each collection to its target
3303 	 * redistributor.
3304 	 */
3305 	if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
3306 		/*
3307 		 * This ITS wants the physical address of the
3308 		 * redistributor.
3309 		 */
3310 		target = gic_data_rdist()->phys_base;
3311 	} else {
3312 		/* This ITS wants a linear CPU number. */
3313 		target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
3314 		target = GICR_TYPER_CPU_NUMBER(target) << 16;
3315 	}
3316 
3317 	/* Perform collection mapping */
3318 	its->collections[cpu].target_address = target;
3319 	its->collections[cpu].col_id = cpu;
3320 
3321 	its_send_mapc(its, &its->collections[cpu], 1);
3322 	its_send_invall(its, &its->collections[cpu]);
3323 }
3324 
3325 static void its_cpu_init_collections(void)
3326 {
3327 	struct its_node *its;
3328 
3329 	raw_spin_lock(&its_lock);
3330 
3331 	list_for_each_entry(its, &its_nodes, entry)
3332 		its_cpu_init_collection(its);
3333 
3334 	raw_spin_unlock(&its_lock);
3335 }
3336 
3337 static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
3338 {
3339 	struct its_device *its_dev = NULL, *tmp;
3340 	unsigned long flags;
3341 
3342 	raw_spin_lock_irqsave(&its->lock, flags);
3343 
3344 	list_for_each_entry(tmp, &its->its_device_list, entry) {
3345 		if (tmp->device_id == dev_id) {
3346 			its_dev = tmp;
3347 			break;
3348 		}
3349 	}
3350 
3351 	raw_spin_unlock_irqrestore(&its->lock, flags);
3352 
3353 	return its_dev;
3354 }
3355 
3356 static struct its_baser *its_get_baser(struct its_node *its, u32 type)
3357 {
3358 	int i;
3359 
3360 	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
3361 		if (GITS_BASER_TYPE(its->tables[i].val) == type)
3362 			return &its->tables[i];
3363 	}
3364 
3365 	return NULL;
3366 }
3367 
3368 static bool its_alloc_table_entry(struct its_node *its,
3369 				  struct its_baser *baser, u32 id)
3370 {
3371 	struct page *page;
3372 	u32 esz, idx;
3373 	__le64 *table;
3374 
3375 	/* Don't allow device id that exceeds single, flat table limit */
3376 	esz = GITS_BASER_ENTRY_SIZE(baser->val);
3377 	if (!(baser->val & GITS_BASER_INDIRECT))
3378 		return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
3379 
3380 	/* Compute 1st level table index & check if that exceeds table limit */
3381 	idx = id >> ilog2(baser->psz / esz);
3382 	if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
3383 		return false;
3384 
3385 	table = baser->base;
3386 
3387 	/* Allocate memory for 2nd level table */
3388 	if (!table[idx]) {
3389 		page = its_alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
3390 					    get_order(baser->psz));
3391 		if (!page)
3392 			return false;
3393 
3394 		/* Flush Lvl2 table to PoC if hw doesn't support coherency */
3395 		if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
3396 			gic_flush_dcache_to_poc(page_address(page), baser->psz);
3397 
3398 		table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
3399 
3400 		/* Flush Lvl1 entry to PoC if hw doesn't support coherency */
3401 		if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
3402 			gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
3403 
3404 		/* Ensure updated table contents are visible to ITS hardware */
3405 		dsb(sy);
3406 	}
3407 
3408 	return true;
3409 }
3410 
3411 static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
3412 {
3413 	struct its_baser *baser;
3414 
3415 	baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
3416 
3417 	/* Don't allow device id that exceeds ITS hardware limit */
3418 	if (!baser)
3419 		return (ilog2(dev_id) < device_ids(its));
3420 
3421 	return its_alloc_table_entry(its, baser, dev_id);
3422 }
3423 
3424 static bool its_alloc_vpe_table(u32 vpe_id)
3425 {
3426 	struct its_node *its;
3427 	int cpu;
3428 
3429 	/*
3430 	 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
3431 	 * could try and only do it on ITSs corresponding to devices
3432 	 * that have interrupts targeted at this VPE, but the
3433 	 * complexity becomes crazy (and you have tons of memory
3434 	 * anyway, right?).
3435 	 */
3436 	list_for_each_entry(its, &its_nodes, entry) {
3437 		struct its_baser *baser;
3438 
3439 		if (!is_v4(its))
3440 			continue;
3441 
3442 		baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
3443 		if (!baser)
3444 			return false;
3445 
3446 		if (!its_alloc_table_entry(its, baser, vpe_id))
3447 			return false;
3448 	}
3449 
3450 	/* Not v4.1? No need to iterate the RDs, return early. */
3451 	if (!gic_rdists->has_rvpeid)
3452 		return true;
3453 
3454 	/*
3455 	 * Make sure the L2 tables are allocated for all copies of
3456 	 * the L1 table on *all* v4.1 RDs.
3457 	 */
3458 	for_each_possible_cpu(cpu) {
3459 		if (!allocate_vpe_l2_table(cpu, vpe_id))
3460 			return false;
3461 	}
3462 
3463 	return true;
3464 }
3465 
3466 static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
3467 					    int nvecs, bool alloc_lpis)
3468 {
3469 	struct its_device *dev;
3470 	unsigned long *lpi_map = NULL;
3471 	unsigned long flags;
3472 	u16 *col_map = NULL;
3473 	void *itt;
3474 	int lpi_base;
3475 	int nr_lpis;
3476 	int nr_ites;
3477 	int id_bits;
3478 	int sz;
3479 
3480 	if (!its_alloc_device_table(its, dev_id))
3481 		return NULL;
3482 
3483 	if (WARN_ON(!is_power_of_2(nvecs)))
3484 		nvecs = roundup_pow_of_two(nvecs);
3485 
3486 	/*
3487 	 * Even if the device wants a single LPI, the ITT must be
3488 	 * sized as a power of two (and you need at least one bit...).
3489 	 * Also honor the ITS's own EID limit.
3490 	 */
3491 	id_bits = FIELD_GET(GITS_TYPER_IDBITS, its->typer) + 1;
3492 	nvecs = min_t(unsigned int, nvecs, BIT(id_bits));
3493 	nr_ites = max(2, nvecs);
3494 	sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
3495 	sz = max(sz, ITS_ITT_ALIGN);
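	/*
	 * For instance (illustrative numbers): 32 events with an 8-byte ITT
	 * entry size need a 256-byte ITT; anything smaller is rounded up to
	 * ITS_ITT_ALIGN.
	 */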
3496 
3497 	itt = itt_alloc_pool(its->numa_node, sz);
3498 
3499 	dev = kzalloc_obj(*dev);
3500 
3501 	if (alloc_lpis) {
3502 		lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
3503 		if (lpi_map)
3504 			col_map = kcalloc(nr_lpis, sizeof(*col_map),
3505 					  GFP_KERNEL);
3506 	} else {
3507 		col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
3508 		nr_lpis = 0;
3509 		lpi_base = 0;
3510 	}
3511 
3512 	if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
3513 		kfree(dev);
3514 		itt_free_pool(itt, sz);
3515 		bitmap_free(lpi_map);
3516 		kfree(col_map);
3517 		return NULL;
3518 	}
3519 
3520 	gic_flush_dcache_to_poc(itt, sz);
3521 
3522 	dev->its = its;
3523 	dev->itt = itt;
3524 	dev->itt_sz = sz;
3525 	dev->nr_ites = nr_ites;
3526 	dev->event_map.lpi_map = lpi_map;
3527 	dev->event_map.col_map = col_map;
3528 	dev->event_map.lpi_base = lpi_base;
3529 	dev->event_map.nr_lpis = nr_lpis;
3530 	raw_spin_lock_init(&dev->event_map.vlpi_lock);
3531 	dev->device_id = dev_id;
3532 	INIT_LIST_HEAD(&dev->entry);
3533 
3534 	raw_spin_lock_irqsave(&its->lock, flags);
3535 	list_add(&dev->entry, &its->its_device_list);
3536 	raw_spin_unlock_irqrestore(&its->lock, flags);
3537 
3538 	/* Map device to its ITT */
3539 	its_send_mapd(dev, 1);
3540 
3541 	return dev;
3542 }
3543 
3544 static void its_free_device(struct its_device *its_dev)
3545 {
3546 	unsigned long flags;
3547 
3548 	raw_spin_lock_irqsave(&its_dev->its->lock, flags);
3549 	list_del(&its_dev->entry);
3550 	raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
3551 	kfree(its_dev->event_map.col_map);
3552 	itt_free_pool(its_dev->itt, its_dev->itt_sz);
3553 	kfree(its_dev);
3554 }
3555 
3556 static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
3557 {
3558 	int idx;
3559 
3560 	/* Find a free LPI region in lpi_map and allocate them. */
3561 	idx = bitmap_find_free_region(dev->event_map.lpi_map,
3562 				      dev->event_map.nr_lpis,
3563 				      get_count_order(nvecs));
3564 	if (idx < 0)
3565 		return -ENOSPC;
3566 
3567 	*hwirq = dev->event_map.lpi_base + idx;
3568 
3569 	return 0;
3570 }
3571 
3572 static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
3573 			   int nvec, msi_alloc_info_t *info)
3574 {
3575 	struct its_node *its;
3576 	struct its_device *its_dev;
3577 	struct msi_domain_info *msi_info;
3578 	u32 dev_id;
3579 	int err = 0;
3580 
3581 	/*
3582 	 * We ignore "dev" entirely, and rely on the dev_id that has
3583 	 * been passed via the scratchpad. This limits this domain's
3584 	 * usefulness to upper layers that definitely know that they
3585 	 * are built on top of the ITS.
3586 	 */
3587 	dev_id = info->scratchpad[0].ul;
3588 
3589 	msi_info = msi_get_domain_info(domain);
3590 	its = msi_info->data;
3591 
3592 	if (!gic_rdists->has_direct_lpi &&
3593 	    vpe_proxy.dev &&
3594 	    vpe_proxy.dev->its == its &&
3595 	    dev_id == vpe_proxy.dev->device_id) {
3596 		/* Bad luck. Get yourself a better implementation */
3597 		WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
3598 			  dev_id);
3599 		return -EINVAL;
3600 	}
3601 
3602 	mutex_lock(&its->dev_alloc_lock);
3603 	its_dev = its_find_device(its, dev_id);
3604 	if (its_dev) {
3605 		/*
3606 		 * We already have seen this ID, probably through
3607 		 * another alias (PCI bridge of some sort). No need to
3608 		 * create the device.
3609 		 */
3610 		its_dev->shared = true;
3611 		pr_debug("Reusing ITT for devID %x\n", dev_id);
3612 		goto out;
3613 	}
3614 
3615 	its_dev = its_create_device(its, dev_id, nvec, true);
3616 	if (!its_dev) {
3617 		err = -ENOMEM;
3618 		goto out;
3619 	}
3620 
3621 	if (info->flags & MSI_ALLOC_FLAGS_PROXY_DEVICE)
3622 		its_dev->shared = true;
3623 
3624 	pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
3625 out:
3626 	mutex_unlock(&its->dev_alloc_lock);
3627 	info->scratchpad[0].ptr = its_dev;
3628 	return err;
3629 }
3630 
3631 static void its_msi_teardown(struct irq_domain *domain, msi_alloc_info_t *info)
3632 {
3633 	struct its_device *its_dev = info->scratchpad[0].ptr;
3634 
3635 	guard(mutex)(&its_dev->its->dev_alloc_lock);
3636 
3637 	/* If the device is shared, keep everything around */
3638 	if (its_dev->shared)
3639 		return;
3640 
3641 	/* LPIs should have been already unmapped at this stage */
3642 	if (WARN_ON_ONCE(!bitmap_empty(its_dev->event_map.lpi_map,
3643 				       its_dev->event_map.nr_lpis)))
3644 		return;
3645 
3646 	its_lpi_free(its_dev->event_map.lpi_map,
3647 		     its_dev->event_map.lpi_base,
3648 		     its_dev->event_map.nr_lpis);
3649 
3650 	/* Unmap device/itt, and get rid of the tracking */
3651 	its_send_mapd(its_dev, 0);
3652 	its_free_device(its_dev);
3653 }
3654 
3655 static struct msi_domain_ops its_msi_domain_ops = {
3656 	.msi_prepare	= its_msi_prepare,
3657 	.msi_teardown	= its_msi_teardown,
3658 };
3659 
3660 static int its_irq_gic_domain_alloc(struct irq_domain *domain,
3661 				    unsigned int virq,
3662 				    irq_hw_number_t hwirq)
3663 {
3664 	struct irq_fwspec fwspec;
3665 
3666 	if (irq_domain_get_of_node(domain->parent)) {
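		/* DT parent: build a 3-cell GIC specifier <GIC_IRQ_TYPE_LPI, hwirq, trigger> */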
3667 		fwspec.fwnode = domain->parent->fwnode;
3668 		fwspec.param_count = 3;
3669 		fwspec.param[0] = GIC_IRQ_TYPE_LPI;
3670 		fwspec.param[1] = hwirq;
3671 		fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
3672 	} else if (is_fwnode_irqchip(domain->parent->fwnode)) {
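		/* Non-DT (e.g. ACPI) fwnode parent: 2-cell specifier <hwirq, trigger> */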
3673 		fwspec.fwnode = domain->parent->fwnode;
3674 		fwspec.param_count = 2;
3675 		fwspec.param[0] = hwirq;
3676 		fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
3677 	} else {
3678 		return -EINVAL;
3679 	}
3680 
3681 	return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
3682 }
3683 
3684 static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
3685 				unsigned int nr_irqs, void *args)
3686 {
3687 	msi_alloc_info_t *info = args;
3688 	struct its_device *its_dev = info->scratchpad[0].ptr;
3689 	struct its_node *its = its_dev->its;
3690 	struct irq_data *irqd;
3691 	irq_hw_number_t hwirq;
3692 	int err;
3693 	int i;
3694 
3695 	err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
3696 	if (err)
3697 		return err;
3698 
3699 	err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
3700 	if (err)
3701 		return err;
3702 
3703 	for (i = 0; i < nr_irqs; i++) {
3704 		err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
3705 		if (err)
3706 			return err;
3707 
3708 		irq_domain_set_hwirq_and_chip(domain, virq + i,
3709 					      hwirq + i, &its_irq_chip, its_dev);
3710 		irqd = irq_get_irq_data(virq + i);
3711 		irqd_set_single_target(irqd);
3712 		irqd_set_affinity_on_activate(irqd);
3713 		irqd_set_resend_when_in_progress(irqd);
3714 		pr_debug("ID:%d pID:%d vID:%d\n",
3715 			 (int)(hwirq + i - its_dev->event_map.lpi_base),
3716 			 (int)(hwirq + i), virq + i);
3717 	}
3718 
3719 	return 0;
3720 }
3721 
3722 static int its_irq_domain_activate(struct irq_domain *domain,
3723 				   struct irq_data *d, bool reserve)
3724 {
3725 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3726 	u32 event = its_get_event_id(d);
3727 	int cpu;
3728 
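	/* Route the event to an online CPU; the MAPTI below binds it to that CPU's collection */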
3729 	cpu = its_select_cpu(d, cpu_online_mask);
3730 	if (cpu < 0 || cpu >= nr_cpu_ids)
3731 		return -EINVAL;
3732 
3733 	its_inc_lpi_count(d, cpu);
3734 	its_dev->event_map.col_map[event] = cpu;
3735 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
3736 
3737 	/* Map the GIC IRQ and event to the device */
3738 	its_send_mapti(its_dev, d->hwirq, event);
3739 	return 0;
3740 }
3741 
3742 static void its_irq_domain_deactivate(struct irq_domain *domain,
3743 				      struct irq_data *d)
3744 {
3745 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3746 	u32 event = its_get_event_id(d);
3747 
3748 	its_dec_lpi_count(d, its_dev->event_map.col_map[event]);
3749 	/* Stop the delivery of interrupts */
3750 	its_send_discard(its_dev, event);
3751 }
3752 
3753 static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
3754 				unsigned int nr_irqs)
3755 {
3756 	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
3757 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3758 	int i;
3759 
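	/* Events were allocated as one contiguous region, so release the whole range in one go */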
3760 	bitmap_release_region(its_dev->event_map.lpi_map,
3761 			      its_get_event_id(irq_domain_get_irq_data(domain, virq)),
3762 			      get_count_order(nr_irqs));
3763 
3764 	for (i = 0; i < nr_irqs; i++) {
3765 		struct irq_data *data = irq_domain_get_irq_data(domain,
3766 								virq + i);
3767 		/* Nuke the entry in the domain */
3768 		irq_domain_reset_irq_data(data);
3769 	}
3770 
3771 	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
3772 }
3773 
3774 static const struct irq_domain_ops its_domain_ops = {
3775 	.select			= msi_lib_irq_domain_select,
3776 	.alloc			= its_irq_domain_alloc,
3777 	.free			= its_irq_domain_free,
3778 	.activate		= its_irq_domain_activate,
3779 	.deactivate		= its_irq_domain_deactivate,
3780 };
3781 
3782 /*
3783  * This is insane.
3784  *
3785  * If a GICv4.0 doesn't implement Direct LPIs (which is extremely
3786  * likely), the only way to perform an invalidate is to use a fake
3787  * device to issue an INV command, implying that the LPI has first
3788  * been mapped to some event on that device. Since this is not exactly
3789  * cheap, we try to keep that mapping around as long as possible, and
3790  * only issue an UNMAP if we're short on available slots.
3791  *
3792  * Broken by design(tm).
3793  *
3794  * GICv4.1, on the other hand, mandates that we're able to invalidate
3795  * by writing to a MMIO register. It doesn't implement the whole of
3796  * DirectLPI, but that's good enough. And most of the time, we don't
3797  * even have to invalidate anything, as the redistributor can be told
3798  * whether to generate a doorbell or not (we thus leave it enabled,
3799  * always).
3800  */
3801 static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
3802 {
3803 	/* GICv4.1 doesn't use a proxy, so nothing to do here */
3804 	if (gic_rdists->has_rvpeid)
3805 		return;
3806 
3807 	/* Already unmapped? */
3808 	if (vpe->vpe_proxy_event == -1)
3809 		return;
3810 
3811 	its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
3812 	vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
3813 
3814 	/*
3815 	 * We don't track empty slots at all, so let's move the
3816 	 * next_victim pointer if we can quickly reuse that slot
3817 	 * instead of nuking an existing entry. Not clear that this is
3818 	 * always a win though, and this might just generate a ripple
3819 	 * effect... Let's just hope VPEs don't migrate too often.
3820 	 */
3821 	if (vpe_proxy.vpes[vpe_proxy.next_victim])
3822 		vpe_proxy.next_victim = vpe->vpe_proxy_event;
3823 
3824 	vpe->vpe_proxy_event = -1;
3825 }
3826 
3827 static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
3828 {
3829 	/* GICv4.1 doesn't use a proxy, so nothing to do here */
3830 	if (gic_rdists->has_rvpeid)
3831 		return;
3832 
3833 	if (!gic_rdists->has_direct_lpi) {
3834 		unsigned long flags;
3835 
3836 		raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3837 		its_vpe_db_proxy_unmap_locked(vpe);
3838 		raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3839 	}
3840 }
3841 
3842 static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
3843 {
3844 	/* GICv4.1 doesn't use a proxy, so nothing to do here */
3845 	if (gic_rdists->has_rvpeid)
3846 		return;
3847 
3848 	/* Already mapped? */
3849 	if (vpe->vpe_proxy_event != -1)
3850 		return;
3851 
3852 	/* This slot was already allocated. Kick the other VPE out. */
3853 	if (vpe_proxy.vpes[vpe_proxy.next_victim])
3854 		its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
3855 
3856 	/* Map the new VPE instead */
3857 	vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
3858 	vpe->vpe_proxy_event = vpe_proxy.next_victim;
3859 	vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
3860 
3861 	vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
3862 	its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
3863 }
3864 
3865 static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
3866 {
3867 	unsigned long flags;
3868 	struct its_collection *target_col;
3869 
3870 	/* GICv4.1 doesn't use a proxy, so nothing to do here */
3871 	if (gic_rdists->has_rvpeid)
3872 		return;
3873 
3874 	if (gic_rdists->has_direct_lpi) {
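		/* With DirectLPI there is no proxy mapping to move; just clear any doorbell still pending on the old RD */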
3875 		void __iomem *rdbase;
3876 
3877 		rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
3878 		gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
3879 		wait_for_syncr(rdbase);
3880 
3881 		return;
3882 	}
3883 
3884 	raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3885 
3886 	its_vpe_db_proxy_map_locked(vpe);
3887 
3888 	target_col = &vpe_proxy.dev->its->collections[to];
3889 	its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
3890 	vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
3891 
3892 	raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3893 }
3894 
3895 static void its_vpe_4_1_invall_locked(int cpu, struct its_vpe *vpe)
3896 {
3897 	void __iomem *rdbase;
3898 	u64 val;
3899 
3900 	val  = GICR_INVALLR_V;
3901 	val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
3902 
3903 	guard(raw_spinlock)(&gic_data_rdist_cpu(cpu)->rd_lock);
3904 	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
3905 	gic_write_lpir(val, rdbase + GICR_INVALLR);
3906 	wait_for_syncr(rdbase);
3907 }
3908 
3909 static int its_vpe_set_affinity(struct irq_data *d,
3910 				const struct cpumask *mask_val,
3911 				bool force)
3912 {
3913 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3914 	unsigned int from, cpu = nr_cpu_ids;
3915 	struct cpumask *table_mask;
3916 	struct its_node *its;
3917 	unsigned long flags;
3918 
3919 	/*
3920 	 * Check if we're racing against a VPE being destroyed, for
3921 	 * which we don't want to allow a VMOVP.
3922 	 */
3923 	if (!atomic_read(&vpe->vmapp_count)) {
3924 		if (gic_requires_eager_mapping())
3925 			return -EINVAL;
3926 
3927 		/*
3928 		 * If we lazily map the VPEs, this isn't an error and
3929 		 * we can exit cleanly.
3930 		 */
3931 		cpu = cpumask_first(mask_val);
3932 		irq_data_update_effective_affinity(d, cpumask_of(cpu));
3933 		return IRQ_SET_MASK_OK_DONE;
3934 	}
3935 
3936 	/*
3937 	 * Changing affinity is mega expensive, so let's be as lazy as
3938 	 * we can and only do it if we really have to. Also, if mapped
3939 	 * into the proxy device, we need to move the doorbell
3940 	 * interrupt to its new location.
3941 	 *
3942 	 * Another thing is that changing the affinity of a vPE affects
3943 	 * *other interrupts* such as all the vLPIs that are routed to
3944 	 * this vPE. This means that the irq_desc lock is not enough to
3945 	 * protect us, and that we must ensure nobody samples vpe->col_idx
3946 	 * during the update, hence the lock below which must also be
3947 	 * taken on any vLPI handling path that evaluates vpe->col_idx.
3948 	 *
3949 	 * Finally, we must protect ourselves against concurrent updates of
3950 	 * the mapping state on this VM should the ITS list be in use (see
3951 	 * the shortcut in its_send_vmovp() otherwise).
3952 	 */
3953 	if (its_list_map)
3954 		raw_spin_lock(&vpe->its_vm->vmapp_lock);
3955 
3956 	from = vpe_to_cpuid_lock(vpe, &flags);
3957 	table_mask = gic_data_rdist_cpu(from)->vpe_table_mask;
3958 
3959 	/*
3960 	 * If we are offered another CPU in the same GICv4.1 ITS
3961 	 * affinity, pick this one. Otherwise, any CPU will do.
3962 	 */
3963 	if (table_mask)
3964 		cpu = cpumask_any_and(mask_val, table_mask);
3965 	if (cpu < nr_cpu_ids) {
3966 		if (cpumask_test_cpu(from, mask_val) &&
3967 		    cpumask_test_cpu(from, table_mask))
3968 			cpu = from;
3969 	} else {
3970 		cpu = cpumask_first(mask_val);
3971 	}
3972 
3973 	if (from == cpu)
3974 		goto out;
3975 
3976 	vpe->col_idx = cpu;
3977 
3978 	its_send_vmovp(vpe);
3979 
3980 	its = find_4_1_its();
3981 	if (its && its->flags & ITS_FLAGS_WORKAROUND_HISILICON_162100801)
3982 		its_vpe_4_1_invall_locked(cpu, vpe);
3983 
3984 	its_vpe_db_proxy_move(vpe, from, cpu);
3985 
3986 out:
3987 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
3988 	vpe_to_cpuid_unlock(vpe, flags);
3989 
3990 	if (its_list_map)
3991 		raw_spin_unlock(&vpe->its_vm->vmapp_lock);
3992 
3993 	return IRQ_SET_MASK_OK_DONE;
3994 }
3995 
3996 static void its_wait_vpt_parse_complete(void)
3997 {
3998 	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3999 	u64 val;
4000 
4001 	if (!gic_rdists->has_vpend_valid_dirty)
4002 		return;
4003 
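	/* The RD keeps GICR_VPENDBASER.Dirty set while it parses the VPT; poll (up to 500us) for it to clear */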
4004 	WARN_ON_ONCE(readq_relaxed_poll_timeout_atomic(vlpi_base + GICR_VPENDBASER,
4005 						       val,
4006 						       !(val & GICR_VPENDBASER_Dirty),
4007 						       1, 500));
4008 }
4009 
4010 static void its_vpe_schedule(struct its_vpe *vpe)
4011 {
4012 	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
4013 	u64 val;
4014 
4015 	/* Schedule the VPE */
4016 	val  = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
4017 		GENMASK_ULL(51, 12);
4018 	val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
4019 	if (rdists_support_shareable()) {
4020 		val |= GICR_VPROPBASER_RaWb;
4021 		val |= GICR_VPROPBASER_InnerShareable;
4022 	}
4023 	gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
4024 
4025 	val  = virt_to_phys(page_address(vpe->vpt_page)) &
4026 		GENMASK_ULL(51, 16);
4027 	if (rdists_support_shareable()) {
4028 		val |= GICR_VPENDBASER_RaWaWb;
4029 		val |= GICR_VPENDBASER_InnerShareable;
4030 	}
4031 	/*
4032 	 * There is no good way of finding out if the pending table is
4033 	 * empty as we can race against the doorbell interrupt very
4034 	 * easily. So in the end, vpe->pending_last is only an
4035 	 * indication that the vcpu has something pending, not one
4036 	 * that the pending table is empty. A good implementation
4037 	 * would be able to read its coarse map pretty quickly anyway,
4038 	 * making this a tolerable issue.
4039 	 */
4040 	val |= GICR_VPENDBASER_PendingLast;
4041 	val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
4042 	val |= GICR_VPENDBASER_Valid;
4043 	gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
4044 }
4045 
4046 static void its_vpe_deschedule(struct its_vpe *vpe)
4047 {
4048 	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
4049 	u64 val;
4050 
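	/* Clear Valid to make the vPE non-resident, and latch IDAI/PendingLast for the next schedule */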
4051 	val = its_clear_vpend_valid(vlpi_base, 0, 0);
4052 
4053 	vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
4054 	vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
4055 }
4056 
4057 static void its_vpe_invall(struct its_vpe *vpe)
4058 {
4059 	struct its_node *its;
4060 
4061 	guard(raw_spinlock_irqsave)(&vpe->its_vm->vmapp_lock);
4062 
4063 	list_for_each_entry(its, &its_nodes, entry) {
4064 		if (!is_v4(its))
4065 			continue;
4066 
4067 		if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
4068 			continue;
4069 
4070 		/*
4071 		 * Sending a VINVALL to a single ITS is enough, as all
4072 		 * we need is to reach the redistributors.
4073 		 */
4074 		its_send_vinvall(its, vpe);
4075 		return;
4076 	}
4077 }
4078 
4079 static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
4080 {
4081 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4082 	struct its_cmd_info *info = vcpu_info;
4083 
4084 	switch (info->cmd_type) {
4085 	case SCHEDULE_VPE:
4086 		its_vpe_schedule(vpe);
4087 		return 0;
4088 
4089 	case DESCHEDULE_VPE:
4090 		its_vpe_deschedule(vpe);
4091 		return 0;
4092 
4093 	case COMMIT_VPE:
4094 		its_wait_vpt_parse_complete();
4095 		return 0;
4096 
4097 	case INVALL_VPE:
4098 		its_vpe_invall(vpe);
4099 		return 0;
4100 
4101 	default:
4102 		return -EINVAL;
4103 	}
4104 }
4105 
4106 static void its_vpe_send_cmd(struct its_vpe *vpe,
4107 			     void (*cmd)(struct its_device *, u32))
4108 {
4109 	unsigned long flags;
4110 
4111 	raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
4112 
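	/* Make sure the doorbell is mapped to a proxy event, then issue the command against that event */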
4113 	its_vpe_db_proxy_map_locked(vpe);
4114 	cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
4115 
4116 	raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
4117 }
4118 
4119 static void its_vpe_send_inv(struct irq_data *d)
4120 {
4121 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4122 
4123 	if (gic_rdists->has_direct_lpi)
4124 		__direct_lpi_inv(d, d->parent_data->hwirq);
4125 	else
4126 		its_vpe_send_cmd(vpe, its_send_inv);
4127 }
4128 
4129 static void its_vpe_mask_irq(struct irq_data *d)
4130 {
4131 	/*
4132 	 * We need to mask the LPI, which is described by the parent
4133 	 * irq_data. Instead of calling into the parent (which won't
4134 	 * exactly do the right thing), let's simply use the
4135 	 * parent_data pointer. Yes, I'm naughty.
4136 	 */
4137 	lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
4138 	its_vpe_send_inv(d);
4139 }
4140 
4141 static void its_vpe_unmask_irq(struct irq_data *d)
4142 {
4143 	/* Same hack as above... */
4144 	lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
4145 	its_vpe_send_inv(d);
4146 }
4147 
4148 static int its_vpe_set_irqchip_state(struct irq_data *d,
4149 				     enum irqchip_irq_state which,
4150 				     bool state)
4151 {
4152 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4153 
4154 	if (which != IRQCHIP_STATE_PENDING)
4155 		return -EINVAL;
4156 
4157 	if (gic_rdists->has_direct_lpi) {
4158 		void __iomem *rdbase;
4159 
4160 		rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
4161 		if (state) {
4162 			gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
4163 		} else {
4164 			gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
4165 			wait_for_syncr(rdbase);
4166 		}
4167 	} else {
4168 		if (state)
4169 			its_vpe_send_cmd(vpe, its_send_int);
4170 		else
4171 			its_vpe_send_cmd(vpe, its_send_clear);
4172 	}
4173 
4174 	return 0;
4175 }
4176 
4177 static int its_vpe_retrigger(struct irq_data *d)
4178 {
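	/* Retrigger by making the doorbell pending; a non-zero return means the retrigger was handled */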
4179 	return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
4180 }
4181 
4182 static struct irq_chip its_vpe_irq_chip = {
4183 	.name			= "GICv4-vpe",
4184 	.irq_mask		= its_vpe_mask_irq,
4185 	.irq_unmask		= its_vpe_unmask_irq,
4186 	.irq_eoi		= irq_chip_eoi_parent,
4187 	.irq_set_affinity	= its_vpe_set_affinity,
4188 	.irq_retrigger		= its_vpe_retrigger,
4189 	.irq_set_irqchip_state	= its_vpe_set_irqchip_state,
4190 	.irq_set_vcpu_affinity	= its_vpe_set_vcpu_affinity,
4191 };
4192 
4193 static struct its_node *find_4_1_its(void)
4194 {
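	/* Use the per-CPU cached v4.1 ITS if we already have one, otherwise scan the global ITS list */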
4195 	struct its_node *its = *this_cpu_ptr(&local_4_1_its);
4196 
4197 	if (!its) {
4198 		list_for_each_entry(its, &its_nodes, entry) {
4199 			if (is_v4_1(its))
4200 				return its;
4201 		}
4202 
4203 		/* Oops? */
4204 		its = NULL;
4205 	}
4206 
4207 	return its;
4208 }
4209 
4210 static void its_vpe_4_1_send_inv(struct irq_data *d)
4211 {
4212 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4213 	struct its_node *its;
4214 
4215 	/*
4216 	 * GICv4.1 wants doorbells to be invalidated using the
4217 	 * INVDB command in order to be broadcast to all RDs. Send
4218 	 * it to the first valid ITS, and let the HW do its magic.
4219 	 */
4220 	its = find_4_1_its();
4221 	if (its)
4222 		its_send_invdb(its, vpe);
4223 }
4224 
4225 static void its_vpe_4_1_mask_irq(struct irq_data *d)
4226 {
4227 	lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
4228 	its_vpe_4_1_send_inv(d);
4229 }
4230 
4231 static void its_vpe_4_1_unmask_irq(struct irq_data *d)
4232 {
4233 	lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
4234 	its_vpe_4_1_send_inv(d);
4235 }
4236 
4237 static void its_vpe_4_1_schedule(struct its_vpe *vpe,
4238 				 struct its_cmd_info *info)
4239 {
4240 	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
4241 	u64 val = 0;
4242 
4243 	/* Schedule the VPE */
4244 	val |= GICR_VPENDBASER_Valid;
4245 	val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0;
4246 	val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0;
4247 	val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
4248 
4249 	gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
4250 }
4251 
4252 static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
4253 				   struct its_cmd_info *info)
4254 {
4255 	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
4256 	u64 val;
4257 
4258 	if (info->req_db) {
4259 		unsigned long flags;
4260 
4261 		/*
4262 		 * vPE is going to block: make the vPE non-resident with
4263 		 * PendingLast clear and DB set. The GIC guarantees that if
4264 		 * we read-back PendingLast clear, then a doorbell will be
4265 		 * delivered when an interrupt comes.
4266 		 *
4267 		 * Note the locking to deal with the concurrent update of
4268 		 * pending_last from the doorbell interrupt handler that can
4269 		 * run concurrently.
4270 		 */
4271 		raw_spin_lock_irqsave(&vpe->vpe_lock, flags);
4272 		val = its_clear_vpend_valid(vlpi_base,
4273 					    GICR_VPENDBASER_PendingLast,
4274 					    GICR_VPENDBASER_4_1_DB);
4275 		vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
4276 		raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
4277 	} else {
4278 		/*
4279 		 * We're not blocking, so just make the vPE non-resident
4280 		 * with PendingLast set, indicating that we'll be back.
4281 		 */
4282 		val = its_clear_vpend_valid(vlpi_base,
4283 					    0,
4284 					    GICR_VPENDBASER_PendingLast);
4285 		vpe->pending_last = true;
4286 	}
4287 }
4288 
4289 static void its_vpe_4_1_invall(struct its_vpe *vpe)
4290 {
4291 	unsigned long flags;
4292 	int cpu;
4293 
4294 	/* Target the redistributor this vPE is currently known on */
4295 	cpu = vpe_to_cpuid_lock(vpe, &flags);
4296 	its_vpe_4_1_invall_locked(cpu, vpe);
4297 	vpe_to_cpuid_unlock(vpe, flags);
4298 }
4299 
4300 static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
4301 {
4302 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4303 	struct its_cmd_info *info = vcpu_info;
4304 
4305 	switch (info->cmd_type) {
4306 	case SCHEDULE_VPE:
4307 		its_vpe_4_1_schedule(vpe, info);
4308 		return 0;
4309 
4310 	case DESCHEDULE_VPE:
4311 		its_vpe_4_1_deschedule(vpe, info);
4312 		return 0;
4313 
4314 	case COMMIT_VPE:
4315 		its_wait_vpt_parse_complete();
4316 		return 0;
4317 
4318 	case INVALL_VPE:
4319 		its_vpe_4_1_invall(vpe);
4320 		return 0;
4321 
4322 	default:
4323 		return -EINVAL;
4324 	}
4325 }
4326 
4327 static struct irq_chip its_vpe_4_1_irq_chip = {
4328 	.name			= "GICv4.1-vpe",
4329 	.irq_mask		= its_vpe_4_1_mask_irq,
4330 	.irq_unmask		= its_vpe_4_1_unmask_irq,
4331 	.irq_eoi		= irq_chip_eoi_parent,
4332 	.irq_set_affinity	= its_vpe_set_affinity,
4333 	.irq_set_vcpu_affinity	= its_vpe_4_1_set_vcpu_affinity,
4334 };
4335 
4336 static void its_configure_sgi(struct irq_data *d, bool clear)
4337 {
4338 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4339 	struct its_cmd_desc desc;
4340 
4341 	desc.its_vsgi_cmd.vpe = vpe;
4342 	desc.its_vsgi_cmd.sgi = d->hwirq;
4343 	desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority;
4344 	desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled;
4345 	desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group;
4346 	desc.its_vsgi_cmd.clear = clear;
4347 
4348 	/*
4349 	 * GICv4.1 allows us to send VSGI commands to any ITS as long as the
4350 	 * destination VPE is mapped there. Since we map them eagerly at
4351 	 * activation time, we're pretty sure the first GICv4.1 ITS will do.
4352 	 */
4353 	its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc);
4354 }
4355 
4356 static void its_sgi_mask_irq(struct irq_data *d)
4357 {
4358 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4359 
4360 	vpe->sgi_config[d->hwirq].enabled = false;
4361 	its_configure_sgi(d, false);
4362 }
4363 
4364 static void its_sgi_unmask_irq(struct irq_data *d)
4365 {
4366 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4367 
4368 	vpe->sgi_config[d->hwirq].enabled = true;
4369 	its_configure_sgi(d, false);
4370 }
4371 
4372 static int its_sgi_set_affinity(struct irq_data *d,
4373 				const struct cpumask *mask_val,
4374 				bool force)
4375 {
4376 	/*
4377 	 * There is no notion of affinity for virtual SGIs, at least
4378 	 * not on the host (since they can only be targeting a vPE).
4379 	 * Tell the kernel we've done whatever it asked for.
4380 	 */
4381 	irq_data_update_effective_affinity(d, mask_val);
4382 	return IRQ_SET_MASK_OK;
4383 }
4384 
4385 static int its_sgi_set_irqchip_state(struct irq_data *d,
4386 				     enum irqchip_irq_state which,
4387 				     bool state)
4388 {
4389 	if (which != IRQCHIP_STATE_PENDING)
4390 		return -EINVAL;
4391 
4392 	if (state) {
4393 		struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4394 		struct its_node *its = find_4_1_its();
4395 		u64 val;
4396 
4397 		val  = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id);
4398 		val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq);
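		/* sgir_base maps the 64K frame at offset 128K into the ITS, so rebase the architectural GITS_SGIR offset onto it */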
4399 		writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K);
4400 	} else {
4401 		its_configure_sgi(d, true);
4402 	}
4403 
4404 	return 0;
4405 }
4406 
4407 static int its_sgi_get_irqchip_state(struct irq_data *d,
4408 				     enum irqchip_irq_state which, bool *val)
4409 {
4410 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4411 	void __iomem *base;
4412 	unsigned long flags;
4413 	u32 count = 1000000;	/* 1s! */
4414 	u32 status;
4415 	int cpu;
4416 
4417 	if (which != IRQCHIP_STATE_PENDING)
4418 		return -EINVAL;
4419 
4420 	/*
4421 	 * Locking galore! We can race against two different events:
4422 	 *
4423 	 * - Concurrent vPE affinity change: we must make sure it cannot
4424 	 *   happen, or we'll talk to the wrong redistributor. This is
4425 	 *   identical to what happens with vLPIs.
4426 	 *
4427 	 * - Concurrent VSGIPENDR access: As it involves accessing two
4428 	 *   MMIO registers, this must be made atomic one way or another.
4429 	 */
4430 	cpu = vpe_to_cpuid_lock(vpe, &flags);
4431 	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
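	/* The vSGI status registers sit in the frame 128K above this redistributor's RD_base */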
4432 	base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K;
4433 	writel_relaxed(vpe->vpe_id, base + GICR_VSGIR);
4434 	do {
4435 		status = readl_relaxed(base + GICR_VSGIPENDR);
4436 		if (!(status & GICR_VSGIPENDR_BUSY))
4437 			goto out;
4438 
4439 		count--;
4440 		if (!count) {
4441 			pr_err_ratelimited("Unable to get SGI status\n");
4442 			goto out;
4443 		}
4444 		cpu_relax();
4445 		udelay(1);
4446 	} while (count);
4447 
4448 out:
4449 	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
4450 	vpe_to_cpuid_unlock(vpe, flags);
4451 
4452 	if (!count)
4453 		return -ENXIO;
4454 
4455 	*val = !!(status & (1 << d->hwirq));
4456 
4457 	return 0;
4458 }
4459 
4460 static int its_sgi_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
4461 {
4462 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4463 	struct its_cmd_info *info = vcpu_info;
4464 
4465 	switch (info->cmd_type) {
4466 	case PROP_UPDATE_VSGI:
4467 		vpe->sgi_config[d->hwirq].priority = info->priority;
4468 		vpe->sgi_config[d->hwirq].group = info->group;
4469 		its_configure_sgi(d, false);
4470 		return 0;
4471 
4472 	default:
4473 		return -EINVAL;
4474 	}
4475 }
4476 
4477 static struct irq_chip its_sgi_irq_chip = {
4478 	.name			= "GICv4.1-sgi",
4479 	.irq_mask		= its_sgi_mask_irq,
4480 	.irq_unmask		= its_sgi_unmask_irq,
4481 	.irq_set_affinity	= its_sgi_set_affinity,
4482 	.irq_set_irqchip_state	= its_sgi_set_irqchip_state,
4483 	.irq_get_irqchip_state	= its_sgi_get_irqchip_state,
4484 	.irq_set_vcpu_affinity	= its_sgi_set_vcpu_affinity,
4485 };
4486 
4487 static int its_sgi_irq_domain_alloc(struct irq_domain *domain,
4488 				    unsigned int virq, unsigned int nr_irqs,
4489 				    void *args)
4490 {
4491 	struct its_vpe *vpe = args;
4492 	int i;
4493 
4494 	/* Yes, we do want 16 SGIs */
4495 	WARN_ON(nr_irqs != 16);
4496 
4497 	for (i = 0; i < 16; i++) {
4498 		vpe->sgi_config[i].priority = 0;
4499 		vpe->sgi_config[i].enabled = false;
4500 		vpe->sgi_config[i].group = false;
4501 
4502 		irq_domain_set_hwirq_and_chip(domain, virq + i, i,
4503 					      &its_sgi_irq_chip, vpe);
4504 		irq_set_status_flags(virq + i, IRQ_DISABLE_UNLAZY);
4505 	}
4506 
4507 	return 0;
4508 }
4509 
4510 static void its_sgi_irq_domain_free(struct irq_domain *domain,
4511 				    unsigned int virq,
4512 				    unsigned int nr_irqs)
4513 {
4514 	/* Nothing to do */
4515 }
4516 
4517 static int its_sgi_irq_domain_activate(struct irq_domain *domain,
4518 				       struct irq_data *d, bool reserve)
4519 {
4520 	/* Write out the initial SGI configuration */
4521 	its_configure_sgi(d, false);
4522 	return 0;
4523 }
4524 
4525 static void its_sgi_irq_domain_deactivate(struct irq_domain *domain,
4526 					  struct irq_data *d)
4527 {
4528 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4529 
4530 	/*
4531 	 * The VSGI command is awkward:
4532 	 *
4533 	 * - To change the configuration, CLEAR must be set to false,
4534 	 *   leaving the pending bit unchanged.
4535 	 * - To clear the pending bit, CLEAR must be set to true, leaving
4536 	 *   the configuration unchanged.
4537 	 *
4538 	 * You just can't do both at once, hence the two commands below.
4539 	 */
4540 	vpe->sgi_config[d->hwirq].enabled = false;
4541 	its_configure_sgi(d, false);
4542 	its_configure_sgi(d, true);
4543 }
4544 
4545 static const struct irq_domain_ops its_sgi_domain_ops = {
4546 	.alloc		= its_sgi_irq_domain_alloc,
4547 	.free		= its_sgi_irq_domain_free,
4548 	.activate	= its_sgi_irq_domain_activate,
4549 	.deactivate	= its_sgi_irq_domain_deactivate,
4550 };
4551 
4552 static int its_vpe_id_alloc(void)
4553 {
4554 	return ida_alloc_max(&its_vpeid_ida, ITS_MAX_VPEID - 1, GFP_KERNEL);
4555 }
4556 
4557 static void its_vpe_id_free(u16 id)
4558 {
4559 	ida_free(&its_vpeid_ida, id);
4560 }
4561 
4562 static int its_vpe_init(struct its_vpe *vpe)
4563 {
4564 	struct page *vpt_page;
4565 	int vpe_id;
4566 
4567 	/* Allocate vpe_id */
4568 	vpe_id = its_vpe_id_alloc();
4569 	if (vpe_id < 0)
4570 		return vpe_id;
4571 
4572 	/* Allocate VPT */
4573 	vpt_page = its_allocate_pending_table(GFP_KERNEL);
4574 	if (!vpt_page) {
4575 		its_vpe_id_free(vpe_id);
4576 		return -ENOMEM;
4577 	}
4578 
4579 	if (!its_alloc_vpe_table(vpe_id)) {
4580 		its_vpe_id_free(vpe_id);
4581 		its_free_pending_table(vpt_page);
4582 		return -ENOMEM;
4583 	}
4584 
4585 	raw_spin_lock_init(&vpe->vpe_lock);
4586 	vpe->vpe_id = vpe_id;
4587 	vpe->vpt_page = vpt_page;
4588 	atomic_set(&vpe->vmapp_count, 0);
4589 	if (!gic_rdists->has_rvpeid)
4590 		vpe->vpe_proxy_event = -1;
4591 
4592 	return 0;
4593 }
4594 
4595 static void its_vpe_teardown(struct its_vpe *vpe)
4596 {
4597 	its_vpe_db_proxy_unmap(vpe);
4598 	its_vpe_id_free(vpe->vpe_id);
4599 	its_free_pending_table(vpe->vpt_page);
4600 }
4601 
4602 static void its_vpe_irq_domain_free(struct irq_domain *domain,
4603 				    unsigned int virq,
4604 				    unsigned int nr_irqs)
4605 {
4606 	struct its_vm *vm = domain->host_data;
4607 	int i;
4608 
4609 	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
4610 
4611 	for (i = 0; i < nr_irqs; i++) {
4612 		struct irq_data *data = irq_domain_get_irq_data(domain,
4613 								virq + i);
4614 		struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
4615 
4616 		BUG_ON(vm != vpe->its_vm);
4617 
4618 		clear_bit(data->hwirq, vm->db_bitmap);
4619 		its_vpe_teardown(vpe);
4620 		irq_domain_reset_irq_data(data);
4621 	}
4622 
4623 	if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
4624 		its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
4625 		its_free_prop_table(vm->vprop_page);
4626 	}
4627 }
4628 
4629 static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
4630 				    unsigned int nr_irqs, void *args)
4631 {
4632 	struct irq_chip *irqchip = &its_vpe_irq_chip;
4633 	struct its_vm *vm = args;
4634 	unsigned long *bitmap;
4635 	struct page *vprop_page;
4636 	int base, nr_ids, i, err = 0;
4637 
4638 	bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
4639 	if (!bitmap)
4640 		return -ENOMEM;
4641 
4642 	if (nr_ids < nr_irqs) {
4643 		its_lpi_free(bitmap, base, nr_ids);
4644 		return -ENOMEM;
4645 	}
4646 
4647 	vprop_page = its_allocate_prop_table(GFP_KERNEL);
4648 	if (!vprop_page) {
4649 		its_lpi_free(bitmap, base, nr_ids);
4650 		return -ENOMEM;
4651 	}
4652 
4653 	vm->db_bitmap = bitmap;
4654 	vm->db_lpi_base = base;
4655 	vm->nr_db_lpis = nr_ids;
4656 	vm->vprop_page = vprop_page;
4657 	raw_spin_lock_init(&vm->vmapp_lock);
4658 
4659 	if (gic_rdists->has_rvpeid)
4660 		irqchip = &its_vpe_4_1_irq_chip;
4661 
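	/* Give each vPE its own doorbell LPI and hook it up to the parent LPI domain */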
4662 	for (i = 0; i < nr_irqs; i++) {
4663 		vm->vpes[i]->vpe_db_lpi = base + i;
4664 		err = its_vpe_init(vm->vpes[i]);
4665 		if (err)
4666 			break;
4667 		err = its_irq_gic_domain_alloc(domain, virq + i,
4668 					       vm->vpes[i]->vpe_db_lpi);
4669 		if (err)
4670 			break;
4671 		irq_domain_set_hwirq_and_chip(domain, virq + i, i,
4672 					      irqchip, vm->vpes[i]);
4673 		set_bit(i, bitmap);
4674 		irqd_set_resend_when_in_progress(irq_get_irq_data(virq + i));
4675 	}
4676 
4677 	if (err)
4678 		its_vpe_irq_domain_free(domain, virq, i);
4679 
4680 	return err;
4681 }
4682 
4683 static int its_vpe_irq_domain_activate(struct irq_domain *domain,
4684 				       struct irq_data *d, bool reserve)
4685 {
4686 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4687 	struct its_node *its;
4688 
4689 	/* Map the VPE to the first possible CPU */
4690 	vpe->col_idx = cpumask_first(cpu_online_mask);
4691 	irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
4692 
4693 	/*
4694 	 * If we use the list map, we issue VMAPP on demand... Unless
4695 	 * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
4696 	 * so that VSGIs can work.
4697 	 */
4698 	if (!gic_requires_eager_mapping())
4699 		return 0;
4700 
4701 	list_for_each_entry(its, &its_nodes, entry) {
4702 		if (!is_v4(its))
4703 			continue;
4704 
4705 		its_send_vmapp(its, vpe, true);
4706 		its_send_vinvall(its, vpe);
4707 	}
4708 
4709 	return 0;
4710 }
4711 
4712 static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
4713 					  struct irq_data *d)
4714 {
4715 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4716 	struct its_node *its;
4717 
4718 	/*
4719 	 * If we use the list map on GICv4.0, we unmap the VPE once no
4720 	 * VLPIs are associated with the VM.
4721 	 */
4722 	if (!gic_requires_eager_mapping())
4723 		return;
4724 
4725 	list_for_each_entry(its, &its_nodes, entry) {
4726 		if (!is_v4(its))
4727 			continue;
4728 
4729 		its_send_vmapp(its, vpe, false);
4730 	}
4731 
4732 	/*
4733 	 * There may be a direct read to the VPT after unmapping the
4734 	 * vPE; to guarantee that such a read is valid, we make the VPT
4735 	 * memory coherent with the CPU caches here.
4736 	 */
4737 	if (find_4_1_its() && !atomic_read(&vpe->vmapp_count))
4738 		gic_flush_dcache_to_poc(page_address(vpe->vpt_page),
4739 					LPI_PENDBASE_SZ);
4740 }
4741 
4742 static const struct irq_domain_ops its_vpe_domain_ops = {
4743 	.alloc			= its_vpe_irq_domain_alloc,
4744 	.free			= its_vpe_irq_domain_free,
4745 	.activate		= its_vpe_irq_domain_activate,
4746 	.deactivate		= its_vpe_irq_domain_deactivate,
4747 };
4748 
4749 static int its_force_quiescent(void __iomem *base)
4750 {
4751 	u32 count = 1000000;	/* 1s */
4752 	u32 val;
4753 
4754 	val = readl_relaxed(base + GITS_CTLR);
4755 	/*
4756 	 * GIC architecture specification requires the ITS to be both
4757 	 * disabled and quiescent for writes to GITS_BASER<n> or
4758 	 * GITS_CBASER to not have UNPREDICTABLE results.
4759 	 */
4760 	if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
4761 		return 0;
4762 
4763 	/* Disable the generation of all interrupts to this ITS */
4764 	val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
4765 	writel_relaxed(val, base + GITS_CTLR);
4766 
4767 	/* Poll GITS_CTLR and wait until ITS becomes quiescent */
4768 	while (1) {
4769 		val = readl_relaxed(base + GITS_CTLR);
4770 		if (val & GITS_CTLR_QUIESCENT)
4771 			return 0;
4772 
4773 		count--;
4774 		if (!count)
4775 			return -EBUSY;
4776 
4777 		cpu_relax();
4778 		udelay(1);
4779 	}
4780 }
4781 
4782 static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
4783 {
4784 	struct its_node *its = data;
4785 
4786 	/* erratum 22375: only alloc 8MB table size (20 bits) */
4787 	its->typer &= ~GITS_TYPER_DEVBITS;
4788 	its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1);
4789 	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
4790 
4791 	return true;
4792 }
4793 
4794 static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
4795 {
4796 	struct its_node *its = data;
4797 
4798 	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
4799 
4800 	return true;
4801 }
4802 
4803 static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
4804 {
4805 	struct its_node *its = data;
4806 
4807 	/* On QDF2400, the size of the ITE is 16 bytes */
4808 	its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE;
4809 	its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1);
4810 
4811 	return true;
4812 }
4813 
4814 static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
4815 {
4816 	struct its_node *its = its_dev->its;
4817 
4818 	/*
4819 	 * The Socionext Synquacer SoC has a so-called 'pre-ITS',
4820 	 * which maps 32-bit writes targeted at a separate window of
4821 	 * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
4822 	 * with device ID taken from bits [device_id_bits + 1:2] of
4823 	 * the window offset.
4824 	 */
4825 	return its->pre_its_base + (its_dev->device_id << 2);
4826 }
4827 
4828 static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
4829 {
4830 	struct its_node *its = data;
4831 	u32 pre_its_window[2];
4832 	u32 ids;
4833 
4834 	if (!fwnode_property_read_u32_array(its->fwnode_handle,
4835 					   "socionext,synquacer-pre-its",
4836 					   pre_its_window,
4837 					   ARRAY_SIZE(pre_its_window))) {
4838 
4839 		its->pre_its_base = pre_its_window[0];
4840 		its->get_msi_base = its_irq_get_msi_base_pre_its;
4841 
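		/* The pre-ITS window spans 4 bytes per DeviceID (size = 4 << devbits), so devbits = ilog2(size) - 2 */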
4842 		ids = ilog2(pre_its_window[1]) - 2;
4843 		if (device_ids(its) > ids) {
4844 			its->typer &= ~GITS_TYPER_DEVBITS;
4845 			its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1);
4846 		}
4847 
4848 		/* the pre-ITS breaks isolation, so disable MSI remapping */
4849 		its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_ISOLATED_MSI;
4850 		return true;
4851 	}
4852 	return false;
4853 }
4854 
4855 static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
4856 {
4857 	struct its_node *its = data;
4858 
4859 	/*
4860 	 * Hip07 insists on using the wrong address for the VLPI
4861 	 * page. Trick it into doing the right thing...
4862 	 */
4863 	its->vlpi_redist_offset = SZ_128K;
4864 	return true;
4865 }
4866 
4867 static bool __maybe_unused its_enable_rk3588001(void *data)
4868 {
4869 	struct its_node *its = data;
4870 
4871 	if (!of_machine_is_compatible("rockchip,rk3588") &&
4872 	    !of_machine_is_compatible("rockchip,rk3588s"))
4873 		return false;
4874 
4875 	its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
4876 	gic_rdists->flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;
4877 
4878 	return true;
4879 }
4880 
4881 static bool its_set_non_coherent(void *data)
4882 {
4883 	struct its_node *its = data;
4884 
4885 	its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
4886 	return true;
4887 }
4888 
4889 static bool __maybe_unused its_enable_quirk_hip09_162100801(void *data)
4890 {
4891 	struct its_node *its = data;
4892 
4893 	its->flags |= ITS_FLAGS_WORKAROUND_HISILICON_162100801;
4894 	return true;
4895 }
4896 
4897 static bool __maybe_unused its_enable_rk3568002(void *data)
4898 {
4899 	if (!of_machine_is_compatible("rockchip,rk3566") &&
4900 	    !of_machine_is_compatible("rockchip,rk3568"))
4901 		return false;
4902 
4903 	gfp_flags_quirk |= GFP_DMA32;
4904 
4905 	return true;
4906 }
4907 
4908 static const struct gic_quirk its_quirks[] = {
4909 #ifdef CONFIG_CAVIUM_ERRATUM_22375
4910 	{
4911 		.desc	= "ITS: Cavium errata 22375, 24313",
4912 		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
4913 		.mask	= 0xffff0fff,
4914 		.init	= its_enable_quirk_cavium_22375,
4915 	},
4916 #endif
4917 #ifdef CONFIG_CAVIUM_ERRATUM_23144
4918 	{
4919 		.desc	= "ITS: Cavium erratum 23144",
4920 		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
4921 		.mask	= 0xffff0fff,
4922 		.init	= its_enable_quirk_cavium_23144,
4923 	},
4924 #endif
4925 #ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
4926 	{
4927 		.desc	= "ITS: QDF2400 erratum 0065",
4928 		.iidr	= 0x00001070, /* QDF2400 ITS rev 1.x */
4929 		.mask	= 0xffffffff,
4930 		.init	= its_enable_quirk_qdf2400_e0065,
4931 	},
4932 #endif
4933 #ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
4934 	{
4935 		/*
4936 		 * The Socionext Synquacer SoC incorporates ARM's own GIC-500
4937 		 * implementation, but with a 'pre-ITS' added that requires
4938 		 * special handling in software.
4939 		 */
4940 		.desc	= "ITS: Socionext Synquacer pre-ITS",
4941 		.iidr	= 0x0001143b,
4942 		.mask	= 0xffffffff,
4943 		.init	= its_enable_quirk_socionext_synquacer,
4944 	},
4945 #endif
4946 #ifdef CONFIG_HISILICON_ERRATUM_161600802
4947 	{
4948 		.desc	= "ITS: Hip07 erratum 161600802",
4949 		.iidr	= 0x00000004,
4950 		.mask	= 0xffffffff,
4951 		.init	= its_enable_quirk_hip07_161600802,
4952 	},
4953 #endif
4954 #ifdef CONFIG_HISILICON_ERRATUM_162100801
4955 	{
4956 		.desc	= "ITS: Hip09 erratum 162100801",
4957 		.iidr	= 0x00051736,
4958 		.mask	= 0xffffffff,
4959 		.init	= its_enable_quirk_hip09_162100801,
4960 	},
4961 #endif
4962 #ifdef CONFIG_ROCKCHIP_ERRATUM_3588001
4963 	{
4964 		.desc   = "ITS: Rockchip erratum RK3588001",
4965 		.iidr   = 0x0201743b,
4966 		.mask   = 0xffffffff,
4967 		.init   = its_enable_rk3588001,
4968 	},
4969 #endif
4970 	{
4971 		.desc   = "ITS: non-coherent attribute",
4972 		.property = "dma-noncoherent",
4973 		.init   = its_set_non_coherent,
4974 	},
4975 #ifdef CONFIG_ROCKCHIP_ERRATUM_3568002
4976 	{
4977 		.desc   = "ITS: Rockchip erratum RK3568002",
4978 		.iidr   = 0x0201743b,
4979 		.mask   = 0xffffffff,
4980 		.init   = its_enable_rk3568002,
4981 	},
4982 #endif
4983 	{
4984 	}
4985 };
4986 
4987 static void its_enable_quirks(struct its_node *its)
4988 {
4989 	u32 iidr = readl_relaxed(its->base + GITS_IIDR);
4990 
4991 	gic_enable_quirks(iidr, its_quirks, its);
4992 
4993 	if (is_of_node(its->fwnode_handle))
4994 		gic_enable_of_quirks(to_of_node(its->fwnode_handle),
4995 				     its_quirks, its);
4996 }
4997 
4998 static int its_save_disable(void *data)
4999 {
5000 	struct its_node *its;
5001 	int err = 0;
5002 
5003 	raw_spin_lock(&its_lock);
5004 	list_for_each_entry(its, &its_nodes, entry) {
5005 		void __iomem *base;
5006 
5007 		base = its->base;
5008 		its->ctlr_save = readl_relaxed(base + GITS_CTLR);
5009 		err = its_force_quiescent(base);
5010 		if (err) {
5011 			pr_err("ITS@%pa: failed to quiesce: %d\n",
5012 			       &its->phys_base, err);
5013 			writel_relaxed(its->ctlr_save, base + GITS_CTLR);
5014 			goto err;
5015 		}
5016 
5017 		its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
5018 	}
5019 
5020 err:
5021 	if (err) {
5022 		list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
5023 			void __iomem *base;
5024 
5025 			base = its->base;
5026 			writel_relaxed(its->ctlr_save, base + GITS_CTLR);
5027 		}
5028 	}
5029 	raw_spin_unlock(&its_lock);
5030 
5031 	return err;
5032 }
5033 
5034 static void its_restore_enable(void *data)
5035 {
5036 	struct its_node *its;
5037 	int ret;
5038 
5039 	raw_spin_lock(&its_lock);
5040 	list_for_each_entry(its, &its_nodes, entry) {
5041 		void __iomem *base;
5042 		int i;
5043 
5044 		base = its->base;
5045 
5046 		/*
5047 		 * Make sure that the ITS is disabled. If it fails to quiesce,
5048 		 * don't restore it since writing to CBASER or BASER<n>
5049 		 * registers is undefined according to the GIC v3 ITS
5050 		 * Specification.
5051 		 *
5052 		 * Firmware resuming with the ITS enabled is terminally broken.
5053 		 */
5054 		WARN_ON(readl_relaxed(base + GITS_CTLR) & GITS_CTLR_ENABLE);
5055 		ret = its_force_quiescent(base);
5056 		if (ret) {
5057 			pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
5058 			       &its->phys_base, ret);
5059 			continue;
5060 		}
5061 
5062 		gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);
5063 
5064 		/*
5065 		 * Writing CBASER resets CREADR to 0, so make CWRITER and
5066 		 * cmd_write line up with it.
5067 		 */
5068 		its->cmd_write = its->cmd_base;
5069 		gits_write_cwriter(0, base + GITS_CWRITER);
5070 
5071 		/* Restore GITS_BASER from the value cache. */
5072 		for (i = 0; i < GITS_BASER_NR_REGS; i++) {
5073 			struct its_baser *baser = &its->tables[i];
5074 
5075 			if (!(baser->val & GITS_BASER_VALID))
5076 				continue;
5077 
5078 			its_write_baser(its, baser, baser->val);
5079 		}
5080 		writel_relaxed(its->ctlr_save, base + GITS_CTLR);
5081 
5082 		/*
5083 		 * Reinit the collection if it's stored in the ITS. This is
5084 		 * indicated by the col_id being less than the HCC field.
5085 		 * CID < HCC as specified in the GIC v3 Documentation.
5086 		 */
5087 		if (its->collections[smp_processor_id()].col_id <
5088 		    GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
5089 			its_cpu_init_collection(its);
5090 	}
5091 	raw_spin_unlock(&its_lock);
5092 }
5093 
5094 static const struct syscore_ops its_syscore_ops = {
5095 	.suspend = its_save_disable,
5096 	.resume = its_restore_enable,
5097 };
5098 
5099 static struct syscore its_syscore = {
5100 	.ops = &its_syscore_ops,
5101 };
5102 
5103 static void __init __iomem *its_map_one(struct resource *res, int *err)
5104 {
5105 	void __iomem *its_base;
5106 	u32 val;
5107 
5108 	its_base = ioremap(res->start, SZ_64K);
5109 	if (!its_base) {
5110 		pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
5111 		*err = -ENOMEM;
5112 		return NULL;
5113 	}
5114 
5115 	val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
5116 	if (val != 0x30 && val != 0x40) {
5117 		pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
5118 		*err = -ENODEV;
5119 		goto out_unmap;
5120 	}
5121 
5122 	*err = its_force_quiescent(its_base);
5123 	if (*err) {
5124 		pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
5125 		goto out_unmap;
5126 	}
5127 
5128 	return its_base;
5129 
5130 out_unmap:
5131 	iounmap(its_base);
5132 	return NULL;
5133 }
5134 
5135 static int its_init_domain(struct its_node *its)
5136 {
5137 	struct irq_domain_info dom_info = {
5138 		.fwnode		= its->fwnode_handle,
5139 		.ops		= &its_domain_ops,
5140 		.domain_flags	= its->msi_domain_flags,
5141 		.parent		= its_parent,
5142 	};
5143 	struct msi_domain_info *info;
5144 
5145 	info = kzalloc_obj(*info);
5146 	if (!info)
5147 		return -ENOMEM;
5148 
5149 	info->ops = &its_msi_domain_ops;
5150 	info->data = its;
5151 	dom_info.host_data = info;
5152 
5153 	if (!msi_create_parent_irq_domain(&dom_info, &gic_v3_its_msi_parent_ops)) {
5154 		kfree(info);
5155 		return -ENOMEM;
5156 	}
5157 	return 0;
5158 }
5159 
5160 static int its_init_vpe_domain(void)
5161 {
5162 	struct its_node *its;
5163 	u32 devid;
5164 	int entries;
5165 
5166 	if (gic_rdists->has_direct_lpi) {
5167 		pr_info("ITS: Using DirectLPI for VPE invalidation\n");
5168 		return 0;
5169 	}
5170 
5171 	/* Any ITS will do, even if not v4 */
5172 	its = list_first_entry(&its_nodes, struct its_node, entry);
5173 
5174 	entries = roundup_pow_of_two(nr_cpu_ids);
5175 	vpe_proxy.vpes = kzalloc_objs(*vpe_proxy.vpes, entries);
5176 	if (!vpe_proxy.vpes)
5177 		return -ENOMEM;
5178 
5179 	/* Use the last possible DevID */
5180 	devid = GENMASK(device_ids(its) - 1, 0);
5181 	vpe_proxy.dev = its_create_device(its, devid, entries, false);
5182 	if (!vpe_proxy.dev) {
5183 		kfree(vpe_proxy.vpes);
5184 		pr_err("ITS: Can't allocate GICv4 proxy device\n");
5185 		return -ENOMEM;
5186 	}
5187 
5188 	BUG_ON(entries > vpe_proxy.dev->nr_ites);
5189 
5190 	raw_spin_lock_init(&vpe_proxy.lock);
5191 	vpe_proxy.next_victim = 0;
5192 	pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
5193 		devid, vpe_proxy.dev->nr_ites);
5194 
5195 	return 0;
5196 }
5197 
5198 static int __init its_compute_its_list_map(struct its_node *its)
5199 {
5200 	int its_number;
5201 	u32 ctlr;
5202 
5203 	/*
5204 	 * This is assumed to be done early enough that we're
5205 	 * guaranteed to be single-threaded, hence no
5206 	 * locking. Should this change, we should address
5207 	 * this.
5208 	 */
5209 	its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
5210 	if (its_number >= GICv4_ITS_LIST_MAX) {
5211 		pr_err("ITS@%pa: No ITSList entry available!\n",
5212 		       &its->phys_base);
5213 		return -EINVAL;
5214 	}
5215 
5216 	ctlr = readl_relaxed(its->base + GITS_CTLR);
5217 	ctlr &= ~GITS_CTLR_ITS_NUMBER;
5218 	ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
5219 	writel_relaxed(ctlr, its->base + GITS_CTLR);
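	/* Read the value back: if our ITS number didn't stick, use whatever number the HW reports */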
5220 	ctlr = readl_relaxed(its->base + GITS_CTLR);
5221 	if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
5222 		its_number = ctlr & GITS_CTLR_ITS_NUMBER;
5223 		its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
5224 	}
5225 
5226 	if (test_and_set_bit(its_number, &its_list_map)) {
5227 		pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
5228 		       &its->phys_base, its_number);
5229 		return -EINVAL;
5230 	}
5231 
5232 	return its_number;
5233 }
5234 
5235 static int __init its_probe_one(struct its_node *its)
5236 {
5237 	u64 baser, tmp;
5238 	struct page *page;
5239 	u32 ctlr;
5240 	int err;
5241 
5242 	its_enable_quirks(its);
5243 
5244 	if (is_v4(its)) {
5245 		if (!(its->typer & GITS_TYPER_VMOVP)) {
5246 			err = its_compute_its_list_map(its);
5247 			if (err < 0)
5248 				goto out;
5249 
5250 			its->list_nr = err;
5251 
5252 			pr_info("ITS@%pa: Using ITS number %d\n",
5253 				&its->phys_base, err);
5254 		} else {
5255 			pr_info("ITS@%pa: Single VMOVP capable\n", &its->phys_base);
5256 		}
5257 
5258 		if (is_v4_1(its)) {
5259 			u32 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
5260 
5261 			its->sgir_base = ioremap(its->phys_base + SZ_128K, SZ_64K);
5262 			if (!its->sgir_base) {
5263 				err = -ENOMEM;
5264 				goto out;
5265 			}
5266 
5267 			its->mpidr = readl_relaxed(its->base + GITS_MPIDR);
5268 
5269 			pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n",
5270 				&its->phys_base, its->mpidr, svpet);
5271 		}
5272 	}
5273 
5274 	page = its_alloc_pages_node(its->numa_node,
5275 				    GFP_KERNEL | __GFP_ZERO,
5276 				    get_order(ITS_CMD_QUEUE_SZ));
5277 	if (!page) {
5278 		err = -ENOMEM;
5279 		goto out_unmap_sgir;
5280 	}
5281 	its->cmd_base = (void *)page_address(page);
5282 	its->cmd_write = its->cmd_base;
5283 
5284 	err = its_alloc_tables(its);
5285 	if (err)
5286 		goto out_free_cmd;
5287 
5288 	err = its_alloc_collections(its);
5289 	if (err)
5290 		goto out_free_tables;
5291 
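	/*
	 * Point GITS_CBASER at the command queue: write-allocate,
	 * write-back cacheable, Inner Shareable, size expressed as the
	 * number of 4kB pages minus one, and flagged as valid. The
	 * attributes may be downgraded below if the HW doesn't honour
	 * what we asked for.
	 */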
5292 	baser = (virt_to_phys(its->cmd_base)	|
5293 		 GITS_CBASER_RaWaWb		|
5294 		 GITS_CBASER_InnerShareable	|
5295 		 (ITS_CMD_QUEUE_SZ / SZ_4K - 1)	|
5296 		 GITS_CBASER_VALID);
5297 
5298 	gits_write_cbaser(baser, its->base + GITS_CBASER);
5299 	tmp = gits_read_cbaser(its->base + GITS_CBASER);
5300 
5301 	if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE)
5302 		tmp &= ~GITS_CBASER_SHAREABILITY_MASK;
5303 
5304 	if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
5305 		if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
5306 			/*
5307 			 * The HW reports non-shareable, we must
5308 			 * remove the cacheability attributes as
5309 			 * well.
5310 			 */
5311 			baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
5312 				   GITS_CBASER_CACHEABILITY_MASK);
5313 			baser |= GITS_CBASER_nC;
5314 			gits_write_cbaser(baser, its->base + GITS_CBASER);
5315 		}
5316 		pr_info("ITS: using cache flushing for cmd queue\n");
5317 		its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
5318 	}
5319 
5320 	gits_write_cwriter(0, its->base + GITS_CWRITER);
5321 	ctlr = readl_relaxed(its->base + GITS_CTLR);
5322 	ctlr |= GITS_CTLR_ENABLE;
5323 	if (is_v4(its))
5324 		ctlr |= GITS_CTLR_ImDe;
5325 	writel_relaxed(ctlr, its->base + GITS_CTLR);
5326 
5327 	err = its_init_domain(its);
5328 	if (err)
5329 		goto out_free_tables;
5330 
5331 	raw_spin_lock(&its_lock);
5332 	list_add(&its->entry, &its_nodes);
5333 	raw_spin_unlock(&its_lock);
5334 
5335 	return 0;
5336 
5337 out_free_tables:
5338 	its_free_tables(its);
5339 out_free_cmd:
5340 	its_free_pages(its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
5341 out_unmap_sgir:
5342 	if (its->sgir_base)
5343 		iounmap(its->sgir_base);
5344 out:
5345 	pr_err("ITS@%pa: failed probing (%d)\n", &its->phys_base, err);
5346 	return err;
5347 }
5348 
5349 static bool gic_rdists_supports_plpis(void)
5350 {
5351 	return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
5352 }
5353 
5354 static int redist_disable_lpis(void)
5355 {
5356 	void __iomem *rbase = gic_data_rdist_rd_base();
5357 	u64 timeout = USEC_PER_SEC;
5358 	u64 val;
5359 
5360 	if (!gic_rdists_supports_plpis()) {
5361 		pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
5362 		return -ENXIO;
5363 	}
5364 
5365 	val = readl_relaxed(rbase + GICR_CTLR);
5366 	if (!(val & GICR_CTLR_ENABLE_LPIS))
5367 		return 0;
5368 
5369 	/*
5370 	 * If coming via a CPU hotplug event, we don't need to disable
5371 	 * LPIs before trying to re-enable them. They are already
5372 	 * configured and all is well in the world.
5373 	 *
5374 	 * If running with preallocated tables, there is nothing to do.
5375 	 */
5376 	if ((gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) ||
5377 	    (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
5378 		return 0;
5379 
5380 	/*
5381 	 * From that point on, we only try to do some damage control.
5382 	 */
5383 	pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
5384 		smp_processor_id());
5385 	add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
5386 
5387 	/* Disable LPIs */
5388 	val &= ~GICR_CTLR_ENABLE_LPIS;
5389 	writel_relaxed(val, rbase + GICR_CTLR);
5390 
5391 	/* Make sure any change to GICR_CTLR is observable by the GIC */
5392 	dsb(sy);
5393 
5394 	/*
5395 	 * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
5396 	 * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
5397 	 * Error out if we time out waiting for RWP to clear.
5398 	 */
5399 	while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
5400 		if (!timeout) {
5401 			pr_err("CPU%d: Timeout while disabling LPIs\n",
5402 			       smp_processor_id());
5403 			return -ETIMEDOUT;
5404 		}
5405 		udelay(1);
5406 		timeout--;
5407 	}
5408 
5409 	/*
5410 	 * After it has been written to 1, it is IMPLEMENTATION
5411 	 * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be
5412 	 * cleared to 0. Error out if clearing the bit failed.
5413 	 */
5414 	if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
5415 		pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
5416 		return -EBUSY;
5417 	}
5418 
5419 	return 0;
5420 }
5421 
5422 int its_cpu_init(void)
5423 {
5424 	if (!list_empty(&its_nodes)) {
5425 		int ret;
5426 
5427 		ret = redist_disable_lpis();
5428 		if (ret)
5429 			return ret;
5430 
5431 		its_cpu_init_lpis();
5432 		its_cpu_init_collections();
5433 	}
5434 
5435 	return 0;
5436 }
5437 
5438 static void rdist_memreserve_cpuhp_cleanup_workfn(struct work_struct *work)
5439 {
5440 	cpuhp_remove_state_nocalls(gic_rdists->cpuhp_memreserve_state);
5441 	gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
5442 }
5443 
5444 static DECLARE_WORK(rdist_memreserve_cpuhp_cleanup_work,
5445 		    rdist_memreserve_cpuhp_cleanup_workfn);
5446 
5447 static int its_cpu_memreserve_lpi(unsigned int cpu)
5448 {
5449 	struct page *pend_page;
5450 	int ret = 0;
5451 
5452 	/* This gets to run exactly once per CPU */
5453 	if (gic_data_rdist()->flags & RD_LOCAL_MEMRESERVE_DONE)
5454 		return 0;
5455 
5456 	pend_page = gic_data_rdist()->pend_page;
5457 	if (WARN_ON(!pend_page)) {
5458 		ret = -ENOMEM;
5459 		goto out;
5460 	}
5461 	/*
5462 	 * If the pending table was pre-programmed, free the memory we
5463 	 * preemptively allocated. Otherwise, reserve that memory for
5464 	 * later kexecs.
5465 	 */
5466 	if (gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED) {
5467 		its_free_pending_table(pend_page);
5468 		gic_data_rdist()->pend_page = NULL;
5469 	} else {
5470 		phys_addr_t paddr = page_to_phys(pend_page);
5471 		WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
5472 	}
5473 
5474 out:
5475 	/* Last CPU being brought up gets to issue the cleanup */
5476 	if (!IS_ENABLED(CONFIG_SMP) ||
5477 	    cpumask_equal(&cpus_booted_once_mask, cpu_possible_mask))
5478 		schedule_work(&rdist_memreserve_cpuhp_cleanup_work);
5479 
5480 	gic_data_rdist()->flags |= RD_LOCAL_MEMRESERVE_DONE;
5481 	return ret;
5482 }
5483 
5484 /* Mark all the BASER registers as invalid before they get reprogrammed */
5485 static int __init its_reset_one(struct resource *res)
5486 {
5487 	void __iomem *its_base;
5488 	int err, i;
5489 
5490 	its_base = its_map_one(res, &err);
5491 	if (!its_base)
5492 		return err;
5493 
5494 	for (i = 0; i < GITS_BASER_NR_REGS; i++)
5495 		gits_write_baser(0, its_base + GITS_BASER + (i << 3));
5496 
5497 	iounmap(its_base);
5498 	return 0;
5499 }
5500 
5501 static const struct of_device_id its_device_id[] = {
5502 	{	.compatible	= "arm,gic-v3-its",	},
5503 	{},
5504 };
5505 
5506 static struct its_node __init *its_node_init(struct resource *res,
5507 					     struct fwnode_handle *handle, int numa_node)
5508 {
5509 	void __iomem *its_base;
5510 	struct its_node *its;
5511 	int err;
5512 
5513 	its_base = its_map_one(res, &err);
5514 	if (!its_base)
5515 		return NULL;
5516 
5517 	pr_info("ITS %pR\n", res);
5518 
5519 	its = kzalloc(sizeof(*its), GFP_KERNEL);
5520 	if (!its)
5521 		goto out_unmap;
5522 
5523 	raw_spin_lock_init(&its->lock);
5524 	mutex_init(&its->dev_alloc_lock);
5525 	INIT_LIST_HEAD(&its->entry);
5526 	INIT_LIST_HEAD(&its->its_device_list);
5527 
5528 	its->typer = gic_read_typer(its_base + GITS_TYPER);
5529 	its->base = its_base;
5530 	its->phys_base = res->start;
5531 	its->get_msi_base = its_irq_get_msi_base;
5532 	its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI | IRQ_DOMAIN_FLAG_MSI_IMMUTABLE;
5533 
5534 	its->numa_node = numa_node;
5535 	its->fwnode_handle = handle;
5536 
5537 	return its;
5538 
5539 out_unmap:
5540 	iounmap(its_base);
5541 	return NULL;
5542 }
5543 
5544 static void its_node_destroy(struct its_node *its)
5545 {
5546 	iounmap(its->base);
5547 	kfree(its);
5548 }
5549 
5550 static int __init its_of_probe(struct device_node *node)
5551 {
5552 	struct device_node *np;
5553 	struct resource res;
5554 	int err;
5555 
5556 	/*
5557 	 * Make sure *all* the ITS are reset before we probe any, as
5558 	 * they may be sharing memory. If any of the ITS fails to
5559 	 * reset, don't even try to go any further, as this could
5560 	 * result in something even worse.
5561 	 */
5562 	for (np = of_find_matching_node(node, its_device_id); np;
5563 	     np = of_find_matching_node(np, its_device_id)) {
5564 		if (!of_device_is_available(np) ||
5565 		    !of_property_read_bool(np, "msi-controller") ||
5566 		    of_address_to_resource(np, 0, &res))
5567 			continue;
5568 
5569 		err = its_reset_one(&res);
5570 		if (err)
5571 			return err;
5572 	}
5573 
5574 	for (np = of_find_matching_node(node, its_device_id); np;
5575 	     np = of_find_matching_node(np, its_device_id)) {
5576 		struct its_node *its;
5577 
5578 		if (!of_device_is_available(np))
5579 			continue;
5580 		if (!of_property_read_bool(np, "msi-controller")) {
5581 			pr_warn("%pOF: no msi-controller property, ITS ignored\n",
5582 				np);
5583 			continue;
5584 		}
5585 
5586 		if (of_address_to_resource(np, 0, &res)) {
5587 			pr_warn("%pOF: no regs?\n", np);
5588 			continue;
5589 		}
5590 
5591 
5592 		its = its_node_init(&res, &np->fwnode, of_node_to_nid(np));
5593 		if (!its)
5594 			return -ENOMEM;
5595 
5596 		err = its_probe_one(its);
5597 		if (err)  {
5598 			its_node_destroy(its);
5599 			return err;
5600 		}
5601 	}
5602 	return 0;
5603 }
5604 
5605 #ifdef CONFIG_ACPI
5606 
5607 #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)
5608 
5609 #ifdef CONFIG_ACPI_NUMA
5610 struct its_srat_map {
5611 	/* numa node id */
5612 	u32	numa_node;
5613 	/* GIC ITS ID */
5614 	u32	its_id;
5615 };
5616 
5617 static struct its_srat_map *its_srat_maps __initdata;
5618 static int its_in_srat __initdata;
5619 
5620 static int __init acpi_get_its_numa_node(u32 its_id)
5621 {
5622 	int i;
5623 
5624 	for (i = 0; i < its_in_srat; i++) {
5625 		if (its_id == its_srat_maps[i].its_id)
5626 			return its_srat_maps[i].numa_node;
5627 	}
5628 	return NUMA_NO_NODE;
5629 }
5630 
5631 static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header,
5632 					  const unsigned long end)
5633 {
5634 	return 0;
5635 }
5636 
5637 static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header,
5638 			 const unsigned long end)
5639 {
5640 	int node;
5641 	struct acpi_srat_gic_its_affinity *its_affinity;
5642 
5643 	its_affinity = (struct acpi_srat_gic_its_affinity *)header;
5644 	if (!its_affinity)
5645 		return -EINVAL;
5646 
5647 	if (its_affinity->header.length < sizeof(*its_affinity)) {
5648 		pr_err("SRAT: Invalid header length %d in ITS affinity\n",
5649 			its_affinity->header.length);
5650 		return -EINVAL;
5651 	}
5652 
5653 	/*
5654 	 * Note that in theory a new proximity node could be created by this
5655 	 * entry as it is an SRAT resource allocation structure.
5656 	 * We do not currently support doing so.
5657 	 */
5658 	node = pxm_to_node(its_affinity->proximity_domain);
5659 
5660 	if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
5661 		pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
5662 		return 0;
5663 	}
5664 
5665 	its_srat_maps[its_in_srat].numa_node = node;
5666 	its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
5667 	its_in_srat++;
5668 	pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
5669 		its_affinity->proximity_domain, its_affinity->its_id, node);
5670 
5671 	return 0;
5672 }
5673 
5674 static void __init acpi_table_parse_srat_its(void)
5675 {
5676 	int count;
5677 
5678 	count = acpi_table_parse_entries(ACPI_SIG_SRAT,
5679 			sizeof(struct acpi_table_srat),
5680 			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
5681 			gic_acpi_match_srat_its, 0);
5682 	if (count <= 0)
5683 		return;
5684 
5685 	its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map), GFP_KERNEL);
5686 	if (!its_srat_maps)
5687 		return;
5688 
5689 	acpi_table_parse_entries(ACPI_SIG_SRAT,
5690 			sizeof(struct acpi_table_srat),
5691 			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
5692 			gic_acpi_parse_srat_its, 0);
5693 }
5694 
5695 /* free the its_srat_maps after ITS probing */
5696 static void __init acpi_its_srat_maps_free(void)
5697 {
5698 	kfree(its_srat_maps);
5699 }
5700 #else
5701 static void __init acpi_table_parse_srat_its(void)	{ }
5702 static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
5703 static void __init acpi_its_srat_maps_free(void) { }
5704 #endif
5705 
5706 static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
5707 					  const unsigned long end)
5708 {
5709 	struct acpi_madt_generic_translator *its_entry;
5710 	struct fwnode_handle *dom_handle;
5711 	struct its_node *its;
5712 	struct resource res;
5713 	int err;
5714 
5715 	its_entry = (struct acpi_madt_generic_translator *)header;
5716 	memset(&res, 0, sizeof(res));
5717 	res.start = its_entry->base_address;
5718 	res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
5719 	res.flags = IORESOURCE_MEM;
5720 
5721 	dom_handle = irq_domain_alloc_fwnode(&res.start);
5722 	if (!dom_handle) {
5723 		pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
5724 		       &res.start);
5725 		return -ENOMEM;
5726 	}
5727 
5728 	err = iort_register_domain_token(its_entry->translation_id, res.start,
5729 					 dom_handle);
5730 	if (err) {
5731 		pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
5732 		       &res.start, its_entry->translation_id);
5733 		goto dom_err;
5734 	}
5735 
5736 	its = its_node_init(&res, dom_handle,
5737 			    acpi_get_its_numa_node(its_entry->translation_id));
5738 	if (!its) {
5739 		err = -ENOMEM;
5740 		goto node_err;
5741 	}
5742 
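	/*
	 * From MADT revision 7 onwards, firmware can describe an ITS as
	 * non-coherent; such an ITS must not use shareable attributes
	 * for its tables or command queue.
	 */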
5743 	if (acpi_get_madt_revision() >= 7 &&
5744 	    (its_entry->flags & ACPI_MADT_ITS_NON_COHERENT))
5745 		its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
5746 
5747 	err = its_probe_one(its);
5748 	if (!err)
5749 		return 0;
5750 
5751 node_err:
5752 	iort_deregister_domain_token(its_entry->translation_id);
5753 dom_err:
5754 	irq_domain_free_fwnode(dom_handle);
5755 	return err;
5756 }
5757 
5758 static int __init its_acpi_reset(union acpi_subtable_headers *header,
5759 				 const unsigned long end)
5760 {
5761 	struct acpi_madt_generic_translator *its_entry;
5762 	struct resource res;
5763 
5764 	its_entry = (struct acpi_madt_generic_translator *)header;
5765 	res = (struct resource) {
5766 		.start	= its_entry->base_address,
5767 		.end	= its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1,
5768 		.flags	= IORESOURCE_MEM,
5769 	};
5770 
5771 	return its_reset_one(&res);
5772 }
5773 
5774 static void __init its_acpi_probe(void)
5775 {
5776 	acpi_table_parse_srat_its();
5777 	/*
5778 	 * Make sure *all* the ITS are reset before we probe any, as
5779 	 * they may be sharing memory. If any of the ITS fails to
5780 	 * reset, don't even try to go any further, as this could
5781 	 * result in something even worse.
5782 	 */
5783 	if (acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
5784 				  its_acpi_reset, 0) > 0)
5785 		acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
5786 				      gic_acpi_parse_madt_its, 0);
5787 	acpi_its_srat_maps_free();
5788 }
5789 #else
5790 static void __init its_acpi_probe(void) { }
5791 #endif
5792 
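/*
 * Register a CPU hotplug callback so that each redistributor's pending
 * table is either freed (when the HW came up with it pre-programmed) or
 * reserved for a later kexec, exactly once per CPU. Only relevant on
 * EFI systems, where the reservation can be recorded persistently.
 */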
5793 int __init its_lpi_memreserve_init(void)
5794 {
5795 	int state;
5796 
5797 	if (!efi_enabled(EFI_CONFIG_TABLES))
5798 		return 0;
5799 
5800 	if (list_empty(&its_nodes))
5801 		return 0;
5802 
5803 	gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
5804 	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
5805 				  "irqchip/arm/gicv3/memreserve:online",
5806 				  its_cpu_memreserve_lpi,
5807 				  NULL);
5808 	if (state < 0)
5809 		return state;
5810 
5811 	gic_rdists->cpuhp_memreserve_state = state;
5812 
5813 	return 0;
5814 }
5815 
5816 int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
5817 		    struct irq_domain *parent_domain, u8 irq_prio)
5818 {
5819 	struct device_node *of_node;
5820 	struct its_node *its;
5821 	bool has_v4 = false;
5822 	bool has_v4_1 = false;
5823 	int err;
5824 
5825 	itt_pool = gen_pool_create(get_order(ITS_ITT_ALIGN), -1);
5826 	if (!itt_pool)
5827 		return -ENOMEM;
5828 
5829 	gic_rdists = rdists;
5830 
5831 	lpi_prop_prio = irq_prio;
5832 	its_parent = parent_domain;
5833 	of_node = to_of_node(handle);
5834 	if (of_node)
5835 		its_of_probe(of_node);
5836 	else
5837 		its_acpi_probe();
5838 
5839 	if (list_empty(&its_nodes)) {
5840 		pr_warn("ITS: No ITS available, not enabling LPIs\n");
5841 		return -ENXIO;
5842 	}
5843 
5844 	err = allocate_lpi_tables();
5845 	if (err)
5846 		return err;
5847 
5848 	list_for_each_entry(its, &its_nodes, entry) {
5849 		has_v4 |= is_v4(its);
5850 		has_v4_1 |= is_v4_1(its);
5851 	}
5852 
5853 	/* Don't bother with inconsistent systems */
5854 	if (WARN_ON(!has_v4_1 && rdists->has_rvpeid))
5855 		rdists->has_rvpeid = false;
5856 
5857 	if (has_v4 & rdists->has_vlpis) {
5858 		const struct irq_domain_ops *sgi_ops;
5859 
5860 		if (has_v4_1)
5861 			sgi_ops = &its_sgi_domain_ops;
5862 		else
5863 			sgi_ops = NULL;
5864 
5865 		if (its_init_vpe_domain() ||
5866 		    its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) {
5867 			rdists->has_vlpis = false;
5868 			pr_err("ITS: Disabling GICv4 support\n");
5869 		}
5870 	}
5871 
5872 	register_syscore_ops(&its_syscore_ops);
5873 
5874 	return 0;
5875 }
5876