xref: /linux/arch/arm64/kvm/vgic/vgic-its.c (revision 63eb28bb1402891b1ad2be02a530f29a9dd7f1cd)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * GICv3 ITS emulation
4  *
5  * Copyright (C) 2015,2016 ARM Ltd.
6  * Author: Andre Przywara <andre.przywara@arm.com>
7  */
8 
9 #include <linux/cpu.h>
10 #include <linux/kvm.h>
11 #include <linux/kvm_host.h>
12 #include <linux/interrupt.h>
13 #include <linux/list.h>
14 #include <linux/uaccess.h>
15 #include <linux/list_sort.h>
16 
17 #include <linux/irqchip/arm-gic-v3.h>
18 
19 #include <asm/kvm_emulate.h>
20 #include <asm/kvm_arm.h>
21 #include <asm/kvm_mmu.h>
22 
23 #include "vgic.h"
24 #include "vgic-mmio.h"
25 
26 static struct kvm_device_ops kvm_arm_vgic_its_ops;
27 
28 static int vgic_its_save_tables_v0(struct vgic_its *its);
29 static int vgic_its_restore_tables_v0(struct vgic_its *its);
30 static int vgic_its_commit_v0(struct vgic_its *its);
31 static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
32 			     struct kvm_vcpu *filter_vcpu, bool needs_inv);
33 
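/*
 * Helpers for reading/writing a single ITS table entry in guest memory.
 * They sanity-check that the entry size of the current ABI matches the
 * size of the value being transferred before accessing guest memory
 * under the SRCU lock.
 */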
34 #define vgic_its_read_entry_lock(i, g, valp, t)				\
35 	({								\
36 		int __sz = vgic_its_get_abi(i)->t##_esz;		\
37 		struct kvm *__k = (i)->dev->kvm;			\
38 		int __ret;						\
39 									\
40 		BUILD_BUG_ON(NR_ITS_ABIS == 1 &&			\
41 			     sizeof(*(valp)) != ABI_0_ESZ);		\
42 		if (NR_ITS_ABIS > 1 &&					\
43 		    KVM_BUG_ON(__sz != sizeof(*(valp)), __k))		\
44 			__ret = -EINVAL;				\
45 		else							\
46 			__ret = kvm_read_guest_lock(__k, (g),		\
47 						    valp, __sz);	\
48 		__ret;							\
49 	})
50 
51 #define vgic_its_write_entry_lock(i, g, val, t)				\
52 	({								\
53 		int __sz = vgic_its_get_abi(i)->t##_esz;		\
54 		struct kvm *__k = (i)->dev->kvm;			\
55 		typeof(val) __v = (val);				\
56 		int __ret;						\
57 									\
58 		BUILD_BUG_ON(NR_ITS_ABIS == 1 &&			\
59 			     sizeof(__v) != ABI_0_ESZ);			\
60 		if (NR_ITS_ABIS > 1 &&					\
61 		    KVM_BUG_ON(__sz != sizeof(__v), __k))		\
62 			__ret = -EINVAL;				\
63 		else							\
64 			__ret = vgic_write_guest_lock(__k, (g),		\
65 						      &__v, __sz);	\
66 		__ret;							\
67 	})
68 
69 /*
70  * Creates a new (reference to a) struct vgic_irq for a given LPI.
71  * If this LPI is already mapped on another ITS, we increase its refcount
72  * and return a pointer to the existing structure.
73  * If this is a "new" LPI, we allocate and initialize a new struct vgic_irq.
74  * This function returns a pointer to the _unlocked_ structure.
75  */
76 static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
77 				     struct kvm_vcpu *vcpu)
78 {
79 	struct vgic_dist *dist = &kvm->arch.vgic;
80 	struct vgic_irq *irq = vgic_get_irq(kvm, intid), *oldirq;
81 	unsigned long flags;
82 	int ret;
83 
84 	/* In this case there is no put, since we keep the reference. */
85 	if (irq)
86 		return irq;
87 
88 	irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL_ACCOUNT);
89 	if (!irq)
90 		return ERR_PTR(-ENOMEM);
91 
92 	ret = xa_reserve_irq(&dist->lpi_xa, intid, GFP_KERNEL_ACCOUNT);
93 	if (ret) {
94 		kfree(irq);
95 		return ERR_PTR(ret);
96 	}
97 
98 	INIT_LIST_HEAD(&irq->ap_list);
99 	raw_spin_lock_init(&irq->irq_lock);
100 
101 	irq->config = VGIC_CONFIG_EDGE;
102 	kref_init(&irq->refcount);
103 	irq->intid = intid;
104 	irq->target_vcpu = vcpu;
105 	irq->group = 1;
106 
107 	xa_lock_irqsave(&dist->lpi_xa, flags);
108 
109 	/*
110 	 * There could be a race with another vgic_add_lpi(), so we need to
111 	 * check that we don't add a second entry with the same LPI.
112 	 */
113 	oldirq = xa_load(&dist->lpi_xa, intid);
114 	if (vgic_try_get_irq_kref(oldirq)) {
115 		/* Someone was faster with adding this LPI, let's use that. */
116 		kfree(irq);
117 		irq = oldirq;
118 
119 		goto out_unlock;
120 	}
121 
122 	ret = xa_err(__xa_store(&dist->lpi_xa, intid, irq, 0));
123 	if (ret) {
124 		xa_release(&dist->lpi_xa, intid);
125 		kfree(irq);
126 	}
127 
128 out_unlock:
129 	xa_unlock_irqrestore(&dist->lpi_xa, flags);
130 
131 	if (ret)
132 		return ERR_PTR(ret);
133 
134 	/*
135 	 * We "cache" the configuration table entries in our struct vgic_irq's.
136 	 * However we only have those structs for mapped IRQs, so we read in
137 	 * the respective config data from memory here upon mapping the LPI.
138 	 *
139 	 * Should any of these fail, behave as if we couldn't create the LPI
140 	 * by dropping the refcount and returning the error.
141 	 */
142 	ret = update_lpi_config(kvm, irq, NULL, false);
143 	if (ret) {
144 		vgic_put_irq(kvm, irq);
145 		return ERR_PTR(ret);
146 	}
147 
148 	ret = vgic_v3_lpi_sync_pending_status(kvm, irq);
149 	if (ret) {
150 		vgic_put_irq(kvm, irq);
151 		return ERR_PTR(ret);
152 	}
153 
154 	return irq;
155 }
156 
157 /**
158  * struct vgic_its_abi - ITS abi ops and settings
159  * @cte_esz: collection table entry size
160  * @dte_esz: device table entry size
161  * @ite_esz: interrupt translation table entry size
162  * @save_tables: save the ITS tables into guest RAM
163  * @restore_tables: restore the ITS internal structs from tables
164  *  stored in guest RAM
165  * @commit: initialize the registers which expose the ABI settings,
166  *  especially the entry sizes
167  */
168 struct vgic_its_abi {
169 	int cte_esz;
170 	int dte_esz;
171 	int ite_esz;
172 	int (*save_tables)(struct vgic_its *its);
173 	int (*restore_tables)(struct vgic_its *its);
174 	int (*commit)(struct vgic_its *its);
175 };
176 
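/* ABI revision 0 uses 8-byte entries for all three tables. */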
177 #define ABI_0_ESZ	8
178 #define ESZ_MAX		ABI_0_ESZ
179 
180 static const struct vgic_its_abi its_table_abi_versions[] = {
181 	[0] = {
182 	 .cte_esz = ABI_0_ESZ,
183 	 .dte_esz = ABI_0_ESZ,
184 	 .ite_esz = ABI_0_ESZ,
185 	 .save_tables = vgic_its_save_tables_v0,
186 	 .restore_tables = vgic_its_restore_tables_v0,
187 	 .commit = vgic_its_commit_v0,
188 	},
189 };
190 
191 #define NR_ITS_ABIS	ARRAY_SIZE(its_table_abi_versions)
192 
193 inline const struct vgic_its_abi *vgic_its_get_abi(struct vgic_its *its)
194 {
195 	return &its_table_abi_versions[its->abi_rev];
196 }
197 
198 static int vgic_its_set_abi(struct vgic_its *its, u32 rev)
199 {
200 	const struct vgic_its_abi *abi;
201 
202 	its->abi_rev = rev;
203 	abi = vgic_its_get_abi(its);
204 	return abi->commit(its);
205 }
206 
207 /*
208  * Finds and returns a device in the device table for an ITS.
209  * Must be called with the its_lock mutex held.
210  */
211 static struct its_device *find_its_device(struct vgic_its *its, u32 device_id)
212 {
213 	struct its_device *device;
214 
215 	list_for_each_entry(device, &its->device_list, dev_list)
216 		if (device_id == device->device_id)
217 			return device;
218 
219 	return NULL;
220 }
221 
222 /*
223  * Finds and returns an interrupt translation table entry (ITTE) for a given
224  * Device ID/Event ID pair on an ITS.
225  * Must be called with the its_lock mutex held.
226  */
227 static struct its_ite *find_ite(struct vgic_its *its, u32 device_id,
228 				  u32 event_id)
229 {
230 	struct its_device *device;
231 	struct its_ite *ite;
232 
233 	device = find_its_device(its, device_id);
234 	if (device == NULL)
235 		return NULL;
236 
237 	list_for_each_entry(ite, &device->itt_head, ite_list)
238 		if (ite->event_id == event_id)
239 			return ite;
240 
241 	return NULL;
242 }
243 
244 /* To be used as an iterator, this macro omits the enclosing parentheses */
245 #define for_each_lpi_its(dev, ite, its) \
246 	list_for_each_entry(dev, &(its)->device_list, dev_list) \
247 		list_for_each_entry(ite, &(dev)->itt_head, ite_list)
248 
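/* LPIs occupy the INTID space from 8192 upwards in the GICv3 architecture. */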
249 #define GIC_LPI_OFFSET 8192
250 
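/*
 * Limits advertised to the guest via GITS_TYPER (16 bits of DeviceID and
 * EventID space), plus the maximum ID deltas that fit in the "next" fields
 * of saved device/interrupt table entries.
 */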
251 #define VITS_TYPER_IDBITS		16
252 #define VITS_MAX_EVENTID		(BIT(VITS_TYPER_IDBITS) - 1)
253 #define VITS_TYPER_DEVBITS		16
254 #define VITS_MAX_DEVID			(BIT(VITS_TYPER_DEVBITS) - 1)
255 #define VITS_DTE_MAX_DEVID_OFFSET	(BIT(14) - 1)
256 #define VITS_ITE_MAX_EVENTID_OFFSET	(BIT(16) - 1)
257 
258 /*
259  * Finds and returns a collection in the ITS collection table.
260  * Must be called with the its_lock mutex held.
261  */
262 static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
263 {
264 	struct its_collection *collection;
265 
266 	list_for_each_entry(collection, &its->collection_list, coll_list) {
267 		if (coll_id == collection->collection_id)
268 			return collection;
269 	}
270 
271 	return NULL;
272 }
273 
274 #define LPI_PROP_ENABLE_BIT(p)	((p) & LPI_PROP_ENABLED)
275 #define LPI_PROP_PRIORITY(p)	((p) & 0xfc)
276 
277 /*
278  * Reads the configuration data for a given LPI from guest memory and
279  * updates the fields in struct vgic_irq.
280  * If filter_vcpu is not NULL, the update is applied only if the IRQ is
281  * targeting this VCPU; it is applied unconditionally if filter_vcpu is NULL.
282  */
283 static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
284 			     struct kvm_vcpu *filter_vcpu, bool needs_inv)
285 {
286 	u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
287 	u8 prop;
288 	int ret;
289 	unsigned long flags;
290 
291 	ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
292 				  &prop, 1);
293 
294 	if (ret)
295 		return ret;
296 
297 	raw_spin_lock_irqsave(&irq->irq_lock, flags);
298 
299 	if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
300 		irq->priority = LPI_PROP_PRIORITY(prop);
301 		irq->enabled = LPI_PROP_ENABLE_BIT(prop);
302 
303 		if (!irq->hw) {
304 			vgic_queue_irq_unlock(kvm, irq, flags);
305 			return 0;
306 		}
307 	}
308 
309 	if (irq->hw)
310 		ret = its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
311 
312 	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
313 	return ret;
314 }
315 
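/*
 * Retarget an LPI to a new vcpu. For a hardware-forwarded LPI (irq->hw),
 * also remap the backing VLPI onto the new vcpu's vPE.
 */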
316 static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
317 {
318 	struct its_vlpi_map map;
319 	int ret;
320 
321 	guard(raw_spinlock_irqsave)(&irq->irq_lock);
322 	irq->target_vcpu = vcpu;
323 
324 	if (!irq->hw)
325 		return 0;
326 
327 	ret = its_get_vlpi(irq->host_irq, &map);
328 	if (ret)
329 		return ret;
330 
331 	if (map.vpe)
332 		atomic_dec(&map.vpe->vlpi_count);
333 
334 	map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
335 	atomic_inc(&map.vpe->vlpi_count);
336 	return its_map_vlpi(irq->host_irq, &map);
337 }
338 
339 static struct kvm_vcpu *collection_to_vcpu(struct kvm *kvm,
340 					   struct its_collection *col)
341 {
342 	return kvm_get_vcpu_by_id(kvm, col->target_addr);
343 }
344 
345 /*
346  * Promotes the ITS view of affinity of an ITTE (which redistributor this LPI
347  * is targeting) to the VGIC's view, which deals with target VCPUs.
348  * Needs to be called whenever either the collection for an LPI has
349  * changed or the collection itself got retargeted.
350  */
351 static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite)
352 {
353 	struct kvm_vcpu *vcpu;
354 
355 	if (!its_is_collection_mapped(ite->collection))
356 		return;
357 
358 	vcpu = collection_to_vcpu(kvm, ite->collection);
359 	update_affinity(ite->irq, vcpu);
360 }
361 
362 /*
363  * Updates the target VCPU for every LPI targeting this collection.
364  * Must be called with the its_lock mutex held.
365  */
366 static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its,
367 				       struct its_collection *coll)
368 {
369 	struct its_device *device;
370 	struct its_ite *ite;
371 
372 	for_each_lpi_its(device, ite, its) {
373 		if (ite->collection != coll)
374 			continue;
375 
376 		update_affinity_ite(kvm, ite);
377 	}
378 }
379 
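/*
 * GICR_PROPBASER.IDbits holds the number of interrupt ID bits minus one;
 * e.g. a value of 15 means 16 ID bits, i.e. 65536 interrupt IDs. Clamp it
 * to what this ITS emulation supports.
 */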
380 static u32 max_lpis_propbaser(u64 propbaser)
381 {
382 	int nr_idbits = (propbaser & 0x1f) + 1;
383 
384 	return 1U << min(nr_idbits, INTERRUPT_ID_BITS_ITS);
385 }
386 
387 /*
388  * Sync the pending table pending bit of LPIs targeting @vcpu
389  * with our own data structures. This relies on the LPI having been
390  * mapped beforehand.
391  */
392 static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
393 {
394 	gpa_t pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
395 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
396 	unsigned long intid, flags;
397 	struct vgic_irq *irq;
398 	int last_byte_offset = -1;
399 	int ret = 0;
400 	u8 pendmask;
401 
402 	xa_for_each(&dist->lpi_xa, intid, irq) {
403 		int byte_offset, bit_nr;
404 
405 		byte_offset = intid / BITS_PER_BYTE;
406 		bit_nr = intid % BITS_PER_BYTE;
407 
408 		/*
409 		 * For contiguously allocated LPIs chances are we just read
410 		 * this very same byte in the last iteration. Reuse that.
411 		 */
412 		if (byte_offset != last_byte_offset) {
413 			ret = kvm_read_guest_lock(vcpu->kvm,
414 						  pendbase + byte_offset,
415 						  &pendmask, 1);
416 			if (ret)
417 				return ret;
418 
419 			last_byte_offset = byte_offset;
420 		}
421 
422 		irq = vgic_get_irq(vcpu->kvm, intid);
423 		if (!irq)
424 			continue;
425 
426 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
427 		if (irq->target_vcpu == vcpu)
428 			irq->pending_latch = pendmask & (1U << bit_nr);
429 		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
430 		vgic_put_irq(vcpu->kvm, irq);
431 	}
432 
433 	return ret;
434 }
435 
436 static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
437 					      struct vgic_its *its,
438 					      gpa_t addr, unsigned int len)
439 {
440 	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
441 	u64 reg = GITS_TYPER_PLPIS;
442 
443 	/*
444 	 * We use linear CPU numbers for redistributor addressing,
445 	 * so GITS_TYPER.PTA is 0.
446 	 * Also we force all PROPBASER registers to be the same, so
447 	 * CommonLPIAff is 0 as well.
448 	 * To avoid memory waste in the guest, we keep the number of IDBits and
449  * DevBits low - at least for the time being.
450 	 */
451 	reg |= GIC_ENCODE_SZ(VITS_TYPER_DEVBITS, 5) << GITS_TYPER_DEVBITS_SHIFT;
452 	reg |= GIC_ENCODE_SZ(VITS_TYPER_IDBITS, 5) << GITS_TYPER_IDBITS_SHIFT;
453 	reg |= GIC_ENCODE_SZ(abi->ite_esz, 4) << GITS_TYPER_ITT_ENTRY_SIZE_SHIFT;
454 
455 	return extract_bytes(reg, addr & 7, len);
456 }
457 
458 static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm,
459 					     struct vgic_its *its,
460 					     gpa_t addr, unsigned int len)
461 {
462 	u32 val;
463 
464 	val = (its->abi_rev << GITS_IIDR_REV_SHIFT) & GITS_IIDR_REV_MASK;
465 	val |= (PRODUCT_ID_KVM << GITS_IIDR_PRODUCTID_SHIFT) | IMPLEMENTER_ARM;
466 	return val;
467 }
468 
469 static int vgic_mmio_uaccess_write_its_iidr(struct kvm *kvm,
470 					    struct vgic_its *its,
471 					    gpa_t addr, unsigned int len,
472 					    unsigned long val)
473 {
474 	u32 rev = GITS_IIDR_REV(val);
475 
476 	if (rev >= NR_ITS_ABIS)
477 		return -EINVAL;
478 	return vgic_its_set_abi(its, rev);
479 }
480 
481 static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
482 					       struct vgic_its *its,
483 					       gpa_t addr, unsigned int len)
484 {
485 	switch (addr & 0xffff) {
486 	case GITS_PIDR0:
487 		return 0x92;	/* part number, bits[7:0] */
488 	case GITS_PIDR1:
489 		return 0xb4;	/* part number, bits[11:8] */
490 	case GITS_PIDR2:
491 		return GIC_PIDR2_ARCH_GICv3 | 0x0b;
492 	case GITS_PIDR4:
493 		return 0x40;	/* This is a 64K software visible page */
494 	/* The following are the ID registers for (any) GIC. */
495 	case GITS_CIDR0:
496 		return 0x0d;
497 	case GITS_CIDR1:
498 		return 0xf0;
499 	case GITS_CIDR2:
500 		return 0x05;
501 	case GITS_CIDR3:
502 		return 0xb1;
503 	}
504 
505 	return 0;
506 }
507 
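/*
 * Resolve a doorbell address to the ITS emulation registered at that
 * address on the KVM MMIO bus, or an error pointer if there is none.
 */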
508 static struct vgic_its *__vgic_doorbell_to_its(struct kvm *kvm, gpa_t db)
509 {
510 	struct kvm_io_device *kvm_io_dev;
511 	struct vgic_io_device *iodev;
512 
513 	kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, db);
514 	if (!kvm_io_dev)
515 		return ERR_PTR(-EINVAL);
516 
517 	if (kvm_io_dev->ops != &kvm_io_gic_ops)
518 		return ERR_PTR(-EINVAL);
519 
520 	iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
521 	if (iodev->iodev_type != IODEV_ITS)
522 		return ERR_PTR(-EINVAL);
523 
524 	return iodev->its;
525 }
526 
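/*
 * The translation cache key packs the DeviceID above the EventID, e.g.
 * devid 2 / eventid 0x20 yields (2 << 16) | 0x20 = 0x20020.
 */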
527 static unsigned long vgic_its_cache_key(u32 devid, u32 eventid)
528 {
529 	return (((unsigned long)devid) << VITS_TYPER_IDBITS) | eventid;
530 
531 }
532 
533 static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
534 					     u32 devid, u32 eventid)
535 {
536 	unsigned long cache_key = vgic_its_cache_key(devid, eventid);
537 	struct vgic_its *its;
538 	struct vgic_irq *irq;
539 
540 	if (devid > VITS_MAX_DEVID || eventid > VITS_MAX_EVENTID)
541 		return NULL;
542 
543 	its = __vgic_doorbell_to_its(kvm, db);
544 	if (IS_ERR(its))
545 		return NULL;
546 
547 	rcu_read_lock();
548 
549 	irq = xa_load(&its->translation_cache, cache_key);
550 	if (!vgic_try_get_irq_kref(irq))
551 		irq = NULL;
552 
553 	rcu_read_unlock();
554 
555 	return irq;
556 }
557 
558 static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
559 				       u32 devid, u32 eventid,
560 				       struct vgic_irq *irq)
561 {
562 	unsigned long cache_key = vgic_its_cache_key(devid, eventid);
563 	struct vgic_irq *old;
564 
565 	/* Do not cache a directly injected interrupt */
566 	if (irq->hw)
567 		return;
568 
569 	/*
570 	 * The irq refcount is guaranteed to be nonzero while holding the
571 	 * its_lock, as the ITE (and the reference it holds) cannot be freed.
572 	 */
573 	lockdep_assert_held(&its->its_lock);
574 	vgic_get_irq_kref(irq);
575 
576 	old = xa_store(&its->translation_cache, cache_key, irq, GFP_KERNEL_ACCOUNT);
577 
578 	/*
579 	 * Put the reference taken on @irq if the store fails. Intentionally do
580 	 * not return the error as the translation cache is best effort.
581 	 */
582 	if (xa_is_err(old)) {
583 		vgic_put_irq(kvm, irq);
584 		return;
585 	}
586 
587 	/*
588 	 * We could have raced with another CPU caching the same
589 	 * translation behind our back; ensure we don't leak a
590 	 * reference if that is the case.
591 	 */
592 	if (old)
593 		vgic_put_irq(kvm, old);
594 }
595 
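/* Drop all cached translations of this ITS and the references they hold. */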
596 static void vgic_its_invalidate_cache(struct vgic_its *its)
597 {
598 	struct kvm *kvm = its->dev->kvm;
599 	struct vgic_irq *irq;
600 	unsigned long idx;
601 
602 	xa_for_each(&its->translation_cache, idx, irq) {
603 		xa_erase(&its->translation_cache, idx);
604 		vgic_put_irq(kvm, irq);
605 	}
606 }
607 
608 void vgic_its_invalidate_all_caches(struct kvm *kvm)
609 {
610 	struct kvm_device *dev;
611 	struct vgic_its *its;
612 
613 	rcu_read_lock();
614 
615 	list_for_each_entry_rcu(dev, &kvm->devices, vm_node) {
616 		if (dev->ops != &kvm_arm_vgic_its_ops)
617 			continue;
618 
619 		its = dev->private;
620 		vgic_its_invalidate_cache(its);
621 	}
622 
623 	rcu_read_unlock();
624 }
625 
626 int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
627 			 u32 devid, u32 eventid, struct vgic_irq **irq)
628 {
629 	struct kvm_vcpu *vcpu;
630 	struct its_ite *ite;
631 
632 	if (!its->enabled)
633 		return -EBUSY;
634 
635 	ite = find_ite(its, devid, eventid);
636 	if (!ite || !its_is_collection_mapped(ite->collection))
637 		return E_ITS_INT_UNMAPPED_INTERRUPT;
638 
639 	vcpu = collection_to_vcpu(kvm, ite->collection);
640 	if (!vcpu)
641 		return E_ITS_INT_UNMAPPED_INTERRUPT;
642 
643 	if (!vgic_lpis_enabled(vcpu))
644 		return -EBUSY;
645 
646 	vgic_its_cache_translation(kvm, its, devid, eventid, ite->irq);
647 
648 	*irq = ite->irq;
649 	return 0;
650 }
651 
652 struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi)
653 {
654 	u64 address;
655 
656 	if (!vgic_has_its(kvm))
657 		return ERR_PTR(-ENODEV);
658 
659 	if (!(msi->flags & KVM_MSI_VALID_DEVID))
660 		return ERR_PTR(-EINVAL);
661 
662 	address = (u64)msi->address_hi << 32 | msi->address_lo;
663 
664 	return __vgic_doorbell_to_its(kvm, address);
665 }
666 
667 /*
668  * Find the target VCPU and the LPI number for a given devid/eventid pair
669  * and make this IRQ pending, possibly injecting it.
670  * Must be called with the its_lock mutex held.
671  * Returns 0 on success, a positive error value for any ITS mapping
672  * related errors and negative error values for generic errors.
673  */
674 static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
675 				u32 devid, u32 eventid)
676 {
677 	struct vgic_irq *irq = NULL;
678 	unsigned long flags;
679 	int err;
680 
681 	err = vgic_its_resolve_lpi(kvm, its, devid, eventid, &irq);
682 	if (err)
683 		return err;
684 
685 	if (irq->hw)
686 		return irq_set_irqchip_state(irq->host_irq,
687 					     IRQCHIP_STATE_PENDING, true);
688 
689 	raw_spin_lock_irqsave(&irq->irq_lock, flags);
690 	irq->pending_latch = true;
691 	vgic_queue_irq_unlock(kvm, irq, flags);
692 
693 	return 0;
694 }
695 
696 int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi)
697 {
698 	struct vgic_irq *irq;
699 	unsigned long flags;
700 	phys_addr_t db;
701 
702 	db = (u64)msi->address_hi << 32 | msi->address_lo;
703 	irq = vgic_its_check_cache(kvm, db, msi->devid, msi->data);
704 	if (!irq)
705 		return -EWOULDBLOCK;
706 
707 	raw_spin_lock_irqsave(&irq->irq_lock, flags);
708 	irq->pending_latch = true;
709 	vgic_queue_irq_unlock(kvm, irq, flags);
710 	vgic_put_irq(kvm, irq);
711 
712 	return 0;
713 }
714 
715 /*
716  * Queries the KVM IO bus framework to get the ITS pointer from the given
717  * doorbell address.
718  * We then call vgic_its_trigger_msi() with the decoded data.
719  * According to the KVM_SIGNAL_MSI API description, this returns 1 on success.
720  */
721 int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
722 {
723 	struct vgic_its *its;
724 	int ret;
725 
726 	if (!vgic_its_inject_cached_translation(kvm, msi))
727 		return 1;
728 
729 	its = vgic_msi_to_its(kvm, msi);
730 	if (IS_ERR(its))
731 		return PTR_ERR(its);
732 
733 	mutex_lock(&its->its_lock);
734 	ret = vgic_its_trigger_msi(kvm, its, msi->devid, msi->data);
735 	mutex_unlock(&its->its_lock);
736 
737 	if (ret < 0)
738 		return ret;
739 
740 	/*
741 	 * KVM_SIGNAL_MSI demands a return value > 0 for success and 0
742 	 * if the guest has blocked the MSI. So we map any LPI mapping
743 	 * related error to that.
744 	 */
745 	if (ret)
746 		return 0;
747 	else
748 		return 1;
749 }
750 
751 /* Requires the its_lock to be held. */
752 static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
753 {
754 	struct vgic_irq *irq = ite->irq;
755 	list_del(&ite->ite_list);
756 
757 	/* This put matches the get in vgic_add_lpi. */
758 	if (irq) {
759 		scoped_guard(raw_spinlock_irqsave, &irq->irq_lock) {
760 			if (irq->hw)
761 				its_unmap_vlpi(ite->irq->host_irq);
762 
763 			irq->hw = false;
764 		}
765 
766 		vgic_put_irq(kvm, ite->irq);
767 	}
768 
769 	kfree(ite);
770 }
771 
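/*
 * Field extraction helpers for ITS commands, which are four little-endian
 * 64-bit words (32 bytes) read from the guest's command queue.
 */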
772 static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size)
773 {
774 	return (le64_to_cpu(its_cmd[word]) >> shift) & (BIT_ULL(size) - 1);
775 }
776 
777 #define its_cmd_get_command(cmd)	its_cmd_mask_field(cmd, 0,  0,  8)
778 #define its_cmd_get_deviceid(cmd)	its_cmd_mask_field(cmd, 0, 32, 32)
779 #define its_cmd_get_size(cmd)		(its_cmd_mask_field(cmd, 1,  0,  5) + 1)
780 #define its_cmd_get_id(cmd)		its_cmd_mask_field(cmd, 1,  0, 32)
781 #define its_cmd_get_physical_id(cmd)	its_cmd_mask_field(cmd, 1, 32, 32)
782 #define its_cmd_get_collection(cmd)	its_cmd_mask_field(cmd, 2,  0, 16)
783 #define its_cmd_get_ittaddr(cmd)	(its_cmd_mask_field(cmd, 2,  8, 44) << 8)
784 #define its_cmd_get_target_addr(cmd)	its_cmd_mask_field(cmd, 2, 16, 32)
785 #define its_cmd_get_validbit(cmd)	its_cmd_mask_field(cmd, 2, 63,  1)
786 
787 /*
788  * The DISCARD command frees an Interrupt Translation Table Entry (ITTE).
789  * Must be called with the its_lock mutex held.
790  */
791 static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
792 				       u64 *its_cmd)
793 {
794 	u32 device_id = its_cmd_get_deviceid(its_cmd);
795 	u32 event_id = its_cmd_get_id(its_cmd);
796 	struct its_ite *ite;
797 
798 	ite = find_ite(its, device_id, event_id);
799 	if (ite && its_is_collection_mapped(ite->collection)) {
800 		struct its_device *device = find_its_device(its, device_id);
801 		int ite_esz = vgic_its_get_abi(its)->ite_esz;
802 		gpa_t gpa = device->itt_addr + ite->event_id * ite_esz;
803 		/*
804 		 * Though the spec talks about removing the pending state, we
805 		 * don't bother here since we clear the ITTE anyway and the
806 		 * pending state is a property of the ITTE struct.
807 		 */
808 		vgic_its_invalidate_cache(its);
809 
810 		its_free_ite(kvm, ite);
811 
812 		return vgic_its_write_entry_lock(its, gpa, 0ULL, ite);
813 	}
814 
815 	return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
816 }
817 
818 /*
819  * The MOVI command moves an ITTE to a different collection.
820  * Must be called with the its_lock mutex held.
821  */
822 static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
823 				    u64 *its_cmd)
824 {
825 	u32 device_id = its_cmd_get_deviceid(its_cmd);
826 	u32 event_id = its_cmd_get_id(its_cmd);
827 	u32 coll_id = its_cmd_get_collection(its_cmd);
828 	struct kvm_vcpu *vcpu;
829 	struct its_ite *ite;
830 	struct its_collection *collection;
831 
832 	ite = find_ite(its, device_id, event_id);
833 	if (!ite)
834 		return E_ITS_MOVI_UNMAPPED_INTERRUPT;
835 
836 	if (!its_is_collection_mapped(ite->collection))
837 		return E_ITS_MOVI_UNMAPPED_COLLECTION;
838 
839 	collection = find_collection(its, coll_id);
840 	if (!its_is_collection_mapped(collection))
841 		return E_ITS_MOVI_UNMAPPED_COLLECTION;
842 
843 	ite->collection = collection;
844 	vcpu = collection_to_vcpu(kvm, collection);
845 
846 	vgic_its_invalidate_cache(its);
847 
848 	return update_affinity(ite->irq, vcpu);
849 }
850 
851 static bool __is_visible_gfn_locked(struct vgic_its *its, gpa_t gpa)
852 {
853 	gfn_t gfn = gpa >> PAGE_SHIFT;
854 	int idx;
855 	bool ret;
856 
857 	idx = srcu_read_lock(&its->dev->kvm->srcu);
858 	ret = kvm_is_visible_gfn(its->dev->kvm, gfn);
859 	srcu_read_unlock(&its->dev->kvm->srcu, idx);
860 	return ret;
861 }
862 
863 /*
864  * Check whether an ID can be stored into the corresponding guest table.
865  * For a direct table this is pretty easy, but gets a bit nasty for
866  * indirect tables. We check whether the resulting guest physical address
867  * is actually valid (covered by a memslot and guest accessible).
868  * For this we have to read the respective first level entry.
869  */
870 static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
871 			      gpa_t *eaddr)
872 {
873 	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
874 	u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
875 	phys_addr_t base = GITS_BASER_ADDR_48_to_52(baser);
876 	int esz = GITS_BASER_ENTRY_SIZE(baser);
877 	int index;
878 
879 	switch (type) {
880 	case GITS_BASER_TYPE_DEVICE:
881 		if (id > VITS_MAX_DEVID)
882 			return false;
883 		break;
884 	case GITS_BASER_TYPE_COLLECTION:
885 		/* as GITS_TYPER.CIL == 0, ITS supports 16-bit collection ID */
886 		if (id >= BIT_ULL(16))
887 			return false;
888 		break;
889 	default:
890 		return false;
891 	}
892 
893 	if (!(baser & GITS_BASER_INDIRECT)) {
894 		phys_addr_t addr;
895 
896 		if (id >= (l1_tbl_size / esz))
897 			return false;
898 
899 		addr = base + id * esz;
900 
901 		if (eaddr)
902 			*eaddr = addr;
903 
904 		return __is_visible_gfn_locked(its, addr);
905 	}
906 
907 	/* calculate and check the index into the 1st level */
908 	index = id / (SZ_64K / esz);
909 	if (index >= (l1_tbl_size / sizeof(u64)))
910 		return false;
911 
912 	/* Each 1st level entry is represented by a 64-bit value. */
913 	if (kvm_read_guest_lock(its->dev->kvm,
914 			   base + index * sizeof(indirect_ptr),
915 			   &indirect_ptr, sizeof(indirect_ptr)))
916 		return false;
917 
918 	indirect_ptr = le64_to_cpu(indirect_ptr);
919 
920 	/* check the valid bit of the first level entry */
921 	if (!(indirect_ptr & BIT_ULL(63)))
922 		return false;
923 
924 	/* Mask the guest physical address and calculate the frame number. */
925 	indirect_ptr &= GENMASK_ULL(51, 16);
926 
927 	/* Find the address of the actual entry */
928 	index = id % (SZ_64K / esz);
929 	indirect_ptr += index * esz;
930 
931 	if (eaddr)
932 		*eaddr = indirect_ptr;
933 
934 	return __is_visible_gfn_locked(its, indirect_ptr);
935 }
936 
937 /*
938  * Check whether an event ID can be stored in the corresponding Interrupt
939  * Translation Table, which starts at device->itt_addr.
940  */
941 static bool vgic_its_check_event_id(struct vgic_its *its, struct its_device *device,
942 		u32 event_id)
943 {
944 	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
945 	int ite_esz = abi->ite_esz;
946 	gpa_t gpa;
947 
948 	/* max table size is: BIT_ULL(device->num_eventid_bits) * ite_esz */
949 	if (event_id >= BIT_ULL(device->num_eventid_bits))
950 		return false;
951 
952 	gpa = device->itt_addr + event_id * ite_esz;
953 	return __is_visible_gfn_locked(its, gpa);
954 }
955 
956 /*
957  * Add a new collection into the ITS collection table.
958  * Returns 0 on success, and a negative error value for generic errors.
959  */
960 static int vgic_its_alloc_collection(struct vgic_its *its,
961 				     struct its_collection **colp,
962 				     u32 coll_id)
963 {
964 	struct its_collection *collection;
965 
966 	collection = kzalloc(sizeof(*collection), GFP_KERNEL_ACCOUNT);
967 	if (!collection)
968 		return -ENOMEM;
969 
970 	collection->collection_id = coll_id;
971 	collection->target_addr = COLLECTION_NOT_MAPPED;
972 
973 	list_add_tail(&collection->coll_list, &its->collection_list);
974 	*colp = collection;
975 
976 	return 0;
977 }
978 
979 static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id)
980 {
981 	struct its_collection *collection;
982 	struct its_device *device;
983 	struct its_ite *ite;
984 
985 	/*
986 	 * Clearing the mapping for that collection ID removes the
987 	 * entry from the list. If there wasn't any before, we can
988 	 * go home early.
989 	 */
990 	collection = find_collection(its, coll_id);
991 	if (!collection)
992 		return;
993 
994 	for_each_lpi_its(device, ite, its)
995 		if (ite->collection &&
996 		    ite->collection->collection_id == coll_id)
997 			ite->collection = NULL;
998 
999 	list_del(&collection->coll_list);
1000 	kfree(collection);
1001 }
1002 
1003 /* Must be called with its_lock mutex held */
1004 static struct its_ite *vgic_its_alloc_ite(struct its_device *device,
1005 					  struct its_collection *collection,
1006 					  u32 event_id)
1007 {
1008 	struct its_ite *ite;
1009 
1010 	ite = kzalloc(sizeof(*ite), GFP_KERNEL_ACCOUNT);
1011 	if (!ite)
1012 		return ERR_PTR(-ENOMEM);
1013 
1014 	ite->event_id	= event_id;
1015 	ite->collection = collection;
1016 
1017 	list_add_tail(&ite->ite_list, &device->itt_head);
1018 	return ite;
1019 }
1020 
1021 /*
1022  * The MAPTI and MAPI commands map LPIs to ITTEs.
1023  * Must be called with its_lock mutex held.
1024  */
1025 static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
1026 				    u64 *its_cmd)
1027 {
1028 	u32 device_id = its_cmd_get_deviceid(its_cmd);
1029 	u32 event_id = its_cmd_get_id(its_cmd);
1030 	u32 coll_id = its_cmd_get_collection(its_cmd);
1031 	struct its_ite *ite;
1032 	struct kvm_vcpu *vcpu = NULL;
1033 	struct its_device *device;
1034 	struct its_collection *collection, *new_coll = NULL;
1035 	struct vgic_irq *irq;
1036 	int lpi_nr;
1037 
1038 	device = find_its_device(its, device_id);
1039 	if (!device)
1040 		return E_ITS_MAPTI_UNMAPPED_DEVICE;
1041 
1042 	if (!vgic_its_check_event_id(its, device, event_id))
1043 		return E_ITS_MAPTI_ID_OOR;
1044 
1045 	if (its_cmd_get_command(its_cmd) == GITS_CMD_MAPTI)
1046 		lpi_nr = its_cmd_get_physical_id(its_cmd);
1047 	else
1048 		lpi_nr = event_id;
1049 	if (lpi_nr < GIC_LPI_OFFSET ||
1050 	    lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
1051 		return E_ITS_MAPTI_PHYSICALID_OOR;
1052 
1053 	/* If there is an existing mapping, behavior is UNPREDICTABLE. */
1054 	if (find_ite(its, device_id, event_id))
1055 		return 0;
1056 
1057 	collection = find_collection(its, coll_id);
1058 	if (!collection) {
1059 		int ret;
1060 
1061 		if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL))
1062 			return E_ITS_MAPC_COLLECTION_OOR;
1063 
1064 		ret = vgic_its_alloc_collection(its, &collection, coll_id);
1065 		if (ret)
1066 			return ret;
1067 		new_coll = collection;
1068 	}
1069 
1070 	ite = vgic_its_alloc_ite(device, collection, event_id);
1071 	if (IS_ERR(ite)) {
1072 		if (new_coll)
1073 			vgic_its_free_collection(its, coll_id);
1074 		return PTR_ERR(ite);
1075 	}
1076 
1077 	if (its_is_collection_mapped(collection))
1078 		vcpu = collection_to_vcpu(kvm, collection);
1079 
1080 	irq = vgic_add_lpi(kvm, lpi_nr, vcpu);
1081 	if (IS_ERR(irq)) {
1082 		if (new_coll)
1083 			vgic_its_free_collection(its, coll_id);
1084 		its_free_ite(kvm, ite);
1085 		return PTR_ERR(irq);
1086 	}
1087 	ite->irq = irq;
1088 
1089 	return 0;
1090 }
1091 
1092 /* Requires the its_lock to be held. */
1093 static void vgic_its_free_device(struct kvm *kvm, struct vgic_its *its,
1094 				 struct its_device *device)
1095 {
1096 	struct its_ite *ite, *temp;
1097 
1098 	/*
1099 	 * The spec says that unmapping a device with still valid
1100 	 * ITTEs associated is UNPREDICTABLE. We remove all ITTEs,
1101 	 * since we cannot leave the memory unreferenced.
1102 	 */
1103 	list_for_each_entry_safe(ite, temp, &device->itt_head, ite_list)
1104 		its_free_ite(kvm, ite);
1105 
1106 	vgic_its_invalidate_cache(its);
1107 
1108 	list_del(&device->dev_list);
1109 	kfree(device);
1110 }
1111 
1112 /* its lock must be held */
1113 static void vgic_its_free_device_list(struct kvm *kvm, struct vgic_its *its)
1114 {
1115 	struct its_device *cur, *temp;
1116 
1117 	list_for_each_entry_safe(cur, temp, &its->device_list, dev_list)
1118 		vgic_its_free_device(kvm, its, cur);
1119 }
1120 
1121 /* its lock must be held */
1122 static void vgic_its_free_collection_list(struct kvm *kvm, struct vgic_its *its)
1123 {
1124 	struct its_collection *cur, *temp;
1125 
1126 	list_for_each_entry_safe(cur, temp, &its->collection_list, coll_list)
1127 		vgic_its_free_collection(its, cur->collection_id);
1128 }
1129 
1130 /* Must be called with its_lock mutex held */
1131 static struct its_device *vgic_its_alloc_device(struct vgic_its *its,
1132 						u32 device_id, gpa_t itt_addr,
1133 						u8 num_eventid_bits)
1134 {
1135 	struct its_device *device;
1136 
1137 	device = kzalloc(sizeof(*device), GFP_KERNEL_ACCOUNT);
1138 	if (!device)
1139 		return ERR_PTR(-ENOMEM);
1140 
1141 	device->device_id = device_id;
1142 	device->itt_addr = itt_addr;
1143 	device->num_eventid_bits = num_eventid_bits;
1144 	INIT_LIST_HEAD(&device->itt_head);
1145 
1146 	list_add_tail(&device->dev_list, &its->device_list);
1147 	return device;
1148 }
1149 
1150 /*
1151  * MAPD maps or unmaps a device ID to Interrupt Translation Tables (ITTs).
1152  * Must be called with the its_lock mutex held.
1153  */
1154 static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
1155 				    u64 *its_cmd)
1156 {
1157 	u32 device_id = its_cmd_get_deviceid(its_cmd);
1158 	bool valid = its_cmd_get_validbit(its_cmd);
1159 	u8 num_eventid_bits = its_cmd_get_size(its_cmd);
1160 	gpa_t itt_addr = its_cmd_get_ittaddr(its_cmd);
1161 	struct its_device *device;
1162 	gpa_t gpa;
1163 
1164 	if (!vgic_its_check_id(its, its->baser_device_table, device_id, &gpa))
1165 		return E_ITS_MAPD_DEVICE_OOR;
1166 
1167 	if (valid && num_eventid_bits > VITS_TYPER_IDBITS)
1168 		return E_ITS_MAPD_ITTSIZE_OOR;
1169 
1170 	device = find_its_device(its, device_id);
1171 
1172 	/*
1173 	 * The spec says that calling MAPD on an already mapped device
1174 	 * invalidates all cached data for this device. We implement this
1175 	 * by removing the mapping and re-establishing it.
1176 	 */
1177 	if (device)
1178 		vgic_its_free_device(kvm, its, device);
1179 
1180 	/*
1181 	 * The spec does not say whether unmapping a not-mapped device
1182 	 * is an error, so we are done in any case.
1183 	 */
1184 	if (!valid)
1185 		return vgic_its_write_entry_lock(its, gpa, 0ULL, dte);
1186 
1187 	device = vgic_its_alloc_device(its, device_id, itt_addr,
1188 				       num_eventid_bits);
1189 
1190 	return PTR_ERR_OR_ZERO(device);
1191 }
1192 
1193 /*
1194  * The MAPC command maps collection IDs to redistributors.
1195  * Must be called with the its_lock mutex held.
1196  */
1197 static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
1198 				    u64 *its_cmd)
1199 {
1200 	u16 coll_id;
1201 	struct its_collection *collection;
1202 	bool valid;
1203 
1204 	valid = its_cmd_get_validbit(its_cmd);
1205 	coll_id = its_cmd_get_collection(its_cmd);
1206 
1207 	if (!valid) {
1208 		vgic_its_free_collection(its, coll_id);
1209 		vgic_its_invalidate_cache(its);
1210 	} else {
1211 		struct kvm_vcpu *vcpu;
1212 
1213 		vcpu = kvm_get_vcpu_by_id(kvm, its_cmd_get_target_addr(its_cmd));
1214 		if (!vcpu)
1215 			return E_ITS_MAPC_PROCNUM_OOR;
1216 
1217 		collection = find_collection(its, coll_id);
1218 
1219 		if (!collection) {
1220 			int ret;
1221 
1222 			if (!vgic_its_check_id(its, its->baser_coll_table,
1223 						coll_id, NULL))
1224 				return E_ITS_MAPC_COLLECTION_OOR;
1225 
1226 			ret = vgic_its_alloc_collection(its, &collection,
1227 							coll_id);
1228 			if (ret)
1229 				return ret;
1230 			collection->target_addr = vcpu->vcpu_id;
1231 		} else {
1232 			collection->target_addr = vcpu->vcpu_id;
1233 			update_affinity_collection(kvm, its, collection);
1234 		}
1235 	}
1236 
1237 	return 0;
1238 }
1239 
1240 /*
1241  * The CLEAR command removes the pending state for a particular LPI.
1242  * Must be called with the its_lock mutex held.
1243  */
1244 static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
1245 				     u64 *its_cmd)
1246 {
1247 	u32 device_id = its_cmd_get_deviceid(its_cmd);
1248 	u32 event_id = its_cmd_get_id(its_cmd);
1249 	struct its_ite *ite;
1250 
1251 
1252 	ite = find_ite(its, device_id, event_id);
1253 	if (!ite)
1254 		return E_ITS_CLEAR_UNMAPPED_INTERRUPT;
1255 
1256 	ite->irq->pending_latch = false;
1257 
1258 	if (ite->irq->hw)
1259 		return irq_set_irqchip_state(ite->irq->host_irq,
1260 					     IRQCHIP_STATE_PENDING, false);
1261 
1262 	return 0;
1263 }
1264 
1265 int vgic_its_inv_lpi(struct kvm *kvm, struct vgic_irq *irq)
1266 {
1267 	return update_lpi_config(kvm, irq, NULL, true);
1268 }
1269 
1270 /*
1271  * The INV command syncs the configuration bits from the memory table.
1272  * Must be called with the its_lock mutex held.
1273  */
1274 static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
1275 				   u64 *its_cmd)
1276 {
1277 	u32 device_id = its_cmd_get_deviceid(its_cmd);
1278 	u32 event_id = its_cmd_get_id(its_cmd);
1279 	struct its_ite *ite;
1280 
1281 
1282 	ite = find_ite(its, device_id, event_id);
1283 	if (!ite)
1284 		return E_ITS_INV_UNMAPPED_INTERRUPT;
1285 
1286 	return vgic_its_inv_lpi(kvm, ite->irq);
1287 }
1288 
1289 /**
1290  * vgic_its_invall - invalidate all LPIs targeting a given vcpu
1291  * @vcpu: the vcpu for which the RD is targeted by an invalidation
1292  *
1293  * Contrary to the INVALL command, this targets a RD instead of a
1294  * collection, and we don't need to hold the its_lock, since no ITS is
1295  * involved here.
1296  */
1297 int vgic_its_invall(struct kvm_vcpu *vcpu)
1298 {
1299 	struct kvm *kvm = vcpu->kvm;
1300 	struct vgic_dist *dist = &kvm->arch.vgic;
1301 	struct vgic_irq *irq;
1302 	unsigned long intid;
1303 
1304 	xa_for_each(&dist->lpi_xa, intid, irq) {
1305 		irq = vgic_get_irq(kvm, intid);
1306 		if (!irq)
1307 			continue;
1308 
1309 		update_lpi_config(kvm, irq, vcpu, false);
1310 		vgic_put_irq(kvm, irq);
1311 	}
1312 
1313 	if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
1314 		its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);
1315 
1316 	return 0;
1317 }
1318 
1319 /*
1320  * The INVALL command requests flushing of all IRQ data in this collection.
1321  * Find the VCPU mapped to that collection, then iterate over the VM's list
1322  * of mapped LPIs and update the configuration for each IRQ which targets
1323  * the specified vcpu. The configuration will be read from the in-memory
1324  * configuration table.
1325  * Must be called with the its_lock mutex held.
1326  */
1327 static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
1328 				      u64 *its_cmd)
1329 {
1330 	u32 coll_id = its_cmd_get_collection(its_cmd);
1331 	struct its_collection *collection;
1332 	struct kvm_vcpu *vcpu;
1333 
1334 	collection = find_collection(its, coll_id);
1335 	if (!its_is_collection_mapped(collection))
1336 		return E_ITS_INVALL_UNMAPPED_COLLECTION;
1337 
1338 	vcpu = collection_to_vcpu(kvm, collection);
1339 	vgic_its_invall(vcpu);
1340 
1341 	return 0;
1342 }
1343 
1344 /*
1345  * The MOVALL command moves the pending state of all IRQs targeting one
1346  * redistributor to another. We don't hold the pending state in the VCPUs,
1347  * but in the IRQs instead, so there is really not much to do for us here.
1348  * However the spec says that no IRQ must target the old redistributor
1349  * afterwards, so we make sure that no LPI is using the associated target_vcpu.
1350  * This command affects all LPIs in the system that target that redistributor.
1351  */
1352 static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
1353 				      u64 *its_cmd)
1354 {
1355 	struct vgic_dist *dist = &kvm->arch.vgic;
1356 	struct kvm_vcpu *vcpu1, *vcpu2;
1357 	struct vgic_irq *irq;
1358 	unsigned long intid;
1359 
1360 	/* We advertise GITS_TYPER.PTA==0, making the address the vcpu ID */
1361 	vcpu1 = kvm_get_vcpu_by_id(kvm, its_cmd_get_target_addr(its_cmd));
1362 	vcpu2 = kvm_get_vcpu_by_id(kvm, its_cmd_mask_field(its_cmd, 3, 16, 32));
1363 
1364 	if (!vcpu1 || !vcpu2)
1365 		return E_ITS_MOVALL_PROCNUM_OOR;
1366 
1367 	if (vcpu1 == vcpu2)
1368 		return 0;
1369 
1370 	xa_for_each(&dist->lpi_xa, intid, irq) {
1371 		irq = vgic_get_irq(kvm, intid);
1372 		if (!irq)
1373 			continue;
1374 
1375 		update_affinity(irq, vcpu2);
1376 
1377 		vgic_put_irq(kvm, irq);
1378 	}
1379 
1380 	vgic_its_invalidate_cache(its);
1381 
1382 	return 0;
1383 }
1384 
1385 /*
1386  * The INT command injects the LPI associated with that DevID/EvID pair.
1387  * Must be called with the its_lock mutex held.
1388  */
1389 static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
1390 				   u64 *its_cmd)
1391 {
1392 	u32 msi_data = its_cmd_get_id(its_cmd);
1393 	u64 msi_devid = its_cmd_get_deviceid(its_cmd);
1394 
1395 	return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
1396 }
1397 
1398 /*
1399  * This function is called with the its_cmd lock held, but the ITS data
1400  * structure lock dropped.
1401  */
1402 static int vgic_its_handle_command(struct kvm *kvm, struct vgic_its *its,
1403 				   u64 *its_cmd)
1404 {
1405 	int ret = -ENODEV;
1406 
1407 	mutex_lock(&its->its_lock);
1408 	switch (its_cmd_get_command(its_cmd)) {
1409 	case GITS_CMD_MAPD:
1410 		ret = vgic_its_cmd_handle_mapd(kvm, its, its_cmd);
1411 		break;
1412 	case GITS_CMD_MAPC:
1413 		ret = vgic_its_cmd_handle_mapc(kvm, its, its_cmd);
1414 		break;
1415 	case GITS_CMD_MAPI:
1416 		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
1417 		break;
1418 	case GITS_CMD_MAPTI:
1419 		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
1420 		break;
1421 	case GITS_CMD_MOVI:
1422 		ret = vgic_its_cmd_handle_movi(kvm, its, its_cmd);
1423 		break;
1424 	case GITS_CMD_DISCARD:
1425 		ret = vgic_its_cmd_handle_discard(kvm, its, its_cmd);
1426 		break;
1427 	case GITS_CMD_CLEAR:
1428 		ret = vgic_its_cmd_handle_clear(kvm, its, its_cmd);
1429 		break;
1430 	case GITS_CMD_MOVALL:
1431 		ret = vgic_its_cmd_handle_movall(kvm, its, its_cmd);
1432 		break;
1433 	case GITS_CMD_INT:
1434 		ret = vgic_its_cmd_handle_int(kvm, its, its_cmd);
1435 		break;
1436 	case GITS_CMD_INV:
1437 		ret = vgic_its_cmd_handle_inv(kvm, its, its_cmd);
1438 		break;
1439 	case GITS_CMD_INVALL:
1440 		ret = vgic_its_cmd_handle_invall(kvm, its, its_cmd);
1441 		break;
1442 	case GITS_CMD_SYNC:
1443 		/* we ignore this command: we are in sync all of the time */
1444 		ret = 0;
1445 		break;
1446 	}
1447 	mutex_unlock(&its->its_lock);
1448 
1449 	return ret;
1450 }
1451 
1452 static u64 vgic_sanitise_its_baser(u64 reg)
1453 {
1454 	reg = vgic_sanitise_field(reg, GITS_BASER_SHAREABILITY_MASK,
1455 				  GITS_BASER_SHAREABILITY_SHIFT,
1456 				  vgic_sanitise_shareability);
1457 	reg = vgic_sanitise_field(reg, GITS_BASER_INNER_CACHEABILITY_MASK,
1458 				  GITS_BASER_INNER_CACHEABILITY_SHIFT,
1459 				  vgic_sanitise_inner_cacheability);
1460 	reg = vgic_sanitise_field(reg, GITS_BASER_OUTER_CACHEABILITY_MASK,
1461 				  GITS_BASER_OUTER_CACHEABILITY_SHIFT,
1462 				  vgic_sanitise_outer_cacheability);
1463 
1464 	/* We support only one (ITS) page size: 64K */
1465 	reg = (reg & ~GITS_BASER_PAGE_SIZE_MASK) | GITS_BASER_PAGE_SIZE_64K;
1466 
1467 	return reg;
1468 }
1469 
1470 static u64 vgic_sanitise_its_cbaser(u64 reg)
1471 {
1472 	reg = vgic_sanitise_field(reg, GITS_CBASER_SHAREABILITY_MASK,
1473 				  GITS_CBASER_SHAREABILITY_SHIFT,
1474 				  vgic_sanitise_shareability);
1475 	reg = vgic_sanitise_field(reg, GITS_CBASER_INNER_CACHEABILITY_MASK,
1476 				  GITS_CBASER_INNER_CACHEABILITY_SHIFT,
1477 				  vgic_sanitise_inner_cacheability);
1478 	reg = vgic_sanitise_field(reg, GITS_CBASER_OUTER_CACHEABILITY_MASK,
1479 				  GITS_CBASER_OUTER_CACHEABILITY_SHIFT,
1480 				  vgic_sanitise_outer_cacheability);
1481 
1482 	/* Sanitise the physical address to be 64k aligned. */
1483 	reg &= ~GENMASK_ULL(15, 12);
1484 
1485 	return reg;
1486 }
1487 
1488 static unsigned long vgic_mmio_read_its_cbaser(struct kvm *kvm,
1489 					       struct vgic_its *its,
1490 					       gpa_t addr, unsigned int len)
1491 {
1492 	return extract_bytes(its->cbaser, addr & 7, len);
1493 }
1494 
1495 static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
1496 				       gpa_t addr, unsigned int len,
1497 				       unsigned long val)
1498 {
1499 	/* When GITS_CTLR.Enable is 1, this register is RO. */
1500 	if (its->enabled)
1501 		return;
1502 
1503 	mutex_lock(&its->cmd_lock);
1504 	its->cbaser = update_64bit_reg(its->cbaser, addr & 7, len, val);
1505 	its->cbaser = vgic_sanitise_its_cbaser(its->cbaser);
1506 	its->creadr = 0;
1507 	/*
1508 	 * CWRITER is architecturally UNKNOWN on reset, but we need to reset
1509 	 * it to CREADR to make sure we start with an empty command buffer.
1510 	 */
1511 	its->cwriter = its->creadr;
1512 	mutex_unlock(&its->cmd_lock);
1513 }
1514 
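/*
 * GITS_CBASER.Size encodes the number of 4K pages of the command queue
 * minus one, and each command is 32 bytes; e.g. Size == 0 means one page,
 * i.e. room for 128 commands.
 */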
1515 #define ITS_CMD_BUFFER_SIZE(baser)	((((baser) & 0xff) + 1) << 12)
1516 #define ITS_CMD_SIZE			32
1517 #define ITS_CMD_OFFSET(reg)		((reg) & GENMASK(19, 5))
1518 
1519 /* Must be called with the cmd_lock held. */
1520 static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
1521 {
1522 	gpa_t cbaser;
1523 	u64 cmd_buf[4];
1524 
1525 	/* Commands are only processed when the ITS is enabled. */
1526 	if (!its->enabled)
1527 		return;
1528 
1529 	cbaser = GITS_CBASER_ADDRESS(its->cbaser);
1530 
1531 	while (its->cwriter != its->creadr) {
1532 		int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
1533 					      cmd_buf, ITS_CMD_SIZE);
1534 		/*
1535 		 * If kvm_read_guest_lock() fails, this could be due to the guest
1536 		 * programming a bogus value in CBASER or something else going
1537 		 * wrong from which we cannot easily recover.
1538 		 * According to section 6.3.2 in the GICv3 spec we can just
1539 		 * ignore that command then.
1540 		 */
1541 		if (!ret)
1542 			vgic_its_handle_command(kvm, its, cmd_buf);
1543 
1544 		its->creadr += ITS_CMD_SIZE;
1545 		if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
1546 			its->creadr = 0;
1547 	}
1548 }
1549 
1550 /*
1551  * By writing to CWRITER the guest announces new commands to be processed.
1552  * To avoid any races in the first place, we take the its_cmd lock, which
1553  * protects our ring buffer variables, so that there is only one user
1554  * per ITS handling commands at a given time.
1555  */
1556 static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
1557 					gpa_t addr, unsigned int len,
1558 					unsigned long val)
1559 {
1560 	u64 reg;
1561 
1562 	if (!its)
1563 		return;
1564 
1565 	mutex_lock(&its->cmd_lock);
1566 
1567 	reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
1568 	reg = ITS_CMD_OFFSET(reg);
1569 	if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
1570 		mutex_unlock(&its->cmd_lock);
1571 		return;
1572 	}
1573 	its->cwriter = reg;
1574 
1575 	vgic_its_process_commands(kvm, its);
1576 
1577 	mutex_unlock(&its->cmd_lock);
1578 }
1579 
1580 static unsigned long vgic_mmio_read_its_cwriter(struct kvm *kvm,
1581 						struct vgic_its *its,
1582 						gpa_t addr, unsigned int len)
1583 {
1584 	return extract_bytes(its->cwriter, addr & 0x7, len);
1585 }
1586 
1587 static unsigned long vgic_mmio_read_its_creadr(struct kvm *kvm,
1588 					       struct vgic_its *its,
1589 					       gpa_t addr, unsigned int len)
1590 {
1591 	return extract_bytes(its->creadr, addr & 0x7, len);
1592 }
1593 
1594 static int vgic_mmio_uaccess_write_its_creadr(struct kvm *kvm,
1595 					      struct vgic_its *its,
1596 					      gpa_t addr, unsigned int len,
1597 					      unsigned long val)
1598 {
1599 	u32 cmd_offset;
1600 	int ret = 0;
1601 
1602 	mutex_lock(&its->cmd_lock);
1603 
1604 	if (its->enabled) {
1605 		ret = -EBUSY;
1606 		goto out;
1607 	}
1608 
1609 	cmd_offset = ITS_CMD_OFFSET(val);
1610 	if (cmd_offset >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
1611 		ret = -EINVAL;
1612 		goto out;
1613 	}
1614 
1615 	its->creadr = cmd_offset;
1616 out:
1617 	mutex_unlock(&its->cmd_lock);
1618 	return ret;
1619 }
1620 
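/*
 * The GITS_BASER<n> registers are 8 bytes apart; only indices 0 (device
 * table) and 1 (collection table) are implemented here.
 */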
1621 #define BASER_INDEX(addr) (((addr) / sizeof(u64)) & 0x7)
1622 static unsigned long vgic_mmio_read_its_baser(struct kvm *kvm,
1623 					      struct vgic_its *its,
1624 					      gpa_t addr, unsigned int len)
1625 {
1626 	u64 reg;
1627 
1628 	switch (BASER_INDEX(addr)) {
1629 	case 0:
1630 		reg = its->baser_device_table;
1631 		break;
1632 	case 1:
1633 		reg = its->baser_coll_table;
1634 		break;
1635 	default:
1636 		reg = 0;
1637 		break;
1638 	}
1639 
1640 	return extract_bytes(reg, addr & 7, len);
1641 }
1642 
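/* Entry size and table type are read-only fields in GITS_BASER<n>. */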
1643 #define GITS_BASER_RO_MASK	(GENMASK_ULL(52, 48) | GENMASK_ULL(58, 56))
1644 static void vgic_mmio_write_its_baser(struct kvm *kvm,
1645 				      struct vgic_its *its,
1646 				      gpa_t addr, unsigned int len,
1647 				      unsigned long val)
1648 {
1649 	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
1650 	u64 entry_size, table_type;
1651 	u64 reg, *regptr, clearbits = 0;
1652 
1653 	/* When GITS_CTLR.Enable is 1, we ignore write accesses. */
1654 	if (its->enabled)
1655 		return;
1656 
1657 	switch (BASER_INDEX(addr)) {
1658 	case 0:
1659 		regptr = &its->baser_device_table;
1660 		entry_size = abi->dte_esz;
1661 		table_type = GITS_BASER_TYPE_DEVICE;
1662 		break;
1663 	case 1:
1664 		regptr = &its->baser_coll_table;
1665 		entry_size = abi->cte_esz;
1666 		table_type = GITS_BASER_TYPE_COLLECTION;
1667 		clearbits = GITS_BASER_INDIRECT;
1668 		break;
1669 	default:
1670 		return;
1671 	}
1672 
1673 	reg = update_64bit_reg(*regptr, addr & 7, len, val);
1674 	reg &= ~GITS_BASER_RO_MASK;
1675 	reg &= ~clearbits;
1676 
1677 	reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
1678 	reg |= table_type << GITS_BASER_TYPE_SHIFT;
1679 	reg = vgic_sanitise_its_baser(reg);
1680 
1681 	*regptr = reg;
1682 
1683 	if (!(reg & GITS_BASER_VALID)) {
1684 		/* Take the its_lock to prevent a race with a save/restore */
1685 		mutex_lock(&its->its_lock);
1686 		switch (table_type) {
1687 		case GITS_BASER_TYPE_DEVICE:
1688 			vgic_its_free_device_list(kvm, its);
1689 			break;
1690 		case GITS_BASER_TYPE_COLLECTION:
1691 			vgic_its_free_collection_list(kvm, its);
1692 			break;
1693 		}
1694 		mutex_unlock(&its->its_lock);
1695 	}
1696 }
1697 
1698 static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
1699 					     struct vgic_its *its,
1700 					     gpa_t addr, unsigned int len)
1701 {
1702 	u32 reg = 0;
1703 
1704 	mutex_lock(&its->cmd_lock);
1705 	if (its->creadr == its->cwriter)
1706 		reg |= GITS_CTLR_QUIESCENT;
1707 	if (its->enabled)
1708 		reg |= GITS_CTLR_ENABLE;
1709 	mutex_unlock(&its->cmd_lock);
1710 
1711 	return reg;
1712 }
1713 
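/*
 * GITS_CTLR write: refuse to enable the ITS while CBASER or one of the
 * BASER registers is invalid, invalidate the translation cache when the
 * ITS gets disabled, and process any queued commands when it is enabled.
 */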
1714 static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
1715 				     gpa_t addr, unsigned int len,
1716 				     unsigned long val)
1717 {
1718 	mutex_lock(&its->cmd_lock);
1719 
1720 	/*
1721 	 * It is UNPREDICTABLE to enable the ITS if the CBASER or the
1722 	 * device/collection BASER registers are invalid.
1723 	 */
1724 	if (!its->enabled && (val & GITS_CTLR_ENABLE) &&
1725 		(!(its->baser_device_table & GITS_BASER_VALID) ||
1726 		 !(its->baser_coll_table & GITS_BASER_VALID) ||
1727 		 !(its->cbaser & GITS_CBASER_VALID)))
1728 		goto out;
1729 
1730 	its->enabled = !!(val & GITS_CTLR_ENABLE);
1731 	if (!its->enabled)
1732 		vgic_its_invalidate_cache(its);
1733 
1734 	/*
1735 	 * Try to process any pending commands. This function bails out early
1736 	 * if the ITS is disabled or no commands have been queued.
1737 	 */
1738 	vgic_its_process_commands(kvm, its);
1739 
1740 out:
1741 	mutex_unlock(&its->cmd_lock);
1742 }
1743 
1744 #define REGISTER_ITS_DESC(off, rd, wr, length, acc)		\
1745 {								\
1746 	.reg_offset = off,					\
1747 	.len = length,						\
1748 	.access_flags = acc,					\
1749 	.its_read = rd,						\
1750 	.its_write = wr,					\
1751 }
1752 
1753 #define REGISTER_ITS_DESC_UACCESS(off, rd, wr, uwr, length, acc)\
1754 {								\
1755 	.reg_offset = off,					\
1756 	.len = length,						\
1757 	.access_flags = acc,					\
1758 	.its_read = rd,						\
1759 	.its_write = wr,					\
1760 	.uaccess_its_write = uwr,				\
1761 }
1762 
1763 static void its_mmio_write_wi(struct kvm *kvm, struct vgic_its *its,
1764 			      gpa_t addr, unsigned int len, unsigned long val)
1765 {
1766 	/* Ignore */
1767 }
1768 
1769 static struct vgic_register_region its_registers[] = {
1770 	REGISTER_ITS_DESC(GITS_CTLR,
1771 		vgic_mmio_read_its_ctlr, vgic_mmio_write_its_ctlr, 4,
1772 		VGIC_ACCESS_32bit),
1773 	REGISTER_ITS_DESC_UACCESS(GITS_IIDR,
1774 		vgic_mmio_read_its_iidr, its_mmio_write_wi,
1775 		vgic_mmio_uaccess_write_its_iidr, 4,
1776 		VGIC_ACCESS_32bit),
1777 	REGISTER_ITS_DESC(GITS_TYPER,
1778 		vgic_mmio_read_its_typer, its_mmio_write_wi, 8,
1779 		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1780 	REGISTER_ITS_DESC(GITS_CBASER,
1781 		vgic_mmio_read_its_cbaser, vgic_mmio_write_its_cbaser, 8,
1782 		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1783 	REGISTER_ITS_DESC(GITS_CWRITER,
1784 		vgic_mmio_read_its_cwriter, vgic_mmio_write_its_cwriter, 8,
1785 		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1786 	REGISTER_ITS_DESC_UACCESS(GITS_CREADR,
1787 		vgic_mmio_read_its_creadr, its_mmio_write_wi,
1788 		vgic_mmio_uaccess_write_its_creadr, 8,
1789 		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1790 	REGISTER_ITS_DESC(GITS_BASER,
1791 		vgic_mmio_read_its_baser, vgic_mmio_write_its_baser, 0x40,
1792 		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1793 	REGISTER_ITS_DESC(GITS_IDREGS_BASE,
1794 		vgic_mmio_read_its_idregs, its_mmio_write_wi, 0x30,
1795 		VGIC_ACCESS_32bit),
1796 };
1797 
1798 /* This is called on setting the LPI enable bit in the redistributor. */
1799 void vgic_enable_lpis(struct kvm_vcpu *vcpu)
1800 {
1801 	if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ))
1802 		its_sync_lpi_pending_table(vcpu);
1803 }
1804 
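/*
 * Register the ITS register frame at @addr on the KVM MMIO bus.
 * Fails with -EBUSY if a base address has already been set for this ITS.
 */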
1805 static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its,
1806 				   u64 addr)
1807 {
1808 	struct vgic_io_device *iodev = &its->iodev;
1809 	int ret;
1810 
1811 	mutex_lock(&kvm->slots_lock);
1812 	if (!IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
1813 		ret = -EBUSY;
1814 		goto out;
1815 	}
1816 
1817 	its->vgic_its_base = addr;
1818 	iodev->regions = its_registers;
1819 	iodev->nr_regions = ARRAY_SIZE(its_registers);
1820 	kvm_iodevice_init(&iodev->dev, &kvm_io_gic_ops);
1821 
1822 	iodev->base_addr = its->vgic_its_base;
1823 	iodev->iodev_type = IODEV_ITS;
1824 	iodev->its = its;
1825 	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, iodev->base_addr,
1826 				      KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
1827 out:
1828 	mutex_unlock(&kvm->slots_lock);
1829 
1830 	return ret;
1831 }
1832 
1833 #define INITIAL_BASER_VALUE						  \
1834 	(GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)		| \
1835 	 GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner)		| \
1836 	 GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)		| \
1837 	 GITS_BASER_PAGE_SIZE_64K)
1838 
1839 #define INITIAL_PROPBASER_VALUE						  \
1840 	(GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb)		| \
1841 	 GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, SameAsInner)	| \
1842 	 GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable))
1843 
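/*
 * KVM device create callback for KVM_DEV_TYPE_ARM_VGIC_ITS: allocate and
 * initialise the vgic_its state, set up GICv4 support if the vgic is
 * already initialised, and select the most recent ABI.
 */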
1844 static int vgic_its_create(struct kvm_device *dev, u32 type)
1845 {
1846 	int ret;
1847 	struct vgic_its *its;
1848 
1849 	if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
1850 		return -ENODEV;
1851 
1852 	its = kzalloc(sizeof(struct vgic_its), GFP_KERNEL_ACCOUNT);
1853 	if (!its)
1854 		return -ENOMEM;
1855 
1856 	mutex_lock(&dev->kvm->arch.config_lock);
1857 
1858 	if (vgic_initialized(dev->kvm)) {
1859 		ret = vgic_v4_init(dev->kvm);
1860 		if (ret < 0) {
1861 			mutex_unlock(&dev->kvm->arch.config_lock);
1862 			kfree(its);
1863 			return ret;
1864 		}
1865 	}
1866 
1867 	mutex_init(&its->its_lock);
1868 	mutex_init(&its->cmd_lock);
1869 
1870 	/* Yep, even more trickery for lock ordering... */
1871 #ifdef CONFIG_LOCKDEP
1872 	mutex_lock(&its->cmd_lock);
1873 	mutex_lock(&its->its_lock);
1874 	mutex_unlock(&its->its_lock);
1875 	mutex_unlock(&its->cmd_lock);
1876 #endif
1877 
1878 	its->vgic_its_base = VGIC_ADDR_UNDEF;
1879 
1880 	INIT_LIST_HEAD(&its->device_list);
1881 	INIT_LIST_HEAD(&its->collection_list);
1882 	xa_init(&its->translation_cache);
1883 
1884 	dev->kvm->arch.vgic.msis_require_devid = true;
1885 	dev->kvm->arch.vgic.has_its = true;
1886 	its->enabled = false;
1887 	its->dev = dev;
1888 
1889 	its->baser_device_table = INITIAL_BASER_VALUE			|
1890 		((u64)GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT);
1891 	its->baser_coll_table = INITIAL_BASER_VALUE |
1892 		((u64)GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT);
1893 	dev->kvm->arch.vgic.propbaser = INITIAL_PROPBASER_VALUE;
1894 
1895 	dev->private = its;
1896 
1897 	ret = vgic_its_set_abi(its, NR_ITS_ABIS - 1);
1898 
1899 	mutex_unlock(&dev->kvm->arch.config_lock);
1900 
1901 	return ret;
1902 }
1903 
1904 static void vgic_its_destroy(struct kvm_device *kvm_dev)
1905 {
1906 	struct kvm *kvm = kvm_dev->kvm;
1907 	struct vgic_its *its = kvm_dev->private;
1908 
1909 	mutex_lock(&its->its_lock);
1910 
1911 	vgic_its_debug_destroy(kvm_dev);
1912 
1913 	vgic_its_free_device_list(kvm, its);
1914 	vgic_its_free_collection_list(kvm, its);
1915 	vgic_its_invalidate_cache(its);
1916 	xa_destroy(&its->translation_cache);
1917 
1918 	mutex_unlock(&its->its_lock);
1919 	kfree(its);
1920 	kfree(kvm_dev); /* allocated by kvm_ioctl_create_device(), freed by .destroy */
1921 }
1922 
1923 static int vgic_its_has_attr_regs(struct kvm_device *dev,
1924 				  struct kvm_device_attr *attr)
1925 {
1926 	const struct vgic_register_region *region;
1927 	gpa_t offset = attr->attr;
1928 	int align;
1929 
1930 	align = (offset < GITS_TYPER) || (offset >= GITS_PIDR4) ? 0x3 : 0x7;
1931 
1932 	if (offset & align)
1933 		return -EINVAL;
1934 
1935 	region = vgic_find_mmio_region(its_registers,
1936 				       ARRAY_SIZE(its_registers),
1937 				       offset);
1938 	if (!region)
1939 		return -ENXIO;
1940 
1941 	return 0;
1942 }
1943 
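/*
 * Userspace access to an individual ITS register via the
 * KVM_DEV_ARM_VGIC_GRP_ITS_REGS group. All vCPUs are locked to keep the
 * register state stable while it is read or written.
 */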
1944 static int vgic_its_attr_regs_access(struct kvm_device *dev,
1945 				     struct kvm_device_attr *attr,
1946 				     u64 *reg, bool is_write)
1947 {
1948 	const struct vgic_register_region *region;
1949 	struct vgic_its *its;
1950 	gpa_t addr, offset;
1951 	unsigned int len;
1952 	int align, ret = 0;
1953 
1954 	its = dev->private;
1955 	offset = attr->attr;
1956 
1957 	/*
1958 	 * Although the spec supports upper/lower 32-bit accesses to
1959 	 * 64-bit ITS registers, the userspace ABI requires 64-bit
1960 	 * accesses to all 64-bit wide registers. We therefore only
1961 	 * support 32-bit accesses to GITS_CTLR, GITS_IIDR and GITS ID
1962 	 * registers
1963 	 */
1964 	if ((offset < GITS_TYPER) || (offset >= GITS_PIDR4))
1965 		align = 0x3;
1966 	else
1967 		align = 0x7;
1968 
1969 	if (offset & align)
1970 		return -EINVAL;
1971 
1972 	mutex_lock(&dev->kvm->lock);
1973 
1974 	if (kvm_trylock_all_vcpus(dev->kvm)) {
1975 		mutex_unlock(&dev->kvm->lock);
1976 		return -EBUSY;
1977 	}
1978 
1979 	mutex_lock(&dev->kvm->arch.config_lock);
1980 
1981 	if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
1982 		ret = -ENXIO;
1983 		goto out;
1984 	}
1985 
1986 	region = vgic_find_mmio_region(its_registers,
1987 				       ARRAY_SIZE(its_registers),
1988 				       offset);
1989 	if (!region) {
1990 		ret = -ENXIO;
1991 		goto out;
1992 	}
1993 
1994 	addr = its->vgic_its_base + offset;
1995 
1996 	len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4;
1997 
1998 	if (is_write) {
1999 		if (region->uaccess_its_write)
2000 			ret = region->uaccess_its_write(dev->kvm, its, addr,
2001 							len, *reg);
2002 		else
2003 			region->its_write(dev->kvm, its, addr, len, *reg);
2004 	} else {
2005 		*reg = region->its_read(dev->kvm, its, addr, len);
2006 	}
2007 out:
2008 	mutex_unlock(&dev->kvm->arch.config_lock);
2009 	kvm_unlock_all_vcpus(dev->kvm);
2010 	mutex_unlock(&dev->kvm->lock);
2011 	return ret;
2012 }
2013 
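
/*
 * Helpers for the "next" fields of saved table entries: return the
 * device ID (resp. event ID) delta to the next entry in the list,
 * clamped to the maximum encodable offset, or 0 for the last entry.
 */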
2014 static u32 compute_next_devid_offset(struct list_head *h,
2015 				     struct its_device *dev)
2016 {
2017 	struct its_device *next;
2018 	u32 next_offset;
2019 
2020 	if (list_is_last(&dev->dev_list, h))
2021 		return 0;
2022 	next = list_next_entry(dev, dev_list);
2023 	next_offset = next->device_id - dev->device_id;
2024 
2025 	return min_t(u32, next_offset, VITS_DTE_MAX_DEVID_OFFSET);
2026 }
2027 
2028 static u32 compute_next_eventid_offset(struct list_head *h, struct its_ite *ite)
2029 {
2030 	struct its_ite *next;
2031 	u32 next_offset;
2032 
2033 	if (list_is_last(&ite->ite_list, h))
2034 		return 0;
2035 	next = list_next_entry(ite, ite_list);
2036 	next_offset = next->event_id - ite->event_id;
2037 
2038 	return min_t(u32, next_offset, VITS_ITE_MAX_EVENTID_OFFSET);
2039 }
2040 
2041 /**
2042  * typedef entry_fn_t - Callback called on a table entry restore path
2043  * @its: its handle
2044  * @id: id of the entry
2045  * @entry: pointer to the entry
2046  * @opaque: pointer to opaque data
2047  *
2048  * Return: < 0 on error, 0 if last element was identified, id offset to next
2049  * element otherwise
2050  */
2051 typedef int (*entry_fn_t)(struct vgic_its *its, u32 id, void *entry,
2052 			  void *opaque);
2053 
2054 /**
2055  * scan_its_table - Scan a contiguous table in guest RAM and apply a function
2056  * to each entry
2057  *
2058  * @its: its handle
2059  * @base: base gpa of the table
2060  * @size: size of the table in bytes
2061  * @esz: entry size in bytes
2062  * @start_id: the ID of the first entry in the table
2063  * (non-zero for 2nd level tables)
2064  * @fn: function to apply on each entry
2065  * @opaque: pointer to opaque data
2066  *
2067  * Return: < 0 on error, 0 if last element was identified, 1 otherwise
2068  * (the last element may not be found on second level tables)
2069  */
2070 static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
2071 			  int start_id, entry_fn_t fn, void *opaque)
2072 {
2073 	struct kvm *kvm = its->dev->kvm;
2074 	unsigned long len = size;
2075 	int id = start_id;
2076 	gpa_t gpa = base;
2077 	char entry[ESZ_MAX];
2078 	int ret;
2079 
2080 	memset(entry, 0, esz);
2081 
2082 	while (true) {
2083 		int next_offset;
2084 		size_t byte_offset;
2085 
2086 		ret = kvm_read_guest_lock(kvm, gpa, entry, esz);
2087 		if (ret)
2088 			return ret;
2089 
2090 		next_offset = fn(its, id, entry, opaque);
2091 		if (next_offset <= 0)
2092 			return next_offset;
2093 
2094 		byte_offset = next_offset * esz;
2095 		if (byte_offset >= len)
2096 			break;
2097 
2098 		id += next_offset;
2099 		gpa += byte_offset;
2100 		len -= byte_offset;
2101 	}
2102 	return 1;
2103 }
2104 
2105 /*
2106  * vgic_its_save_ite - Save an interrupt translation entry at @gpa
2107  */
2108 static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
2109 			      struct its_ite *ite, gpa_t gpa)
2110 {
2111 	u32 next_offset;
2112 	u64 val;
2113 
2114 	next_offset = compute_next_eventid_offset(&dev->itt_head, ite);
2115 	val = ((u64)next_offset << KVM_ITS_ITE_NEXT_SHIFT) |
2116 	       ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
2117 		ite->collection->collection_id;
2118 	val = cpu_to_le64(val);
2119 
2120 	return vgic_its_write_entry_lock(its, gpa, val, ite);
2121 }
2122 
2123 /**
2124  * vgic_its_restore_ite - restore an interrupt translation entry
2125  *
2126  * @its: its handle
2127  * @event_id: id used for indexing
2128  * @ptr: pointer to the ITE entry
2129  * @opaque: pointer to the its_device
2130  */
2131 static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
2132 				void *ptr, void *opaque)
2133 {
2134 	struct its_device *dev = opaque;
2135 	struct its_collection *collection;
2136 	struct kvm *kvm = its->dev->kvm;
2137 	struct kvm_vcpu *vcpu = NULL;
2138 	u64 val;
2139 	u64 *p = (u64 *)ptr;
2140 	struct vgic_irq *irq;
2141 	u32 coll_id, lpi_id;
2142 	struct its_ite *ite;
2143 	u32 offset;
2144 
2145 	val = *p;
2146 
2147 	val = le64_to_cpu(val);
2148 
2149 	coll_id = val & KVM_ITS_ITE_ICID_MASK;
2150 	lpi_id = (val & KVM_ITS_ITE_PINTID_MASK) >> KVM_ITS_ITE_PINTID_SHIFT;
2151 
2152 	if (!lpi_id)
2153 		return 1; /* invalid entry, no choice but to scan next entry */
2154 
2155 	if (lpi_id < VGIC_MIN_LPI)
2156 		return -EINVAL;
2157 
2158 	offset = val >> KVM_ITS_ITE_NEXT_SHIFT;
2159 	if (event_id + offset >= BIT_ULL(dev->num_eventid_bits))
2160 		return -EINVAL;
2161 
2162 	collection = find_collection(its, coll_id);
2163 	if (!collection)
2164 		return -EINVAL;
2165 
2166 	if (!vgic_its_check_event_id(its, dev, event_id))
2167 		return -EINVAL;
2168 
2169 	ite = vgic_its_alloc_ite(dev, collection, event_id);
2170 	if (IS_ERR(ite))
2171 		return PTR_ERR(ite);
2172 
2173 	if (its_is_collection_mapped(collection))
2174 		vcpu = kvm_get_vcpu_by_id(kvm, collection->target_addr);
2175 
2176 	irq = vgic_add_lpi(kvm, lpi_id, vcpu);
2177 	if (IS_ERR(irq)) {
2178 		its_free_ite(kvm, ite);
2179 		return PTR_ERR(irq);
2180 	}
2181 	ite->irq = irq;
2182 
2183 	return offset;
2184 }
2185 
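/* list_sort() comparator: order ITEs by increasing event ID */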
2186 static int vgic_its_ite_cmp(void *priv, const struct list_head *a,
2187 			    const struct list_head *b)
2188 {
2189 	struct its_ite *itea = container_of(a, struct its_ite, ite_list);
2190 	struct its_ite *iteb = container_of(b, struct its_ite, ite_list);
2191 
2192 	if (itea->event_id < iteb->event_id)
2193 		return -1;
2194 	else
2195 		return 1;
2196 }
2197 
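/*
 * vgic_its_save_itt - Save the interrupt translation table of a device
 * into guest RAM, one ITE per mapped event, ordered by event ID.
 */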
2198 static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device)
2199 {
2200 	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2201 	gpa_t base = device->itt_addr;
2202 	struct its_ite *ite;
2203 	int ret;
2204 	int ite_esz = abi->ite_esz;
2205 
2206 	list_sort(NULL, &device->itt_head, vgic_its_ite_cmp);
2207 
2208 	list_for_each_entry(ite, &device->itt_head, ite_list) {
2209 		gpa_t gpa = base + ite->event_id * ite_esz;
2210 
2211 		/*
2212 		 * If an LPI carries the HW bit, the interrupt is controlled
2213 		 * by GICv4, and we do not have direct access to that state
2214 		 * without GICv4.1.
2215 		 * Let's simply fail the save operation...
2216 		 */
2217 		if (ite->irq->hw && !kvm_vgic_global_state.has_gicv4_1)
2218 			return -EACCES;
2219 
2220 		ret = vgic_its_save_ite(its, device, ite, gpa);
2221 		if (ret)
2222 			return ret;
2223 	}
2224 	return 0;
2225 }
2226 
2227 /**
2228  * vgic_its_restore_itt - restore the ITT of a device
2229  *
2230  * @its: its handle
2231  * @dev: device handle
2232  *
2233  * Return: 0 on success, < 0 on error
2234  */
2235 static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev)
2236 {
2237 	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2238 	gpa_t base = dev->itt_addr;
2239 	int ret;
2240 	int ite_esz = abi->ite_esz;
2241 	size_t max_size = BIT_ULL(dev->num_eventid_bits) * ite_esz;
2242 
2243 	ret = scan_its_table(its, base, max_size, ite_esz, 0,
2244 			     vgic_its_restore_ite, dev);
2245 
2246 	/* scan_its_table returns +1 if all ITEs are invalid */
2247 	if (ret > 0)
2248 		ret = 0;
2249 
2250 	return ret;
2251 }
2252 
2253 /**
2254  * vgic_its_save_dte - Save a device table entry at a given GPA
2255  *
2256  * @its: ITS handle
2257  * @dev: ITS device
2258  * @ptr: GPA
2259  */
2260 static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
2261 			     gpa_t ptr)
2262 {
2263 	u64 val, itt_addr_field;
2264 	u32 next_offset;
2265 
2266 	itt_addr_field = dev->itt_addr >> 8;
2267 	next_offset = compute_next_devid_offset(&its->device_list, dev);
2268 	val = (1ULL << KVM_ITS_DTE_VALID_SHIFT |
2269 	       ((u64)next_offset << KVM_ITS_DTE_NEXT_SHIFT) |
2270 	       (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
2271 		(dev->num_eventid_bits - 1));
2272 	val = cpu_to_le64(val);
2273 
2274 	return vgic_its_write_entry_lock(its, ptr, val, dte);
2275 }
2276 
2277 /**
2278  * vgic_its_restore_dte - restore a device table entry
2279  *
2280  * @its: its handle
2281  * @id: device id the DTE corresponds to
2282  * @ptr: kernel VA where the 8 byte DTE is located
2283  * @opaque: unused
2284  *
2285  * Return: < 0 on error, 0 if the dte is the last one, id offset to the
2286  * next dte otherwise
2287  */
2288 static int vgic_its_restore_dte(struct vgic_its *its, u32 id,
2289 				void *ptr, void *opaque)
2290 {
2291 	struct its_device *dev;
2292 	u64 baser = its->baser_device_table;
2293 	gpa_t itt_addr;
2294 	u8 num_eventid_bits;
2295 	u64 entry = *(u64 *)ptr;
2296 	bool valid;
2297 	u32 offset;
2298 	int ret;
2299 
2300 	entry = le64_to_cpu(entry);
2301 
2302 	valid = entry >> KVM_ITS_DTE_VALID_SHIFT;
2303 	num_eventid_bits = (entry & KVM_ITS_DTE_SIZE_MASK) + 1;
2304 	itt_addr = ((entry & KVM_ITS_DTE_ITTADDR_MASK)
2305 			>> KVM_ITS_DTE_ITTADDR_SHIFT) << 8;
2306 
2307 	if (!valid)
2308 		return 1;
2309 
2310 	/* dte entry is valid */
2311 	offset = (entry & KVM_ITS_DTE_NEXT_MASK) >> KVM_ITS_DTE_NEXT_SHIFT;
2312 
2313 	if (!vgic_its_check_id(its, baser, id, NULL))
2314 		return -EINVAL;
2315 
2316 	dev = vgic_its_alloc_device(its, id, itt_addr, num_eventid_bits);
2317 	if (IS_ERR(dev))
2318 		return PTR_ERR(dev);
2319 
2320 	ret = vgic_its_restore_itt(its, dev);
2321 	if (ret) {
2322 		vgic_its_free_device(its->dev->kvm, its, dev);
2323 		return ret;
2324 	}
2325 
2326 	return offset;
2327 }
2328 
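/* list_sort() comparator: order devices by increasing device ID */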
2329 static int vgic_its_device_cmp(void *priv, const struct list_head *a,
2330 			       const struct list_head *b)
2331 {
2332 	struct its_device *deva = container_of(a, struct its_device, dev_list);
2333 	struct its_device *devb = container_of(b, struct its_device, dev_list);
2334 
2335 	if (deva->device_id < devb->device_id)
2336 		return -1;
2337 	else
2338 		return 1;
2339 }
2340 
2341 /*
2342  * vgic_its_save_device_tables - Save the device table and all ITT
2343  * into guest RAM
2344  *
2345  * L1/L2 handling is hidden by the vgic_its_check_id() helper, which
2346  * directly returns the GPA of the device entry
2347  */
2348 static int vgic_its_save_device_tables(struct vgic_its *its)
2349 {
2350 	u64 baser = its->baser_device_table;
2351 	struct its_device *dev;
2352 
2353 	if (!(baser & GITS_BASER_VALID))
2354 		return 0;
2355 
2356 	list_sort(NULL, &its->device_list, vgic_its_device_cmp);
2357 
2358 	list_for_each_entry(dev, &its->device_list, dev_list) {
2359 		int ret;
2360 		gpa_t eaddr;
2361 
2362 		if (!vgic_its_check_id(its, baser,
2363 				       dev->device_id, &eaddr))
2364 			return -EINVAL;
2365 
2366 		ret = vgic_its_save_itt(its, dev);
2367 		if (ret)
2368 			return ret;
2369 
2370 		ret = vgic_its_save_dte(its, dev, eaddr);
2371 		if (ret)
2372 			return ret;
2373 	}
2374 	return 0;
2375 }
2376 
2377 /**
2378  * handle_l1_dte - callback used for L1 device table entries (2 stage case)
2379  *
2380  * @its: its handle
2381  * @id: index of the entry in the L1 table
2382  * @addr: kernel VA
2383  * @opaque: unused
2384  *
2385  * L1 table entries are scanned in steps of one entry.
2386  * Return: < 0 on error, 0 if the last DTE was found when scanning the L2
2387  * table, +1 otherwise (meaning the next L1 entry must be scanned)
2388  */
2389 static int handle_l1_dte(struct vgic_its *its, u32 id, void *addr,
2390 			 void *opaque)
2391 {
2392 	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2393 	int l2_start_id = id * (SZ_64K / abi->dte_esz);
2394 	u64 entry = *(u64 *)addr;
2395 	int dte_esz = abi->dte_esz;
2396 	gpa_t gpa;
2397 	int ret;
2398 
2399 	entry = le64_to_cpu(entry);
2400 
2401 	if (!(entry & KVM_ITS_L1E_VALID_MASK))
2402 		return 1;
2403 
2404 	gpa = entry & KVM_ITS_L1E_ADDR_MASK;
2405 
2406 	ret = scan_its_table(its, gpa, SZ_64K, dte_esz,
2407 			     l2_start_id, vgic_its_restore_dte, NULL);
2408 
2409 	return ret;
2410 }
2411 
2412 /*
2413  * vgic_its_restore_device_tables - Restore the device table and all ITT
2414  * from guest RAM to internal data structs
2415  */
2416 static int vgic_its_restore_device_tables(struct vgic_its *its)
2417 {
2418 	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2419 	u64 baser = its->baser_device_table;
2420 	int l1_esz, ret;
2421 	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
2422 	gpa_t l1_gpa;
2423 
2424 	if (!(baser & GITS_BASER_VALID))
2425 		return 0;
2426 
2427 	l1_gpa = GITS_BASER_ADDR_48_to_52(baser);
2428 
2429 	if (baser & GITS_BASER_INDIRECT) {
2430 		l1_esz = GITS_LVL1_ENTRY_SIZE;
2431 		ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
2432 				     handle_l1_dte, NULL);
2433 	} else {
2434 		l1_esz = abi->dte_esz;
2435 		ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
2436 				     vgic_its_restore_dte, NULL);
2437 	}
2438 
2439 	/* scan_its_table returns +1 if all entries are invalid */
2440 	if (ret > 0)
2441 		ret = 0;
2442 
2443 	if (ret < 0)
2444 		vgic_its_free_device_list(its->dev->kvm, its);
2445 
2446 	return ret;
2447 }
2448 
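/*
 * Save a collection table entry (valid bit, target redistributor and
 * collection ID) at @gpa.
 */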
2449 static int vgic_its_save_cte(struct vgic_its *its,
2450 			     struct its_collection *collection,
2451 			     gpa_t gpa)
2452 {
2453 	u64 val;
2454 
2455 	val = (1ULL << KVM_ITS_CTE_VALID_SHIFT |
2456 	       ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
2457 	       collection->collection_id);
2458 	val = cpu_to_le64(val);
2459 
2460 	return vgic_its_write_entry_lock(its, gpa, val, cte);
2461 }
2462 
2463 /*
2464  * Restore a collection entry into the ITS collection table.
2465  * Return +1 on success, 0 if the entry was invalid (which should be
2466  * interpreted as end-of-table), and a negative error value for generic errors.
2467  */
2468 static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa)
2469 {
2470 	struct its_collection *collection;
2471 	struct kvm *kvm = its->dev->kvm;
2472 	u32 target_addr, coll_id;
2473 	u64 val;
2474 	int ret;
2475 
2476 	ret = vgic_its_read_entry_lock(its, gpa, &val, cte);
2477 	if (ret)
2478 		return ret;
2479 	val = le64_to_cpu(val);
2480 	if (!(val & KVM_ITS_CTE_VALID_MASK))
2481 		return 0;
2482 
2483 	target_addr = (u32)(val >> KVM_ITS_CTE_RDBASE_SHIFT);
2484 	coll_id = val & KVM_ITS_CTE_ICID_MASK;
2485 
2486 	if (target_addr != COLLECTION_NOT_MAPPED &&
2487 	    !kvm_get_vcpu_by_id(kvm, target_addr))
2488 		return -EINVAL;
2489 
2490 	collection = find_collection(its, coll_id);
2491 	if (collection)
2492 		return -EEXIST;
2493 
2494 	if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL))
2495 		return -EINVAL;
2496 
2497 	ret = vgic_its_alloc_collection(its, &collection, coll_id);
2498 	if (ret)
2499 		return ret;
2500 	collection->target_addr = target_addr;
2501 	return 1;
2502 }
2503 
2504 /*
2505  * vgic_its_save_collection_table - Save the collection table into
2506  * guest RAM
2507  */
2508 static int vgic_its_save_collection_table(struct vgic_its *its)
2509 {
2510 	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2511 	u64 baser = its->baser_coll_table;
2512 	gpa_t gpa = GITS_BASER_ADDR_48_to_52(baser);
2513 	struct its_collection *collection;
2514 	size_t max_size, filled = 0;
2515 	int ret, cte_esz = abi->cte_esz;
2516 
2517 	if (!(baser & GITS_BASER_VALID))
2518 		return 0;
2519 
2520 	max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
2521 
2522 	list_for_each_entry(collection, &its->collection_list, coll_list) {
2523 		ret = vgic_its_save_cte(its, collection, gpa);
2524 		if (ret)
2525 			return ret;
2526 		gpa += cte_esz;
2527 		filled += cte_esz;
2528 	}
2529 
2530 	if (filled == max_size)
2531 		return 0;
2532 
2533 	/*
2534 	 * The table is not fully filled, so add a final dummy element
2535 	 * with the valid bit unset
2536 	 */
2537 	return vgic_its_write_entry_lock(its, gpa, 0ULL, cte);
2538 }
2539 
2540 /*
2541  * vgic_its_restore_collection_table - reads the collection table
2542  * in guest memory and restores the ITS internal state. Requires the
2543  * BASER registers to be restored before.
2544  */
2545 static int vgic_its_restore_collection_table(struct vgic_its *its)
2546 {
2547 	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2548 	u64 baser = its->baser_coll_table;
2549 	int cte_esz = abi->cte_esz;
2550 	size_t max_size, read = 0;
2551 	gpa_t gpa;
2552 	int ret;
2553 
2554 	if (!(baser & GITS_BASER_VALID))
2555 		return 0;
2556 
2557 	gpa = GITS_BASER_ADDR_48_to_52(baser);
2558 
2559 	max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
2560 
2561 	while (read < max_size) {
2562 		ret = vgic_its_restore_cte(its, gpa);
2563 		if (ret <= 0)
2564 			break;
2565 		gpa += cte_esz;
2566 		read += cte_esz;
2567 	}
2568 
2569 	if (ret > 0)
2570 		return 0;
2571 
2572 	if (ret < 0)
2573 		vgic_its_free_collection_list(its->dev->kvm, its);
2574 
2575 	return ret;
2576 }
2577 
2578 /*
2579  * vgic_its_save_tables_v0 - Save the ITS tables into guest RAM
2580  * according to v0 ABI
2581  */
2582 static int vgic_its_save_tables_v0(struct vgic_its *its)
2583 {
2584 	int ret;
2585 
2586 	ret = vgic_its_save_device_tables(its);
2587 	if (ret)
2588 		return ret;
2589 
2590 	return vgic_its_save_collection_table(its);
2591 }
2592 
2593 /*
2594  * vgic_its_restore_tables_v0 - Restore the ITS tables from guest RAM
2595  * to internal data structs according to V0 ABI
2596  * to internal data structs according to v0 ABI
2597  */
2598 static int vgic_its_restore_tables_v0(struct vgic_its *its)
2599 {
2600 	int ret;
2601 
2602 	ret = vgic_its_restore_collection_table(its);
2603 	if (ret)
2604 		return ret;
2605 
2606 	ret = vgic_its_restore_device_tables(its);
2607 	if (ret)
2608 		vgic_its_free_collection_list(its->dev->kvm, its);
2609 	return ret;
2610 }
2611 
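/*
 * vgic_its_commit_v0 - Commit the v0 ABI: encode the ABI's DTE and CTE
 * sizes into the Entry_Size fields of the shadow GITS_BASER registers.
 */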
2612 static int vgic_its_commit_v0(struct vgic_its *its)
2613 {
2614 	const struct vgic_its_abi *abi;
2615 
2616 	abi = vgic_its_get_abi(its);
2617 	its->baser_coll_table &= ~GITS_BASER_ENTRY_SIZE_MASK;
2618 	its->baser_device_table &= ~GITS_BASER_ENTRY_SIZE_MASK;
2619 
2620 	its->baser_coll_table |= (GIC_ENCODE_SZ(abi->cte_esz, 5)
2621 					<< GITS_BASER_ENTRY_SIZE_SHIFT);
2622 
2623 	its->baser_device_table |= (GIC_ENCODE_SZ(abi->dte_esz, 5)
2624 					<< GITS_BASER_ENTRY_SIZE_SHIFT);
2625 	return 0;
2626 }
2627 
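/*
 * KVM_DEV_ARM_ITS_CTRL_RESET: return the ITS registers to their reset
 * values and drop all internal device/collection state, while keeping
 * the ABI specific fields of the BASER registers.
 */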
2628 static void vgic_its_reset(struct kvm *kvm, struct vgic_its *its)
2629 {
2630 	/* We need to keep the ABI specific field values */
2631 	its->baser_coll_table &= ~GITS_BASER_VALID;
2632 	its->baser_device_table &= ~GITS_BASER_VALID;
2633 	its->cbaser = 0;
2634 	its->creadr = 0;
2635 	its->cwriter = 0;
2636 	its->enabled = 0;
2637 	vgic_its_free_device_list(kvm, its);
2638 	vgic_its_free_collection_list(kvm, its);
2639 }
2640 
2641 static int vgic_its_has_attr(struct kvm_device *dev,
2642 			     struct kvm_device_attr *attr)
2643 {
2644 	switch (attr->group) {
2645 	case KVM_DEV_ARM_VGIC_GRP_ADDR:
2646 		switch (attr->attr) {
2647 		case KVM_VGIC_ITS_ADDR_TYPE:
2648 			return 0;
2649 		}
2650 		break;
2651 	case KVM_DEV_ARM_VGIC_GRP_CTRL:
2652 		switch (attr->attr) {
2653 		case KVM_DEV_ARM_VGIC_CTRL_INIT:
2654 			return 0;
2655 		case KVM_DEV_ARM_ITS_CTRL_RESET:
2656 			return 0;
2657 		case KVM_DEV_ARM_ITS_SAVE_TABLES:
2658 			return 0;
2659 		case KVM_DEV_ARM_ITS_RESTORE_TABLES:
2660 			return 0;
2661 		}
2662 		break;
2663 	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS:
2664 		return vgic_its_has_attr_regs(dev, attr);
2665 	}
2666 	return -ENXIO;
2667 }
2668 
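/*
 * Handle the KVM_DEV_ARM_VGIC_GRP_CTRL attributes: ITS reset and table
 * save/restore. All vCPUs must be locked out while the tables are
 * accessed, hence the kvm_trylock_all_vcpus() dance.
 */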
2669 static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
2670 {
2671 	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2672 	int ret = 0;
2673 
2674 	if (attr == KVM_DEV_ARM_VGIC_CTRL_INIT) /* Nothing to do */
2675 		return 0;
2676 
2677 	mutex_lock(&kvm->lock);
2678 
2679 	if (kvm_trylock_all_vcpus(kvm)) {
2680 		mutex_unlock(&kvm->lock);
2681 		return -EBUSY;
2682 	}
2683 
2684 	mutex_lock(&kvm->arch.config_lock);
2685 	mutex_lock(&its->its_lock);
2686 
2687 	switch (attr) {
2688 	case KVM_DEV_ARM_ITS_CTRL_RESET:
2689 		vgic_its_reset(kvm, its);
2690 		break;
2691 	case KVM_DEV_ARM_ITS_SAVE_TABLES:
2692 		ret = abi->save_tables(its);
2693 		break;
2694 	case KVM_DEV_ARM_ITS_RESTORE_TABLES:
2695 		ret = abi->restore_tables(its);
2696 		break;
2697 	default:
2698 		ret = -ENXIO;
2699 		break;
2700 	}
2701 
2702 	mutex_unlock(&its->its_lock);
2703 	mutex_unlock(&kvm->arch.config_lock);
2704 	kvm_unlock_all_vcpus(kvm);
2705 	mutex_unlock(&kvm->lock);
2706 	return ret;
2707 }
2708 
2709 /*
2710  * kvm_arch_allow_write_without_running_vcpu - allow writing guest memory
2711  * without the running VCPU when dirty ring is enabled.
2712  *
2713  * The running VCPU is required to track dirty guest pages when dirty ring
2714  * is enabled. Otherwise, the backup bitmap should be used to track the
2715  * dirty guest pages. When the vgic/ITS tables are being saved, there is
2716  * no running VCPU, so the backup bitmap is used to track the dirty guest
2717  * pages during that period.
2718  */
2719 bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm)
2720 {
2721 	struct vgic_dist *dist = &kvm->arch.vgic;
2722 
2723 	return dist->table_write_in_progress;
2724 }
2725 
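/*
 * Illustrative userspace flow (sketch, not part of this file): the ITS is
 * created with KVM_CREATE_DEVICE(KVM_DEV_TYPE_ARM_VGIC_ITS), placed via
 * KVM_SET_DEVICE_ATTR on the KVM_DEV_ARM_VGIC_GRP_ADDR group using
 * KVM_VGIC_ITS_ADDR_TYPE, and initialised with KVM_DEV_ARM_VGIC_CTRL_INIT
 * in the KVM_DEV_ARM_VGIC_GRP_CTRL group.
 */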
2726 static int vgic_its_set_attr(struct kvm_device *dev,
2727 			     struct kvm_device_attr *attr)
2728 {
2729 	struct vgic_its *its = dev->private;
2730 	int ret;
2731 
2732 	switch (attr->group) {
2733 	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
2734 		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2735 		unsigned long type = (unsigned long)attr->attr;
2736 		u64 addr;
2737 
2738 		if (type != KVM_VGIC_ITS_ADDR_TYPE)
2739 			return -ENODEV;
2740 
2741 		if (copy_from_user(&addr, uaddr, sizeof(addr)))
2742 			return -EFAULT;
2743 
2744 		ret = vgic_check_iorange(dev->kvm, its->vgic_its_base,
2745 					 addr, SZ_64K, KVM_VGIC_V3_ITS_SIZE);
2746 		if (ret)
2747 			return ret;
2748 
2749 		ret = vgic_register_its_iodev(dev->kvm, its, addr);
2750 		if (ret)
2751 			return ret;
2752 
2753 		return vgic_its_debug_init(dev);
2754 
2755 	}
2756 	case KVM_DEV_ARM_VGIC_GRP_CTRL:
2757 		return vgic_its_ctrl(dev->kvm, its, attr->attr);
2758 	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
2759 		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2760 		u64 reg;
2761 
2762 		if (get_user(reg, uaddr))
2763 			return -EFAULT;
2764 
2765 		return vgic_its_attr_regs_access(dev, attr, &reg, true);
2766 	}
2767 	}
2768 	return -ENXIO;
2769 }
2770 
2771 static int vgic_its_get_attr(struct kvm_device *dev,
2772 			     struct kvm_device_attr *attr)
2773 {
2774 	switch (attr->group) {
2775 	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
2776 		struct vgic_its *its = dev->private;
2777 		u64 addr = its->vgic_its_base;
2778 		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2779 		unsigned long type = (unsigned long)attr->attr;
2780 
2781 		if (type != KVM_VGIC_ITS_ADDR_TYPE)
2782 			return -ENODEV;
2783 
2784 		if (copy_to_user(uaddr, &addr, sizeof(addr)))
2785 			return -EFAULT;
2786 		break;
2787 	}
2788 	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
2789 		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2790 		u64 reg;
2791 		int ret;
2792 
2793 		ret = vgic_its_attr_regs_access(dev, attr, &reg, false);
2794 		if (ret)
2795 			return ret;
2796 		return put_user(reg, uaddr);
2797 	}
2798 	default:
2799 		return -ENXIO;
2800 	}
2801 
2802 	return 0;
2803 }
2804 
2805 static struct kvm_device_ops kvm_arm_vgic_its_ops = {
2806 	.name = "kvm-arm-vgic-its",
2807 	.create = vgic_its_create,
2808 	.destroy = vgic_its_destroy,
2809 	.set_attr = vgic_its_set_attr,
2810 	.get_attr = vgic_its_get_attr,
2811 	.has_attr = vgic_its_has_attr,
2812 };
2813 
2814 int kvm_vgic_register_its_device(void)
2815 {
2816 	return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
2817 				       KVM_DEV_TYPE_ARM_VGIC_ITS);
2818 }
2819