Lines Matching full:vpes

181 	struct its_vpe		**vpes;	(struct member)
1858 * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times
1862 * and we're better off mapping all VPEs always
1864 * If neither (a) nor (b) is true, then we map vPEs on demand.
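The comment matched at 1858-1864 describes the vPE mapping policy: map every vPE eagerly in case (a) (GICv4.1) or in case (b) (only the tail of that condition is visible in these matches), otherwise map vPEs on demand. A minimal kernel-style sketch of that decision follows; the function and argument names are assumptions, not the driver's own helpers:

/*
 * Sketch only: encodes the (a)/(b)/on-demand policy described in the comment.
 * The exact condition behind case (b) is not visible in the matches.
 */
static bool vpes_need_eager_mapping(bool is_gicv4_1, bool mapping_all_is_cheap)
{
	if (is_gicv4_1)			/* (a): all vPEs mapped at all times */
		return true;
	if (mapping_all_is_cheap)	/* (b): better off mapping all vPEs always */
		return true;
	return false;			/* neither (a) nor (b): map on demand */
}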
1883 * If the VM wasn't mapped yet, iterate over the vpes and get in its_map_vm()
1892 struct its_vpe *vpe = vm->vpes[i]; in its_map_vm()
1914 guard(raw_spinlock)(&vm->vpes[i]->vpe_lock); in its_unmap_vm()
1915 its_send_vmapp(its, vm->vpes[i], false); in its_unmap_vm()
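The its_map_vm()/its_unmap_vm() matches at 1883-1915 show the per-VM walk over vm->vpes[]: on map, each vPE in the array is processed in turn; on unmap, each vPE's lock is taken with a scoped guard and an unmapping VMAPP is sent. A hedged sketch of the unmap side, assuming the driver-internal its_vm/its_vpe fields (nr_vpes, vpe_lock) and the static its_send_vmapp() helper behave as their names suggest; refcounting and ITS-list handling are omitted:

/* Sketch only: tear down the per-ITS mapping of every vPE in a VM. */
static void sketch_unmap_vm(struct its_node *its, struct its_vm *vm)
{
	int i;

	for (i = 0; i < vm->nr_vpes; i++) {
		/* guard() drops the raw spinlock at the end of each iteration */
		guard(raw_spinlock)(&vm->vpes[i]->vpe_lock);
		its_send_vmapp(its, vm->vpes[i], false);	/* false == unmap */
	}
}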
1949 /* Ensure all the VPEs are mapped on this ITS */ in its_vlpi_map()
3805 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL; in its_vpe_db_proxy_unmap_locked()
3812 * effect... Let's just hope VPEs don't migrate too often. in its_vpe_db_proxy_unmap_locked()
3814 if (vpe_proxy.vpes[vpe_proxy.next_victim]) in its_vpe_db_proxy_unmap_locked()
3846 if (vpe_proxy.vpes[vpe_proxy.next_victim]) in its_vpe_db_proxy_map_locked()
3847 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]); in its_vpe_db_proxy_map_locked()
3850 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe; in its_vpe_db_proxy_map_locked()
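The its_vpe_db_proxy_*_locked() matches at 3805-3850 show a proxy table indexed by vpe_proxy_event: unmapping clears the vPE's slot, and mapping evicts whatever currently occupies the "next victim" slot before installing the new vPE there. A hedged sketch of that eviction step; the nr_slots parameter and the round-robin advancement of next_victim are assumptions beyond what the matches show:

/* Sketch only: install a vPE doorbell in the proxy, evicting the victim slot. */
static void sketch_db_proxy_map_locked(struct its_vpe *vpe, int nr_slots)
{
	/* The victim slot is occupied: kick the previous owner out first. */
	if (vpe_proxy.vpes[vpe_proxy.next_victim])
		its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);

	/* Install the new vPE in that slot and remember its event number. */
	vpe->vpe_proxy_event = vpe_proxy.next_victim;
	vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;

	/* Assumed policy: advance the victim pointer round-robin. */
	vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % nr_slots;
}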
3921 * If we lazily map the VPEs, this isn't an error and in its_vpe_set_affinity()
4656 vm->vpes[i]->vpe_db_lpi = base + i; in its_vpe_irq_domain_alloc()
4657 err = its_vpe_init(vm->vpes[i]); in its_vpe_irq_domain_alloc()
4661 vm->vpes[i]->vpe_db_lpi); in its_vpe_irq_domain_alloc()
4665 irqchip, vm->vpes[i]); in its_vpe_irq_domain_alloc()
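The its_vpe_irq_domain_alloc() matches at 4656-4665 show the per-vPE allocation loop: each vPE gets a doorbell LPI at base + i, is initialised via its_vpe_init(), and is then wired up as the chip data for its virq. A hedged sketch of that loop, assuming the usual irq_domain_set_hwirq_and_chip() wiring and leaving out the error unwinding a real allocator would need:

/* Sketch only: give every vPE of a VM a doorbell LPI and a virq. */
static int sketch_vpe_domain_alloc(struct irq_domain *domain, unsigned int virq,
				   unsigned int nr_irqs, struct its_vm *vm,
				   int base, struct irq_chip *irqchip)
{
	int i, err;

	for (i = 0; i < nr_irqs; i++) {
		vm->vpes[i]->vpe_db_lpi = base + i;	/* per-vPE doorbell */
		err = its_vpe_init(vm->vpes[i]);
		if (err)
			return err;
		err = irq_domain_set_hwirq_and_chip(domain, virq + i,
						    vm->vpes[i]->vpe_db_lpi,
						    irqchip, vm->vpes[i]);
		if (err)
			return err;
	}

	return 0;
}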
5149 vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes), in its_init_vpe_domain()
5151 if (!vpe_proxy.vpes) in its_init_vpe_domain()
5158 kfree(vpe_proxy.vpes); in its_init_vpe_domain()
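The its_init_vpe_domain() matches at 5149-5158 show the proxy table being sized with kcalloc() and released again with kfree() on a failure path. A hedged sketch of that pairing; the 'entries' count and the point at which the free happens are assumptions beyond what the matches show:

/* Sketch only: allocate one proxy slot per possible doorbell event. */
static int sketch_alloc_vpe_proxy(int entries)
{
	vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes), GFP_KERNEL);
	if (!vpe_proxy.vpes)
		return -ENOMEM;

	return 0;
}

/* Sketch only: mirrors the error-path kfree() at 5158. */
static void sketch_free_vpe_proxy(void)
{
	kfree(vpe_proxy.vpes);
	vpe_proxy.vpes = NULL;
}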