| /linux/arch/x86/kvm/ |
| trace.h |
     29  __field( unsigned int, vcpu_id )
     37  __entry->vcpu_id = vcpu->vcpu_id;
     46  __entry->vcpu_id, __entry->rip,
    404  __field( unsigned int, vcpu_id ) \
    411  __entry->vcpu_id = vcpu->vcpu_id; \
    424  __entry->vcpu_id, \
    506  __field( unsigned int, vcpu_id )
    513  __entry->vcpu_id = vcpu->vcpu_id;
    520  __entry->vcpu_id, __entry->guest_rip,
    675  __entry->apicid = apic->vcpu->vcpu_id;
    [all …]
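
The trace.h hits all follow the same three-part TRACE_EVENT shape: declare a
vcpu_id field, copy it from the vcpu in TP_fast_assign, and print it in
TP_printk. A minimal sketch of that shape, assuming an invented event name
(this is not one of the real KVM tracepoints):

    TRACE_EVENT(sample_vcpu_event,
        TP_PROTO(struct kvm_vcpu *vcpu, unsigned long rip),
        TP_ARGS(vcpu, rip),

        TP_STRUCT__entry(
            __field(unsigned int,  vcpu_id)
            __field(unsigned long, rip)
        ),

        TP_fast_assign(
            __entry->vcpu_id = vcpu->vcpu_id; /* the assignment at 37/411/513 */
            __entry->rip     = rip;
        ),

        TP_printk("vcpu %u rip 0x%lx", __entry->vcpu_id, __entry->rip)
    );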
|
| ioapic.c |
    110  old_val = test_bit(vcpu->vcpu_id, dest_map->map);  in __rtc_irq_eoi_tracking_restore_one()
    116  __set_bit(vcpu->vcpu_id, dest_map->map);  in __rtc_irq_eoi_tracking_restore_one()
    117  dest_map->vectors[vcpu->vcpu_id] = e->fields.vector;  in __rtc_irq_eoi_tracking_restore_one()
    120  __clear_bit(vcpu->vcpu_id, dest_map->map);  in __rtc_irq_eoi_tracking_restore_one()
    154  if (test_bit(vcpu->vcpu_id, dest_map->map) &&  in rtc_irq_eoi()
    155  (vector == dest_map->vectors[vcpu->vcpu_id]) &&  in rtc_irq_eoi()
    156  (test_and_clear_bit(vcpu->vcpu_id,  in rtc_irq_eoi()
    275  if (test_bit(vcpu->vcpu_id, dest_map->map))  in kvm_ioapic_scan_entry()
    276  __set_bit(dest_map->vectors[vcpu->vcpu_id],  in kvm_ioapic_scan_entry()
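
The ioapic.c matches use vcpu_id as an index into a destination bitmap plus a
parallel per-vCPU vector array. A standalone sketch of that bookkeeping,
assuming a hypothetical fixed-size dest_map in place of the kernel's struct
and bitops:

    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_VCPUS 64

    /* Hypothetical stand-in for KVM's dest_map: one in-service bit and one
     * latched vector per vCPU, both keyed by vcpu_id. */
    struct dest_map {
        uint64_t map;                /* bit i set => vCPU i has the IRQ in service */
        uint8_t  vectors[MAX_VCPUS]; /* vector latched for vCPU i */
    };

    static void dest_map_set(struct dest_map *dm, uint32_t vcpu_id, uint8_t vector)
    {
        dm->map |= 1ULL << vcpu_id;     /* __set_bit() at line 116 */
        dm->vectors[vcpu_id] = vector;  /* line 117 */
    }

    /* Mirrors the rtc_irq_eoi() test at lines 154-156: the EOI counts only if
     * this vCPU's bit is set and the vector matches the latched one. */
    static bool dest_map_eoi(struct dest_map *dm, uint32_t vcpu_id, uint8_t vector)
    {
        if (!(dm->map & (1ULL << vcpu_id)) || dm->vectors[vcpu_id] != vector)
            return false;
        dm->map &= ~(1ULL << vcpu_id);  /* test_and_clear_bit() */
        return true;
    }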
|
| xen.c |
    113  e.vcpu_id = vcpu->vcpu_id;  in kvm_xen_inject_timer_irqs()
    135  e.vcpu_id = vcpu->vcpu_id;  in xen_timer_callback()
    622  irq.dest_id = v->vcpu_id;  in kvm_xen_inject_vcpu_vector()
   1106  if (data->u.vcpu_id >= KVM_MAX_VCPUS)  in kvm_xen_vcpu_set_attr()
   1109  vcpu->arch.xen.vcpu_id = data->u.vcpu_id;  in kvm_xen_vcpu_set_attr()
   1225  data->u.vcpu_id = vcpu->arch.xen.vcpu_id;  in kvm_xen_vcpu_get_attr()
   1606  int vcpu_id, u64 param, u64 *r)  in kvm_xen_hcall_vcpu_op() argument
   1616  if (vcpu->arch.xen.vcpu_id != vcpu_id) {  in kvm_xen_hcall_vcpu_op()
   1648  if (vcpu->arch.xen.vcpu_id != vcpu_id) {  in kvm_xen_hcall_vcpu_op()
   1807  vcpu = kvm_get_vcpu_by_id(kvm, xe->vcpu_id);  in kvm_xen_set_evtchn_fast()
    [all …]
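
Lines 1106-1109 show the usual pattern for a user-settable vCPU attribute:
bounds-check the incoming id before caching it, since the Xen vCPU id a VMM
assigns need not match KVM's own vcpu_id. A hedged sketch of just that check
(the enclosing attribute-dispatch handler and locking are omitted):

    /* Abbreviated from the set-attr shape visible in the excerpt. */
    static int xen_set_vcpu_id_attr(struct kvm_vcpu *vcpu, u32 new_id)
    {
        if (new_id >= KVM_MAX_VCPUS)      /* line 1106 */
            return -EINVAL;

        vcpu->arch.xen.vcpu_id = new_id;  /* line 1109: used by hypercalls */
        return 0;
    }

Lines 1616 and 1648 then show the payoff: kvm_xen_hcall_vcpu_op() compares the
hypercall's vcpu_id against this cached value rather than KVM's own id.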
|
| hyperv.c |
    227  trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);  in kvm_hv_notify_acked_sint()
    268  trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);  in synic_set_msr()
    370  trace_kvm_hv_syndbg_set_msr(vcpu->vcpu_id,  in syndbg_set_msr()
    432  trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, kvm_hv_get_vpindex(vcpu), msr, *pdata);  in syndbg_get_msr()
    496  trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);  in synic_set_irq()
    520  trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);  in kvm_hv_synic_send_eoi()
    607  trace_kvm_hv_stimer_cleanup(hv_stimer_to_vcpu(stimer)->vcpu_id,  in stimer_cleanup()
    622  trace_kvm_hv_stimer_callback(hv_stimer_to_vcpu(stimer)->vcpu_id,  in stimer_timer_callback()
    656  hv_stimer_to_vcpu(stimer)->vcpu_id,  in stimer_start()
    678  trace_kvm_hv_stimer_start_one_shot(hv_stimer_to_vcpu(stimer)->vcpu_id,  in stimer_start()
    [all …]
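
The stimer hits all recover the owning vCPU from an embedded timer struct
before tracing, which in the kernel is a container_of() chain. A generic,
self-contained sketch of the idiom (struct layout invented for illustration,
not the real Hyper-V types):

    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Invented nesting that mimics hv_stimer_to_vcpu(): a timer embedded in a
     * per-vCPU Hyper-V context, itself embedded in the vCPU. */
    struct sample_stimer { int config; };
    struct sample_hv     { struct sample_stimer stimer; };
    struct sample_vcpu   { unsigned int vcpu_id; struct sample_hv hv; };

    static struct sample_vcpu *stimer_to_vcpu(struct sample_stimer *stimer)
    {
        struct sample_hv *hv = container_of(stimer, struct sample_hv, stimer);
        return container_of(hv, struct sample_vcpu, hv);
    }
    /* Usage: trace(stimer_to_vcpu(stimer)->vcpu_id, ...), as at line 607. */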
|
| /linux/tools/testing/selftests/kvm/lib/x86/ |
| memstress.c |
     18  void memstress_l2_guest_code(uint64_t vcpu_id)  in memstress_l2_guest_code() argument
     20  memstress_guest_code(vcpu_id);  in memstress_l2_guest_code()
     32  static void memstress_l1_guest_code(struct vmx_pages *vmx, uint64_t vcpu_id)  in memstress_l1_guest_code() argument
     44  *rsp = vcpu_id;  in memstress_l1_guest_code()
     85  int vcpu_id;  in memstress_setup_nested() local
     90  for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {  in memstress_setup_nested()
     93  if (vcpu_id == 0) {  in memstress_setup_nested()
    107  vcpu_regs_get(vcpus[vcpu_id], &regs);  in memstress_setup_nested()
    109  vcpu_regs_set(vcpus[vcpu_id], &regs);  in memstress_setup_nested()
    110  vcpu_args_set(vcpus[vcpu_id], 2, vmx_gva, vcpu_id);  in memstress_setup_nested()
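
memstress_setup_nested() shows the selftest convention for handing each vCPU
its own identity: the loop index doubles as vcpu_id and is forwarded to the
guest through vcpu_args_set(). A minimal usage sketch of that convention (the
guest body is a placeholder):

    static void guest_code(uint64_t vcpu_id)
    {
        /* ... per-vCPU work keyed by vcpu_id ... */
        GUEST_DONE();
    }

    static void assign_vcpu_ids(struct kvm_vcpu *vcpus[], int nr_vcpus)
    {
        for (int i = 0; i < nr_vcpus; i++)
            vcpu_args_set(vcpus[i], 1, (uint64_t)i);  /* arg 0 = vcpu_id */
    }

Line 110 does the same with two arguments, so the L1 guest also receives its
vmx_gva before passing vcpu_id down to the L2 guest code.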
|
| /linux/arch/arm64/kvm/vgic/ |
| trace.h |
     11  TP_PROTO(unsigned long vcpu_id, __u32 irq, bool level),
     12  TP_ARGS(vcpu_id, irq, level),
     15  __field( unsigned long, vcpu_id )
     21  __entry->vcpu_id = vcpu_id;
     27  __entry->vcpu_id, __entry->irq, __entry->level)
|
| vgic-debug.c |
     30  int vcpu_id;  member
     60  ++iter->vcpu_id < iter->nr_cpus)  in iter_next()
    122  iter->vcpu_id == iter->nr_cpus &&  in end_of_vgic()
    291  if (iter->vcpu_id < iter->nr_cpus)  in vgic_debug_show()
    292  vcpu = kvm_get_vcpu(kvm, iter->vcpu_id);  in vgic_debug_show()
|
| /linux/samples/acrn/ |
| vm-sample.c |
     46  int vcpu_id, ret;  in main() local
     82  regs.vcpu_id = 0;  in main()
    108  for (vcpu_id = 0; vcpu_id < vcpu_num; vcpu_id++) {  in main()
    109  io_req = &io_req_buf[vcpu_id];  in main()
    121  notify.vcpu = vcpu_id;  in main()
|
| /linux/drivers/virt/nitro_enclaves/ |
| ne_misc_dev.c |
    528  int core_id, u32 vcpu_id)  in ne_set_enclave_threads_per_core() argument
    532  if (core_id < 0 && vcpu_id == 0) {  in ne_set_enclave_threads_per_core()
    541  "CPU %d is not in NE CPU pool\n", vcpu_id);  in ne_set_enclave_threads_per_core()
    573  static int ne_get_cpu_from_cpu_pool(struct ne_enclave *ne_enclave, u32 *vcpu_id)  in ne_get_cpu_from_cpu_pool() argument
    588  *vcpu_id = cpu;  in ne_get_cpu_from_cpu_pool()
    601  rc = ne_set_enclave_threads_per_core(ne_enclave, core_id, *vcpu_id);  in ne_get_cpu_from_cpu_pool()
    605  *vcpu_id = cpumask_any(ne_enclave->threads_per_core[core_id]);  in ne_get_cpu_from_cpu_pool()
    626  static int ne_get_vcpu_core_from_cpu_pool(u32 vcpu_id)  in ne_get_vcpu_core_from_cpu_pool() argument
    632  if (cpumask_test_cpu(vcpu_id, ne_cpu_pool.avail_threads_per_core[i])) {  in ne_get_vcpu_core_from_cpu_pool()
    652  static int ne_check_cpu_in_cpu_pool(struct ne_enclave *ne_enclave, u32 vcpu_id)  in ne_check_cpu_in_cpu_pool() argument
    [all …]
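
ne_misc_dev.c resolves a vcpu_id to its physical core by probing per-core
cpumasks of available sibling threads. A kernel-style sketch of that lookup
(the pool layout is simplified; the real code reads a global ne_cpu_pool under
a mutex):

    /* Simplified from ne_get_vcpu_core_from_cpu_pool(): scan each core's mask
     * of still-available threads for the requested CPU id. */
    static int vcpu_id_to_core(u32 vcpu_id, struct cpumask **threads_per_core,
                               int nr_cores)
    {
        int i;

        for (i = 0; i < nr_cores; i++)
            if (cpumask_test_cpu(vcpu_id, threads_per_core[i]))
                return i;

        return -EINVAL;  /* vcpu_id is not in the NE CPU pool */
    }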
|
| /linux/arch/arm64/kvm/ |
| trace_arm.h |
    207  TP_PROTO(unsigned long vcpu_id, __u32 irq, int level),
    208  TP_ARGS(vcpu_id, irq, level),
    211  __field( unsigned long, vcpu_id )
    217  __entry->vcpu_id = vcpu_id;
    223  __entry->vcpu_id, __entry->irq, __entry->level)
    227  TP_PROTO(unsigned long vcpu_id, struct timer_map *map),
    228  TP_ARGS(vcpu_id, map),
    231  __field( unsigned long, vcpu_id )
    239  __entry->vcpu_id = vcpu_id;
    250  __entry->vcpu_id,
|
| /linux/arch/s390/kvm/ |
| sigp.c |
     39  VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", dst_vcpu->vcpu_id,  in __sigp_sense()
     49  .u.emerg.code = vcpu->vcpu_id,  in __inject_sigp_emergency()
     56  dst_vcpu->vcpu_id);  in __inject_sigp_emergency()
     98  .u.extcall.code = vcpu->vcpu_id,  in __sigp_external_call()
    109  dst_vcpu->vcpu_id);  in __sigp_external_call()
    127  dst_vcpu->vcpu_id);  in __sigp_stop()
    146  dst_vcpu->vcpu_id);  in __sigp_stop_and_store_status()
    235  dst_vcpu->vcpu_id, rc);  in __sigp_sense_running()
    370  order_code, dst_vcpu->vcpu_id);  in handle_sigp_dst()
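
In sigp.c the sender's vcpu_id travels inside the interrupt payload (lines 49
and 98), so the destination vCPU can tell which CPU signalled it. A sketch of
that shape, with the payload struct abbreviated to the fields the excerpt
touches:

    #include <stdint.h>

    /* Abbreviated stand-in for struct kvm_s390_irq. */
    struct s390_irq_sketch {
        union {
            struct { uint16_t code; } emerg;   /* emergency: sender's vcpu_id */
            struct { uint16_t code; } extcall; /* external call: sender's vcpu_id */
        } u;
    };

    static struct s390_irq_sketch make_emergency(uint16_t sender_vcpu_id)
    {
        /* Mirrors line 49: stamp the sender's id into the payload. */
        return (struct s390_irq_sketch){ .u.emerg.code = sender_vcpu_id };
    }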
|
| interrupt.c |
     48  union esca_sigp_ctrl sigp_ctrl = sca->cpu[vcpu->vcpu_id].sigp_ctrl;  in sca_ext_call_pending()
     64  union esca_sigp_ctrl *sigp_ctrl = &sca->cpu[vcpu->vcpu_id].sigp_ctrl;  in sca_inject_ext_call()
     87  union esca_sigp_ctrl *sigp_ctrl = &sca->cpu[vcpu->vcpu_id].sigp_ctrl;  in sca_clear_ext_call()
    438  trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,  in __deliver_cpu_timer()
    462  trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,  in __deliver_ckc()
    494  trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,  in __deliver_pfault_init()
    662  trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,  in __deliver_machine_check()
    678  trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);  in __deliver_restart()
    705  trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,  in __deliver_set_prefix()
    728  trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,  in __deliver_emergency_signal()
    [all …]
|
| /linux/tools/testing/selftests/kvm/lib/arm64/ |
| gic_v3_its.c |
    168  static u64 procnum_to_rdbase(u32 vcpu_id)  in procnum_to_rdbase() argument
    170  return vcpu_id << GITS_COLLECTION_TARGET_SHIFT;  in procnum_to_rdbase()
    221  void its_send_mapc_cmd(void *cmdq_base, u32 vcpu_id, u32 collection_id, bool valid)  in its_send_mapc_cmd() argument
    227  its_encode_target(&cmd, procnum_to_rdbase(vcpu_id));  in its_send_mapc_cmd()
    257  void its_send_sync_cmd(void *cmdq_base, u32 vcpu_id)  in its_send_sync_cmd() argument
    262  its_encode_target(&cmd, procnum_to_rdbase(vcpu_id));  in its_send_sync_cmd()
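
procnum_to_rdbase() turns a vCPU number into an ITS redistributor target by
shifting it into the command's target field, and both the MAPC and SYNC paths
route through it. A sketch with the shift made explicit (the shift value and
the width guard are assumptions added here, not taken from the original):

    #include <assert.h>
    #include <stdint.h>

    /* Assumed value: the MAPC target (RDbase) field starts at bit 16. */
    #define GITS_COLLECTION_TARGET_SHIFT 16

    static uint64_t procnum_to_rdbase(uint32_t vcpu_id)
    {
        assert(vcpu_id < (1u << 16));  /* added guard: keep the id in-field */
        return (uint64_t)vcpu_id << GITS_COLLECTION_TARGET_SHIFT;
    }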
|
| processor.c |
    419  static struct kvm_vcpu *__aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,  in __aarch64_vcpu_add() argument
    424  struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);  in __aarch64_vcpu_add()
    438  struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,  in aarch64_vcpu_add() argument
    441  struct kvm_vcpu *vcpu = __aarch64_vcpu_add(vm, vcpu_id, init);  in aarch64_vcpu_add()
    448  struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)  in vm_arch_vcpu_add() argument
    450  return __aarch64_vcpu_add(vm, vcpu_id, NULL);  in vm_arch_vcpu_add()
|
| /linux/tools/testing/selftests/kvm/include/arm64/ |
| gic_v3_its.h |
     14  void its_send_mapc_cmd(void *cmdq_base, u32 vcpu_id, u32 collection_id, bool valid);
     18  void its_send_sync_cmd(void *cmdq_base, u32 vcpu_id);
|
| /linux/tools/testing/selftests/kvm/x86/ |
| hyperv_ipi.c |
     56  u32 vcpu_id;  in receiver_code() local
     61  vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX);  in receiver_code()
     64  ipis_rcvd[vcpu_id] = (u64)-1;  in receiver_code()
     74  u32 vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX);  in guest_ipi_handler() local
     76  ipis_rcvd[vcpu_id]++;  in guest_ipi_handler()
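
Both Hyper-V selftests derive the guest-visible vcpu_id by reading the
HV_X64_MSR_VP_INDEX synthetic MSR inside the guest, rather than trusting a
value cached elsewhere. A guest-side sketch of that pattern (array size and
names are illustrative):

    static uint64_t ipis_rcvd[64];  /* illustrative per-vCPU counters */

    static void guest_ipi_handler(struct ex_regs *regs)
    {
        /* VP_INDEX identifies this vCPU from the guest's point of view. */
        u32 vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX);

        ipis_rcvd[vcpu_id]++;  /* line 76: count the IPI on the receiver */
    }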
|
| hyperv_tlb_flush.c |
     74  u32 vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX);  in worker_guest_code() local
     76  u64 *this_cpu = (u64 *)(exp_page + vcpu_id * sizeof(u64));  in worker_guest_code()
    125  static void set_expected_val(void *addr, u64 val, int vcpu_id)  in set_expected_val() argument
    129  *(u64 *)(exp_page + vcpu_id * sizeof(u64)) = val;  in set_expected_val()
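
hyperv_tlb_flush.c carves a shared page into one u64 slot per vCPU with plain
pointer arithmetic, and the same addressing appears on the guest side (line
76) and the host side (line 129). A compact sketch of that slot helper,
factored out here where the original open-codes it:

    #include <stdint.h>

    /* One 8-byte slot per vCPU inside a shared, page-aligned buffer. */
    static inline uint64_t *vcpu_slot(void *exp_page, uint32_t vcpu_id)
    {
        return (uint64_t *)((uint8_t *)exp_page + vcpu_id * sizeof(uint64_t));
    }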
|
| /linux/tools/perf/ |
| builtin-kvm.c |
    633  int vcpu_id;  member
    669  static bool kvm_event_expand(struct kvm_event *event, int vcpu_id)  in kvm_event_expand() argument
    674  if (vcpu_id < event->max_vcpu)  in kvm_event_expand()
    677  while (event->max_vcpu <= vcpu_id)  in kvm_event_expand()
    776  static double kvm_event_rel_stddev(int vcpu_id, struct kvm_event *event)  in kvm_event_rel_stddev() argument
    780  if (vcpu_id != -1)  in kvm_event_rel_stddev()
    781  kvm_stats = &event->vcpu[vcpu_id];  in kvm_event_rel_stddev()
    788  struct kvm_event *event, int vcpu_id,  in update_kvm_event() argument
    795  if (vcpu_id == -1) {  in update_kvm_event()
    800  if (!kvm_event_expand(event, vcpu_id))  in update_kvm_event()
    [all …]
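
builtin-kvm.c keeps per-vCPU statistics in a lazily grown array: vcpu_id == -1
means "all vCPUs" (lines 780 and 795), and kvm_event_expand() stretches
event->vcpu[] until the observed id fits. A hedged sketch of that lazy growth
(the doubling policy is a guess; perf's exact growth step may differ):

    #include <stdbool.h>
    #include <stdlib.h>
    #include <string.h>

    struct vcpu_stats { unsigned long count; };

    struct kvm_event_sketch {
        int max_vcpu;             /* slots currently allocated */
        struct vcpu_stats *vcpu;  /* per-vCPU stats, grown on demand */
    };

    static bool event_expand(struct kvm_event_sketch *ev, int vcpu_id)
    {
        if (vcpu_id < ev->max_vcpu)   /* line 674: already big enough */
            return true;

        int new_max = ev->max_vcpu ? ev->max_vcpu : 1;
        while (new_max <= vcpu_id)    /* line 677: grow until the id fits */
            new_max *= 2;

        struct vcpu_stats *p = realloc(ev->vcpu, new_max * sizeof(*p));
        if (!p)
            return false;

        memset(p + ev->max_vcpu, 0, (new_max - ev->max_vcpu) * sizeof(*p));
        ev->vcpu = p;
        ev->max_vcpu = new_max;
        return true;
    }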
|
| /linux/arch/x86/kvm/svm/ |
| avic.c |
    381  u32 id = vcpu->vcpu_id;  in avic_init_backing_page()
    450  trace_kvm_avic_doorbell(vcpu->vcpu_id, kvm_cpu_get_apicid(cpu));  in avic_ring_doorbell()
    615  trace_kvm_avic_incomplete_ipi(vcpu->vcpu_id, icrh, icrl, id, index);  in avic_incomplete_ipi_interception()
    818  trace_kvm_avic_unaccelerated_access(vcpu->vcpu_id, offset,  in avic_unaccelerated_access_interception()
   1012  if (WARN_ON_ONCE(vcpu->vcpu_id * sizeof(entry) >=  in __avic_vcpu_load()
   1044  WRITE_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id], entry);  in __avic_vcpu_load()
   1075  if (WARN_ON_ONCE(vcpu->vcpu_id * sizeof(entry) >=  in __avic_vcpu_put()
   1100  WRITE_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id], entry);  in __avic_vcpu_put()
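
avic.c uses vcpu_id directly as the index into the AVIC physical ID table,
bounds-checking with WARN_ON_ONCE before publishing the entry with WRITE_ONCE,
since hardware and other CPUs read the table locklessly. A kernel-style sketch
of that publish step (the table-size parameter is an assumption standing in
for the real bound):

    /* Sketch of the __avic_vcpu_load()/__avic_vcpu_put() publish seen at
     * lines 1012-1100; error handling around it is omitted. */
    static void avic_publish_entry(u64 *physical_id_table, size_t table_size,
                                   unsigned int vcpu_id, u64 entry)
    {
        if (WARN_ON_ONCE(vcpu_id * sizeof(entry) >= table_size))
            return;

        /* WRITE_ONCE keeps the 64-bit store whole for lockless readers. */
        WRITE_ONCE(physical_id_table[vcpu_id], entry);
    }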
|
| /linux/arch/x86/kvm/mmu/ |
| mmutrace.h |
    265  __field(int, vcpu_id)
    275  __entry->vcpu_id = vcpu->vcpu_id;
    285  " new %llx spurious %d fixed %d", __entry->vcpu_id,
|
| /linux/arch/riscv/kvm/ |
| vcpu_sbi_replace.c |
     64  if (tmp->vcpu_id < hbase)  in kvm_sbi_ext_ipi_handler()
     66  hart_bit = tmp->vcpu_id - hbase;  in kvm_sbi_ext_ipi_handler()
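
The RISC-V SBI IPI handler converts a hart range into per-vCPU mask bits:
harts below hbase are skipped, and the bit position is vcpu_id - hbase. A
sketch of that math with the mask-width guard made explicit (the width check
is an assumption added here for clarity):

    /* Sketch of the hart-mask test behind lines 64-66. */
    static bool hart_mask_test(unsigned long hmask, unsigned long hbase,
                               unsigned long vcpu_id)
    {
        unsigned long bit;

        if (vcpu_id < hbase)    /* line 64: below the mask window */
            return false;

        bit = vcpu_id - hbase;  /* line 66 */
        return bit < 8 * sizeof(hmask) && (hmask & (1UL << bit));
    }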
|
| /linux/arch/mips/kvm/ |
| stats.c |
     54  kvm_info("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);  in kvm_mips_dump_stats()
|
| /linux/include/xen/interface/ |
| xenpmu.h |
     73  uint32_t vcpu_id;  member
|
| /linux/tools/testing/selftests/kvm/include/ |
| kvm_util.h |
    694  struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
   1155  struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
   1158  static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,  in vm_vcpu_add() argument
   1161  struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id);  in vm_vcpu_add()
   1169  struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id);
   1172  uint32_t vcpu_id)  in vm_vcpu_recreate() argument
   1174  return vm_arch_vcpu_recreate(vm, vcpu_id);  in vm_vcpu_recreate()
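
kvm_util.h layers the vCPU factory: __vm_vcpu_add() creates the bare vCPU,
each architecture's vm_arch_vcpu_add() adds its setup on top (see the arm64
and s390 processor.c entries), and the vm_vcpu_add() inline finally points the
vCPU at guest code. A typical selftest call sequence (guest_main is a
test-provided placeholder):

    static void guest_main(void)
    {
        GUEST_DONE();
    }

    int main(void)
    {
        struct kvm_vm *vm = vm_create(1);
        struct kvm_vcpu *vcpu = vm_vcpu_add(vm, 0 /* vcpu_id */, guest_main);

        vcpu_run(vcpu);
        kvm_vm_free(vm);
        return 0;
    }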
|
| /linux/tools/testing/selftests/kvm/lib/s390/ |
| processor.c |
    163  struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)  in vm_arch_vcpu_add() argument
    178  vcpu = __vm_vcpu_add(vm, vcpu_id);  in vm_arch_vcpu_add()
|