/linux/tools/testing/selftests/kvm/

kvm_page_table_test.c
    56: struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];  [member]
    107: * Before dirty logging, vCPUs concurrently access the first  [in guest_code()]
    162: * After dirty logging is stopped, vCPUs concurrently read  [in guest_code()]
    256: guest_code, test_args.vcpus);  [in pre_init_before_test()]
    311: pr_info("Number of testing vCPUs: %d\n", nr_vcpus);  [in pre_init_before_test()]
    319: int vcpus;  [in vcpus_complete_new_stage(), local]
    321: /* Wake up all the vcpus to run new test stage */  [in vcpus_complete_new_stage()]
    322: for (vcpus = 0; vcpus < nr_vcpus; vcpus++) {  [in vcpus_complete_new_stage()]
    326: pr_debug("All vcpus have been notified to continue\n");  [in vcpus_complete_new_stage()]
    328: /* Wait for all the vcpus to complete new test stage */  [in vcpus_complete_new_stage()]
    [all ...]

mmu_stress_test.c
    41: * has occurred, otherwise vCPUs may complete their writes and advance  [in guest_code()]
    149: * validating *all* of guest memory sync for this stage, as vCPUs will  [in vcpu_worker()]
    205: static pthread_t *spawn_workers(struct kvm_vm *vm, struct kvm_vcpu **vcpus,  [in spawn_workers(), argument]
    224: info[i].vcpu = vcpus[i];  [in spawn_workers()]
    236: pr_info("Waiting for vCPUs to finish %s...\n", name);  [in rendezvous_with_vcpus()]
    242: pr_info("\r%d vCPUs haven't rendezvoused...",  [in rendezvous_with_vcpus()]
    249: /* Release the vCPUs after getting the time of the previous action. */  [in rendezvous_with_vcpus()]
    250: pr_info("\rAll vCPUs finished %s, releasing...\n", name);  [in rendezvous_with_vcpus()]
    286: struct kvm_vcpu **vcpus;  [in main(), local]
    309: nr_vcpus = atoi_positive("Number of vCPUs", optarg);  [in main()]
    [all ...]

kvm_binary_stats_test.c
    179: * The second parameter #vcpu set the number of VCPUs being created.
    188: struct kvm_vcpu **vcpus;  [in main(), local]
    193: /* Get the number of VMs and VCPUs that would be created for testing. */  [in main()]
    212: /* Create VMs and VCPUs */  [in main()]
    216: vcpus = malloc(sizeof(struct kvm_vcpu *) * max_vm * max_vcpu);  [in main()]
    217: TEST_ASSERT(vcpus, "Allocate memory for storing vCPU pointers");  [in main()]
    229: vcpus[i * max_vcpu + j] = __vm_vcpu_add(vms[i], j);  [in main()]
    248: vcpu_stats_fds[j] = vcpu_get_stats_fd(vcpus[i * max_vcpu + j]);  [in main()]
    250: stats_test(vcpu_get_stats_fd(vcpus[i * max_vcpu + j]));  [in main()]
    269: free(vcpus);  [in main()]

arch_timer.c
    15: * period (-p), number of vCPUs (-n), iterations per stage (-i) and timer
    17: * even more, an option to migrate the vCPUs across pCPUs (-m), at a
    40: struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];  [variable]
    51: struct kvm_vcpu *vcpu = vcpus[vcpu_idx];  [in test_vcpu_run()]
    180: pr_info("\t-n: Number of vCPUs to configure (default: %u; max: %u)\n",  [in test_print_help()]
    186: …pr_info("\t-m: Frequency (in ms) of vCPUs to migrate to different pCPU. 0 to turn off (default: %u…  [in test_print_help()]
    201: test_args.nr_vcpus = atoi_positive("Number of vCPUs", optarg);  [in parse_args()]
    203: pr_info("Max allowed vCPUs: %u\n",  [in parse_args()]

memslot_modification_stress_test.c
    102: pr_info("Finished creating vCPUs\n");  [in run_test()]
    106: pr_info("Started all vCPUs\n");  [in run_test()]
    120: " [-b memory] [-v vcpus] [-o] [-i iterations]\n", name);  [in help()]
    128: printf(" -v: specify the number of vCPUs to run.\n");  [in help()]
    162: nr_vcpus = atoi_positive("Number of vCPUs", optarg);  [in main()]
    164: "Invalid number of vcpus, must be between 1 and %d",  [in main()]

access_tracking_perf_test.c
    17: * vCPUs that each touch every page in disjoint regions of memory. Performance
    18: * is measured in the time it takes all vCPUs to finish touching their
    77: /* Whether to overlap the regions of memory vCPUs access. */
    106: /* The number of vCPUs to create in the VM. */
    185: /* If vCPUs are using an overlapping region, let vCPU 0 mark it idle. */  [in pageidle_mark_vcpu_memory_idle()]
    367: /* Kick off the vCPUs by incrementing iteration. */  [in run_iteration()]
    372: /* Wait for all vCPUs to finish the iteration. */  [in run_iteration()]
    395: * Even though this parallelizes the work across vCPUs, this is still a  [in mark_memory_idle()]
    494: printf("usage: %s [-h] [-m mode] [-b vcpu_bytes] [-v vcpus] [-o] [-s mem_type]\n",  [in help()]
    502: printf(" -v: specify the number of vCPUs t ...  [in help()]
    [all ...]

demand_paging_test.c
    209: pr_info("Finished creating vCPUs and starting uffd threads\n");  [in run_test()]
    213: pr_info("Started all vCPUs\n");  [in run_test()]
    247: " [-s type] [-v vcpus] [-c cpu_list] [-o]\n", name);  [in help()]
    263: printf(" -v: specify the number of vCPUs to run.\n");  [in help()]
    310: nr_vcpus = atoi_positive("Number of vCPUs", optarg);  [in main()]
    312: "Invalid number of vcpus, must be between 1 and %d", max_vcpus);  [in main()]

/linux/tools/testing/selftests/kvm/arm64/

vgic_init.c
    76: struct kvm_vcpu *vcpus[])  [in vm_gic_create_with_vcpus(), argument]
    81: v.vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);  [in vm_gic_create_with_vcpus()]
    138: * DIST/REDIST (or DIST/CPUIF for GICv2). Assumption is 4 vcpus are going to be
    337: struct kvm_vcpu *vcpus[NR_VCPUS];  [in test_vgic_then_vcpus(), local]
    341: v = vm_gic_create_with_vcpus(gic_dev_type, 1, vcpus);  [in test_vgic_then_vcpus()]
    345: /* Add the rest of the VCPUs */  [in test_vgic_then_vcpus()]
    347: vcpus[i] = vm_vcpu_add(v.vm, i, guest_code);  [in test_vgic_then_vcpus()]
    349: ret = run_vcpu(vcpus[3]);  [in test_vgic_then_vcpus()]
    355: /* All the VCPUs are created before the VGIC KVM device gets initialized */
    358: struct kvm_vcpu *vcpus[NR_VCPUS];  [in test_vcpus_then_vgic(), local]
    408: struct kvm_vcpu *vcpus[NR_VCPUS];  [in test_v3_new_redist_regions(), local]
    606: struct kvm_vcpu *vcpus[NR_VCPUS];  [in test_v3_redist_ipa_range_check_at_vcpu_run(), local]
    639: struct kvm_vcpu *vcpus[NR_VCPUS];  [in test_v3_its_region(), local]
    681: struct kvm_vcpu *vcpus[NR_VCPUS];  [in test_v3_nassgicap(), local]
    722: struct kvm_vcpu *vcpus[NR_VCPUS];  [in test_kvm_device(), local]
    [all ...]

vgic_lpi_stress.c
    29: static struct kvm_vcpu **vcpus;  [variable]
    73: /* Round-robin the LPIs to all of the vCPUs in the VM */  [in guest_setup_its_mappings()]
    310: pthread_create(&vcpu_threads[i], NULL, vcpu_worker_thread, vcpus[i]);  [in run_test()]
    335: vcpus = malloc(test_data.nr_cpus * sizeof(struct kvm_vcpu *));  [in setup_vm()]
    336: TEST_ASSERT(vcpus, "Failed to allocate vCPU array");  [in setup_vm()]
    338: vm = vm_create_with_vcpus(test_data.nr_cpus, guest_code, vcpus);  [in setup_vm()]
    342: vcpu_init_descriptor_tables(vcpus[i]);  [in setup_vm()]
    357: free(vcpus);  [in destroy_vm()]
    363: pr_info(" -v:\tnumber of vCPUs (default: %u)\n", test_data.nr_cpus);  [in pr_usage()]

/linux/tools/testing/selftests/kvm/include/

memstress.h
    41: /* Run vCPUs in L2 instead of L1, if the architecture supports it. */
    45: /* True if all vCPUs are pinned to pCPUs */
    50: /* Test is done, stop running vCPUs. */
    67: void memstress_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct memstress_vcpu_args *));
    68: void memstress_join_vcpu_threads(int vcpus);
    72: void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]);
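
The thread helpers declared above take a vCPU count and a per-vCPU callback. A minimal sketch of how a memstress-based test might drive them, assuming the VM and memslots have already been set up via lib/memstress.c; vcpu_worker() here is a hypothetical callback, not one taken from a real test:

    #include "memstress.h"

    /* Hypothetical per-vCPU worker; the real tests pull the vCPU out of
     * args and run it in a loop until the test signals completion. */
    static void vcpu_worker(struct memstress_vcpu_args *args)
    {
            /* per-vCPU measurement work elided */
    }

    static void run_vcpus(int nr_vcpus)
    {
            /* Spawns one thread per vCPU and hands each its memstress_vcpu_args. */
            memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);

            /* ... timed portion of the test runs here ... */

            memstress_join_vcpu_threads(nr_vcpus);
    }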

/linux/tools/testing/selftests/kvm/lib/

memstress.c
    41: static struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];  [variable]
    89: struct kvm_vcpu *vcpus[],  [in memstress_setup_vcpus(), argument]
    100: vcpu_args->vcpu = vcpus[i];  [in memstress_setup_vcpus()]
    116: vcpu_args_set(vcpus[i], 1, i);  [in memstress_setup_vcpus()]
    138: /* By default vCPUs will write to memory. */  [in memstress_create_vm()]
    172: memstress_guest_code, vcpus);  [in memstress_create_vm()]
    181: * When running vCPUs in L2, restrict the test region to 48 bits to  [in memstress_create_vm()]
    220: memstress_setup_vcpus(vm, nr_vcpus, vcpus, vcpu_memory_bytes,  [in memstress_create_vm()]
    224: pr_info("Configuring vCPUs to run in L2 (nested).\n");  [in memstress_create_vm()]
    225: memstress_setup_nested(vm, nr_vcpus, vcpus);  [in memstress_create_vm()]
    [all ...]

/linux/Documentation/virt/kvm/

vcpu-requests.rst
    36: /* Make request @req of all VCPUs of the VM with struct kvm @kvm. */
    42: and kvm_make_all_cpus_request() has the kicking of all VCPUs built
    56: 2) Waking a sleeping VCPU. Sleeping VCPUs are VCPU threads outside guest
    66: VCPUs have a mode state, ``vcpu->mode``, that is used to track whether the
    69: ensure VCPU requests are seen by VCPUs (see "Ensuring Requests Are Seen"),
    119: This request informs all VCPUs that the VM is dead and unusable, e.g. due to
    153: from VCPUs running in guest mode. That is, sleeping VCPUs do not need
    154: to be awakened for these requests. Sleeping VCPUs will handle the
    161: proceeding. This flag only applies to VCPUs that would receive IPIs.
    188: When making requests to VCPUs, we want to avoid the receiving VCPU
    [all ...]
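
The request API that document describes pairs a requester with the vCPU run loop. A rough in-kernel illustration of the pattern using the documented helpers; KVM_REQ_FOO and handle_foo() are placeholders, not real KVM symbols:

    #include <linux/kvm_host.h>

    /* Requester side: flag work for one vCPU, or for every vCPU in the VM.
     * kvm_make_all_cpus_request() also kicks vCPUs out of guest mode. */
    static void post_foo(struct kvm *kvm, struct kvm_vcpu *vcpu)
    {
            kvm_make_request(KVM_REQ_FOO, vcpu);
            kvm_make_all_cpus_request(kvm, KVM_REQ_FOO);
    }

    /* Receiver side: the vCPU thread checks (and clears) pending requests
     * before reentering the guest. */
    static void service_requests(struct kvm_vcpu *vcpu)
    {
            if (kvm_check_request(KVM_REQ_FOO, vcpu))
                    handle_foo(vcpu);       /* placeholder handler */
    }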

/linux/arch/x86/kvm/vmx/

posted_intr.c
    18: * Maintain a per-CPU list of vCPUs that need to be awakened by wakeup_handler()
    19: * when a WAKEUP_VECTOR interrupted is posted. vCPUs are added to the list when
    21: * The vCPUs posted interrupt descriptor is updated at the same time to set its
    23: * wake the target vCPUs. vCPUs are removed from the list and the notification
    152: * blocking vCPUs will see an elevated count or get KVM_REQ_UNBLOCK.  [in vmx_can_use_vtd_pi()]
    159: * Put the vCPU on this pCPU's list of vCPUs that needs to be awakened and set
    293: * Kick all vCPUs when the first possible bypass IRQ is attached to a VM, as
    294: * blocking vCPUs may scheduled out without reconfiguring PID.NV to the wakeup

/linux/tools/testing/selftests/kvm/x86/

xapic_state_test.c
    123: * Send all flavors of IPIs to non-existent vCPUs. Arbitrarily use  [in test_icr()]
    174: struct kvm_vcpu *vcpus[NR_VCPUS];  [in test_apic_id(), local]
    179: vm = vm_create_with_vcpus(NR_VCPUS, NULL, vcpus);  [in test_apic_id()]
    183: apic_base = vcpu_get_msr(vcpus[i], MSR_IA32_APICBASE);  [in test_apic_id()]
    190: __test_apic_id(vcpus[i], apic_base);  [in test_apic_id()]
    191: __test_apic_id(vcpus[i], apic_base | X2APIC_ENABLE);  [in test_apic_id()]
    192: __test_apic_id(vcpus[i], apic_base);  [in test_apic_id()]
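
The vm_create_with_vcpus()/vcpu_get_msr() pattern shown in the matches above is the usual shape of a small x86 selftest. A host-side-only sketch (NULL guest code, so the vCPUs are never actually run); the includes follow the selftest convention of "kvm_util.h" and "processor.h":

    #include <stdio.h>

    #include "kvm_util.h"
    #include "processor.h"

    #define NR_VCPUS 4

    int main(void)
    {
            struct kvm_vcpu *vcpus[NR_VCPUS];
            struct kvm_vm *vm;
            int i;

            /* NULL guest code: the vCPUs are only inspected from the host. */
            vm = vm_create_with_vcpus(NR_VCPUS, NULL, vcpus);

            for (i = 0; i < NR_VCPUS; i++)
                    printf("vCPU %d APIC base: 0x%lx\n", i,
                           (unsigned long)vcpu_get_msr(vcpus[i], MSR_IA32_APICBASE));

            kvm_vm_free(vm);
            return 0;
    }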

msrs_test.c
    342: static void vcpus_run(struct kvm_vcpu **vcpus, const int NR_VCPUS)  [in vcpus_run(), argument]
    347: do_vcpu_run(vcpus[i]);  [in vcpus_run()]
    407: * Create three vCPUs, but run them on the same task, to validate KVM's  [in test_msrs()]
    410: * set of features for the first two vCPUs, but clear all features in  [in test_msrs()]
    414: struct kvm_vcpu *vcpus[NR_VCPUS];  [in test_msrs(), local]
    424: vm = vm_create_with_vcpus(NR_VCPUS, guest_main, vcpus);  [in test_msrs()]
    443: vcpu_clear_cpuid_feature(vcpus[2], msrs[idx].feature);  [in test_msrs()]
    451: host_test_kvm_reg(vcpus[i]);  [in test_msrs()]
    472: vcpus_run(vcpus, NR_VCPUS);  [in test_msrs()]
    473: vcpus_run(vcpus, NR_VCPUS);  [in test_msrs()]

/linux/tools/testing/selftests/kvm/riscv/

arch_timer.c
    85: vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);  [in test_vm_create()]
    86: __TEST_REQUIRE(__vcpu_has_isa_ext(vcpus[0], KVM_RISCV_ISA_EXT_SSTC),  [in test_vm_create()]
    93: vcpu_init_vector_tables(vcpus[i]);  [in test_vm_create()]
    96: timer_freq = vcpu_get_reg(vcpus[0], RISCV_TIMER_REG(frequency));  [in test_vm_create()]

/linux/Documentation/virt/kvm/devices/

vcpu.rst
    40: all vcpus, while as an SPI it must be a separate number per vcpu.
    129: for one VCPU will be used by all the other VCPUs. It isn't possible to set a PMU
    179: -EBUSY  One or more VCPUs has already run
    193: Setting the same PPI for different timers will prevent the VCPUs from running.
    194: Setting the interrupt number on a VCPU configures all VCPUs created at that
    196: configured values on other VCPUs. Userspace should configure the interrupt
    197: numbers on at least one VCPU after creating all VCPUs and before running any
    198: VCPUs.
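
The timer text quoted above is set through per-vCPU device attributes on arm64. A userspace sketch of setting the virtual timer PPI on one vCPU fd, following the rule of configuring after all vCPUs are created and before any is run; error handling is omitted and the caller is assumed to already hold the vCPU fd:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Set the virtual timer interrupt (a PPI) on one vCPU; per the text
     * above, the value propagates to the other vCPUs created at that time. */
    static int set_vtimer_ppi(int vcpu_fd, uint32_t ppi)
    {
            struct kvm_device_attr attr = {
                    .group = KVM_ARM_VCPU_TIMER_CTRL,
                    .attr  = KVM_ARM_VCPU_TIMER_IRQ_VTIMER,
                    .addr  = (uint64_t)(unsigned long)&ppi,
            };

            return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
    }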

/linux/Documentation/arch/powerpc/

vcpudispatch_stats.rst
    8: static mapping of the LPAR processors (vcpus) to physical processor
    9: chips (representing the "home" node) and tries to always dispatch vcpus
    11: scenarios, vcpus may be dispatched on a different processor chip (away

/linux/drivers/xen/

privcmd.c
    1141: unsigned int vcpus;  [member]
    1147: struct ioreq_port ports[] __counted_by(vcpus);
    1212: for (i = kioreq->vcpus - 1; i >= 0; i--)  [in ioreq_free()]
    1230: size = struct_size(kioreq, ports, ioeventfd->vcpus);  [in alloc_ioreq()]
    1236: kioreq->vcpus = ioeventfd->vcpus;  [in alloc_ioreq()]
    1256: kioreq->vcpus, sizeof(*ports));  [in alloc_ioreq()]
    1262: for (i = 0; i < kioreq->vcpus; i++) {  [in alloc_ioreq()]
    1306: kioreq->vcpus != ioeventfd->vcpus) {  [in get_ioreq()]
    1307: pr_err("Invalid ioeventfd configuration mismatch, dom (%u vs %u), vcpus (%u vs %u)\n",  [in get_ioreq()]
    1308: kioreq->dom, ioeventfd->dom, kioreq->vcpus,  [in get_ioreq()]
    [all ...]

/linux/include/uapi/linux/

nitro_enclaves.h
    20: * setting any resources, such as memory and vCPUs, for an
    21: * enclave. Memory and vCPUs are set for the slot mapped to an enclave.
    35: * ioctl calls to set vCPUs and memory
    160: * vCPUs are set for an enclave.
    171: * * NE_ERR_NO_VCPUS_ADDED - No vCPUs are set.
    240: * vCPUs are added.
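
A rough userspace sketch of the vCPU part of the flow those header comments describe: create an enclave slot, then add vCPUs to it. The ioctl argument details here are recalled from the UAPI header and its sample code, so treat them as assumptions; memory setup and NE_START_ENCLAVE are elided:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/nitro_enclaves.h>

    int main(void)
    {
            unsigned long long slot_uid = 0;
            unsigned int vcpu_id;
            int ne_dev_fd, enclave_fd, i;

            ne_dev_fd = open("/dev/nitro_enclaves", O_RDWR | O_CLOEXEC);
            if (ne_dev_fd < 0)
                    return 1;

            /* Returns an enclave fd and fills in the slot unique id. */
            enclave_fd = ioctl(ne_dev_fd, NE_CREATE_VM, &slot_uid);
            if (enclave_fd < 0)
                    return 1;

            for (i = 0; i < 2; i++) {
                    vcpu_id = 0;    /* 0: let the kernel pick a CPU from the enclave pool */
                    if (ioctl(enclave_fd, NE_ADD_VCPU, &vcpu_id) < 0)
                            return 1;
                    printf("Added enclave vCPU %u\n", vcpu_id);
            }

            /* Memory regions and NE_START_ENCLAVE would follow; starting with
             * no vCPUs added fails with NE_ERR_NO_VCPUS_ADDED, per the header. */
            return 0;
    }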

/linux/Documentation/virt/kvm/x86/

hypercalls.rst
    145: :Purpose: Send IPIs to multiple vCPUs.
    153: 128 destinations per hypercall in 64-bit mode and 64 vCPUs per
    170: :Usage example: When sending a call-function IPI-many to vCPUs, yield if
    171: any of the IPI target vCPUs was preempted.
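
A guest-side sketch of the multicast-IPI hypercall summarized above (a0/a1: destination APIC-ID bitmap, a2: lowest APIC ID in the bitmap, a3: APIC ICR value). It assumes a Linux guest with the KVM paravirt headers available and that both target APIC IDs fall within 64 of the lowest one:

    #include <linux/kvm_para.h>     /* kvm_hypercall4(), KVM_HC_SEND_IPI */
    #include <asm/apicdef.h>        /* APIC_DM_FIXED */

    static void kick_two_vcpus(unsigned long apic_id_a, unsigned long apic_id_b,
                               unsigned int vector)
    {
            unsigned long min = apic_id_a < apic_id_b ? apic_id_a : apic_id_b;
            unsigned long bitmap = (1UL << (apic_id_a - min)) |
                                   (1UL << (apic_id_b - min));

            /* a0 = low 64 bits of the bitmap, a1 = high 64 bits (unused here),
             * a2 = base APIC ID of the bitmap, a3 = ICR (delivery mode | vector). */
            kvm_hypercall4(KVM_HC_SEND_IPI, bitmap, 0, min, APIC_DM_FIXED | vector);
    }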

/linux/arch/x86/kvm/svm/

avic.c
    37: * lookup on the index, where as vCPUs whose index doesn't match their ID need
    38: * to walk the entire xarray of vCPUs in the worst case scenario.
    381: * Initialize the real table, as vCPUs must have a valid entry in order  [in avic_init_backing_page()]
    422: * KVM inhibits AVIC if any vCPU ID diverges from the vCPUs APIC ID,  [in avic_kick_vcpu_by_physical_id()]
    464: * destination APIC ID to vCPU without looping through all vCPUs.
    522: * AVIC is inhibited if vCPUs aren't mapped 1:1 with logical  [in avic_kick_target_vcpus_fast()]
    547: * Wake any target vCPUs that are blocking, i.e. waiting for a wake  [in avic_kick_target_vcpus()]
    549: * vCPUs that were in guest at the time of the IPI, and vCPUs that have  [in avic_kick_target_vcpus()]
    577: * is a superset of running vCPUs.  [in avic_incomplete_ipi_interception()]
    594: * vcpus. So, we just need to kick the appropriate vcpu.  [in avic_incomplete_ipi_interception()]
    [all ...]

/linux/arch/x86/kvm/

Kconfig
    233: int "Maximum number of vCPUs per KVM guest"
    239: Set the maximum number of vCPUs per KVM guest. Larger values will increase
    240: the memory footprint of each KVM guest, regardless of how many vCPUs are

/linux/drivers/virt/nitro_enclaves/

ne_pci_dev.h
    122: * @SLOT_INFO: Get the info for a slot e.g. slot uid, vCPUs count.
    123: * @SLOT_ADD_BULK_VCPUS: Add a number of vCPUs, not providing CPU ids.
    242: * @slot_uid: Slot unique id mapped to the slot to add vCPUs to.
    243: * @nr_vcpus: Number of vCPUs to add to the slot.

/linux/include/kvm/

arm_vgic.h
    68: /* maximum number of VCPUs allowed (GICv2 limits us to 8) */
    152: u8 targets; /* GICv2 target VCPUs mask */
    424: * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
    426: * The host's GIC naturally limits the maximum amount of VCPUs a guest