/linux/tools/virtio/ringtest/

virtio_ring_0_9.c
    41: struct guest {  (struct)
    52: } guest;  (argument)
    78: guest.avail_idx = 0;  in alloc_ring()
    79: guest.kicked_avail_idx = -1;  in alloc_ring()
    80: guest.last_used_idx = 0;  in alloc_ring()
    83: guest.free_head = 0;  in alloc_ring()
    89: guest.num_free = ring_size;  in alloc_ring()
   107: if (!guest.num_free)  in add_inbuf()
   111: head = (ring_size - 1) & (guest.avail_idx++);  in add_inbuf()
   113: head = guest.free_head;  in add_inbuf()
    [all …]

ring.c
    59: struct guest {  (struct)
    65: } guest;  (argument)
    92: guest.avail_idx = 0;  in alloc_ring()
    93: guest.kicked_avail_idx = -1;  in alloc_ring()
    94: guest.last_used_idx = 0;  in alloc_ring()
   103: guest.num_free = ring_size;  in alloc_ring()
   116: if (!guest.num_free)  in add_inbuf()
   119: guest.num_free--;  in add_inbuf()
   120: head = (ring_size - 1) & (guest.avail_idx++);  in add_inbuf()
   145: unsigned head = (ring_size - 1) & guest.last_used_idx;  in get_buf()
    [all …]

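Both harnesses index the ring with the same idiom: a free-running counter masked by `ring_size - 1`, which only works when the ring size is a power of two. A minimal standalone sketch of that idiom (the `RING_SIZE` value is an assumption, not the benchmark's actual configuration):

    /* Free-running ring index, masked to a power-of-two ring size.
     * The counter is never reset; wrap-around is handled by the mask. */
    #include <assert.h>
    #include <stdio.h>

    #define RING_SIZE 256 /* must be a power of two for the mask to work */

    static unsigned int avail_idx; /* free-running, like guest.avail_idx above */

    static unsigned int next_head(void)
    {
        assert((RING_SIZE & (RING_SIZE - 1)) == 0);
        return (RING_SIZE - 1) & avail_idx++;
    }

    int main(void)
    {
        for (int i = 0; i < 4; i++)
            printf("head = %u\n", next_head()); /* 0, 1, 2, 3, ... then wraps */
        return 0;
    }
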
/linux/drivers/misc/cxl/

of.c
    35: afu->guest->handle = addr;  in read_phys_addr()
    38: afu->guest->p2n_phys += addr;  in read_phys_addr()
    39: afu->guest->p2n_size = size;  in read_phys_addr()
    77: return of_property_read_reg(afu_np, 0, &afu->guest->handle, NULL);  in cxl_of_read_afu_handle()
   106: of_property_read_u32(np, "ibm,max-ints-per-process", &afu->guest->max_ints);  in cxl_of_read_afu_properties()
   107: afu->irqs_max = afu->guest->max_ints;  in cxl_of_read_afu_properties()
   159: pr_devel("AFU handle: %#llx\n", afu->guest->handle);  in cxl_of_read_afu_properties()
   161: afu->guest->p2n_phys, afu->guest->p2n_size);  in cxl_of_read_afu_properties()
   191: adapter->guest->irq_avail = kcalloc(nranges, sizeof(struct irq_avail),  in read_adapter_irq_config()
   193: if (adapter->guest->irq_avail == NULL)  in read_adapter_irq_config()
    [all …]

guest.c
   117: rc = cxl_h_collect_vpd_adapter(adapter->guest->handle,  in guest_collect_vpd()
   120: rc = cxl_h_collect_vpd(afu->guest->handle, 0,  in guest_collect_vpd()
   158: return cxl_h_collect_int_info(ctx->afu->guest->handle, ctx->process_token, info);  in guest_get_irq_info()
   186: rc = cxl_h_read_error_state(afu->guest->handle, &state);  in afu_read_error_state()
   203: rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr);  in guest_slice_irq_err()
   214: rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr);  in guest_slice_irq_err()
   228: for (i = 0; i < adapter->guest->irq_nranges; i++) {  in irq_alloc_range()
   229: cur = &adapter->guest->irq_avail[i];  in irq_alloc_range()
   252: for (i = 0; i < adapter->guest->irq_nranges; i++) {  in irq_free_range()
   253: cur = &adapter->guest->irq_avail[i];  in irq_free_range()
    [all …]

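The irq_alloc_range()/irq_free_range() matches show the driver scanning a table of available IRQ ranges. A rough sketch of that scan pattern; the field names and first-fit policy here are assumptions for illustration, not cxl's real `struct irq_avail` (which uses bitmaps):

    /* First-fit scan over IRQ ranges, echoing the loop shape in
     * irq_alloc_range() above. Fields and policy are assumptions. */
    #include <stdio.h>

    struct irq_range {
        unsigned int base;  /* first hwirq in the range */
        unsigned int count; /* number of hwirqs in the range */
        unsigned int used;  /* naive high-water allocator state */
    };

    static int irq_alloc(struct irq_range *ranges, unsigned int nranges)
    {
        for (unsigned int i = 0; i < nranges; i++) {
            struct irq_range *cur = &ranges[i];

            if (cur->used < cur->count)
                return (int)(cur->base + cur->used++); /* first range with room */
        }
        return -1; /* all ranges exhausted */
    }

    int main(void)
    {
        struct irq_range avail[] = { { 16, 2, 0 }, { 64, 4, 0 } };

        for (int i = 0; i < 4; i++)
            printf("got hwirq %d\n", irq_alloc(avail, 2)); /* 16 17 64 65 */
        return 0;
    }
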
/linux/arch/mips/include/asm/

cpu-features.h
   666: #define cpu_guest_has_conf1 (cpu_data[0].guest.conf & (1 << 1))
   669: #define cpu_guest_has_conf2 (cpu_data[0].guest.conf & (1 << 2))
   672: #define cpu_guest_has_conf3 (cpu_data[0].guest.conf & (1 << 3))
   675: #define cpu_guest_has_conf4 (cpu_data[0].guest.conf & (1 << 4))
   678: #define cpu_guest_has_conf5 (cpu_data[0].guest.conf & (1 << 5))
   681: #define cpu_guest_has_conf6 (cpu_data[0].guest.conf & (1 << 6))
   684: #define cpu_guest_has_conf7 (cpu_data[0].guest.conf & (1 << 7))
   687: #define cpu_guest_has_fpu (cpu_data[0].guest.options & MIPS_CPU_FPU)
   690: #define cpu_guest_has_watch (cpu_data[0].guest.options & MIPS_CPU_WATCH)
   693: #define cpu_guest_has_contextconfig (cpu_data[0].guest.options & MIPS_CPU_CTXTC)
    [all …]

/linux/Documentation/virt/kvm/x86/

running-nested-guests.rst
     7: A nested guest is the ability to run a guest inside another guest (it
     9: example is a KVM guest that in turn runs on a KVM guest (the rest of
    33: - L1 – level-1 guest; a VM running on L0; also called the "guest
    36: - L2 – level-2 guest; a VM running on L1, this is the "nested guest"
    46: (guest hypervisor), L3 (nested guest).
    61: Provider, using nested KVM lets you rent a large enough "guest
    62: hypervisor" (level-1 guest). This in turn allows you to create
    66: - Live migration of "guest hypervisors" and their nested guests, for
   139: .. note:: If you suspect your L2 (i.e. nested guest) is running slower,
   144: Starting a nested guest (x86)
    [all …]

mmu.rst
     8: for presenting a standard x86 mmu to the guest, while translating guest
    14: the guest should not be able to determine that it is running
    19: the guest must not be able to touch host memory not assigned
    28: Linux memory management code must be in control of guest memory
    32: report writes to guest memory to enable live migration
    47: gfn   guest frame number
    48: gpa   guest physical address
    49: gva   guest virtual address
    50: ngpa  nested guest physical address
    51: ngva  nested guest virtual address
    [all …]

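The mmu.rst glossary terms map onto simple arithmetic: a guest frame number (gfn) is just a guest physical address (gpa) with the page offset stripped. A tiny sketch, assuming 4 KiB pages (the page size is an assumption; the kernel uses PAGE_SHIFT for this):

    /* gfn <-> gpa conversion implied by the glossary above (4 KiB pages). */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12 /* assumption: 4 KiB guest pages */

    static uint64_t gpa_to_gfn(uint64_t gpa) { return gpa >> PAGE_SHIFT; }
    static uint64_t gfn_to_gpa(uint64_t gfn) { return gfn << PAGE_SHIFT; }

    int main(void)
    {
        uint64_t gpa = 0x12345678;

        printf("gpa %#llx -> gfn %#llx\n",
               (unsigned long long)gpa,
               (unsigned long long)gpa_to_gfn(gpa)); /* gfn 0x12345 */
        return 0;
    }
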
amd-memory-encryption.rst
    98: __u16 ghcb_version;   /* maximum guest GHCB version allowed */
   108: requests. If ``ghcb_version`` is 0 for any other guest type, then the maximum
   109: allowed guest GHCB protocol will default to version 2.
   133: context. To create the encryption context, user must provide a guest policy,
   144: __u32 policy;         /* guest's policy */
   146: … __u64 dh_uaddr;     /* userspace address pointing to the guest owner's PDH key */
   149: … __u64 session_addr; /* userspace address which points to the guest session information */
   164: of the memory contents that can be sent to the guest owner as an attestation
   184: data encrypted by the KVM_SEV_LAUNCH_UPDATE_DATA command. The guest owner may
   185: wait to provide the guest with confidential information until it can verify the
    [all …]

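The matched struct fields (policy, dh_uaddr, session_addr) are the userspace-supplied parameters for creating the SEV encryption context. A hedged sketch of filling them; the struct below mirrors only the fields visible in the excerpt, not the complete KVM uAPI layout, and the ioctl plumbing (KVM_MEMORY_ENCRYPT_OP and friends) is deliberately elided:

    /* Illustrative parameter block for SEV launch-start, limited to the
     * fields quoted above; not the full uAPI struct. */
    #include <stdint.h>

    struct sev_launch_start_params {
        uint32_t policy;       /* guest's policy */
        uint64_t dh_uaddr;     /* guest owner's PDH key (userspace address) */
        uint64_t session_addr; /* guest session information (userspace address) */
    };

    static void fill_launch_start(struct sev_launch_start_params *p,
                                  uint32_t policy, void *pdh, void *session)
    {
        p->policy = policy;
        p->dh_uaddr = (uint64_t)(uintptr_t)pdh;         /* pointers pass as u64 */
        p->session_addr = (uint64_t)(uintptr_t)session;
    }
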
cpuid.rst
     9: A guest running on a kvm host can check some of its features using
    12: a guest.
    65: KVM_FEATURE_PV_UNHALT          7   guest checks this feature bit
    69: KVM_FEATURE_PV_TLB_FLUSH       9   guest checks this feature bit
    77: KVM_FEATURE_PV_SEND_IPI        11  guest checks this feature bit
    85: KVM_FEATURE_PV_SCHED_YIELD     13  guest checks this feature bit
    89: KVM_FEATURE_ASYNC_PF_INT       14  guest checks this feature bit
    95: KVM_FEATURE_MSI_EXT_DEST_ID    15  guest checks this feature bit
    99: KVM_FEATURE_HC_MAP_GPA_RANGE   16  guest checks this feature bit before
   103: KVM_FEATURE_MIGRATION_CONTROL  17  guest checks this feature bit before
    [all …]

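Each table row pairs a feature flag with its bit position in the KVM feature CPUID leaf. A sketch of how a guest might probe one of those bits from userspace; 0x40000000/0x40000001 are the standard KVM CPUID leaves, and the code assumes GCC/clang's <cpuid.h> on an x86 guest:

    /* Probe a KVM paravirt feature bit from inside a guest (x86).
     * The bit index comes from the table above. */
    #include <cpuid.h>
    #include <stdio.h>
    #include <string.h>

    #define KVM_CPUID_SIGNATURE   0x40000000
    #define KVM_CPUID_FEATURES    0x40000001
    #define KVM_FEATURE_PV_UNHALT 7 /* bit number from the table above */

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;
        char sig[13] = { 0 };

        __cpuid(KVM_CPUID_SIGNATURE, eax, ebx, ecx, edx);
        memcpy(sig, &ebx, 4);     /* signature is spread over EBX/ECX/EDX */
        memcpy(sig + 4, &ecx, 4);
        memcpy(sig + 8, &edx, 4);
        if (strncmp(sig, "KVMKVMKVM", 9) != 0) {
            puts("not running under KVM");
            return 1;
        }

        __cpuid(KVM_CPUID_FEATURES, eax, ebx, ecx, edx);
        printf("KVM_FEATURE_PV_UNHALT: %s\n",
               (eax & (1u << KVM_FEATURE_PV_UNHALT)) ? "available" : "absent");
        return 0;
    }
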
hypercalls.rst
    54: :Purpose: Trigger guest exit so that the host can check for pending
    70: :Purpose: Expose hypercall availability to the guest. On x86 platforms, cpuid
    81: :Purpose: To enable communication between the hypervisor and guest there is a
    83: The guest can map this shared page to access its supervisor register
    93: A vcpu of a paravirtualized guest that is busywaiting in guest
    98: same guest can wake up the sleeping vcpu by issuing KVM_HC_KICK_CPU hypercall,
   107: :Purpose: Hypercall used to synchronize host and guest clocks.
   111: a0: guest physical address where host copies
   130: * tsc: guest TSC value used to calculate sec/nsec pair
   133: The hypercall lets a guest compute a precise timestamp across
    [all …]

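On x86, KVM hypercalls such as KVM_HC_KICK_CPU are issued with the hypercall number in RAX, arguments in RBX/RCX/RDX/RSI, and a VMCALL (VMMCALL on AMD). A minimal sketch of that calling convention, assuming an Intel guest; this mirrors the shape of the kernel's guest-side stubs but is not the kernel's actual code:

    /* Guest-side hypercall stub: nr in RAX, args in RBX/RCX, result in RAX.
     * VMCALL is Intel's instruction; AMD guests use VMMCALL instead. */
    static inline long kvm_hypercall2(unsigned int nr,
                                      unsigned long p1, unsigned long p2)
    {
        long ret;

        asm volatile("vmcall"
                     : "=a" (ret)
                     : "a" (nr), "b" (p1), "c" (p2)
                     : "memory");
        return ret;
    }

    /* Usage per the KVM_HC_KICK_CPU description above: a0 is a flags word
     * (reserved), a1 the APIC ID of the vcpu to wake:
     *     kvm_hypercall2(KVM_HC_KICK_CPU, flags, apic_id);
     * KVM_HC_KICK_CPU is defined in include/uapi/linux/kvm_para.h. */
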
msr.rst
    25: in guest RAM. This memory is expected to hold a copy of the following
    40: guest has to check version before and after grabbing
    64: guest RAM, plus an enable bit in bit 0. This memory is expected to hold
    87: guest has to check version before and after grabbing
   127: coordinated between the guest and the hypervisor. Availability
   139: | | | guest vcpu has been paused by |
   196: which must be in guest RAM. This memory is expected to hold the
   220: a token that will be used to notify the guest when missing page becomes
   224: is currently supported, when set, it indicates that the guest is dealing
   231: as regular page fault, guest must reset 'flags' to '0' before it does
    [all …]

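Twice the excerpt notes that the "guest has to check version before and after grabbing" the time data: the host makes the version odd while updating the structure, so the guest retries until it sees an even, unchanged version. A sketch of that seqlock-style read; the struct layout is illustrative only, not the exact MSR ABI:

    /* Seqlock-style read of a host-updated time structure: retry while
     * the version is odd (update in progress) or changed across the copy. */
    #include <stdint.h>
    #include <string.h>

    struct pv_clock_page {
        uint32_t version; /* odd while the host is writing */
        uint32_t pad;
        uint64_t tsc_timestamp;
        uint64_t system_time;
    };

    static void read_clock(const volatile struct pv_clock_page *shared,
                           struct pv_clock_page *snap)
    {
        uint32_t v;

        do {
            v = shared->version;
            __sync_synchronize();                  /* order reads vs. version */
            memcpy(snap, (const void *)shared, sizeof(*snap));
            __sync_synchronize();
        } while ((v & 1) || v != shared->version); /* torn or in-progress */
    }

    int main(void)
    {
        static struct pv_clock_page page = { 0, 0, 1111, 2222 };
        struct pv_clock_page snap;

        read_clock(&page, &snap); /* version 0 = stable, copies at once */
        return (int)(snap.system_time != 2222);
    }
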
/linux/Documentation/security/

snp-tdx-threat-model.rst
    46: integrity for the VM's guest memory and execution state (vCPU registers),
    47: more tightly controlled guest interrupt injection, as well as some
    48: additional mechanisms to control guest-host page mapping. More details on
    53: The basic CoCo guest layout includes the host, guest, the interfaces that
    54: communicate guest and host, a platform capable of supporting CoCo VMs, and
    55: a trusted intermediary between the guest VM and the underlying platform
    58: is still in charge of the guest lifecycle, i.e. create or destroy a CoCo
    65: the rest of the components (data flow for guest, host, hardware) ::
    68: | CoCo guest VM |<---->| |
   136: (in contrast to a remote network attacker) and has control over the guest
    [all …]

/linux/Documentation/virt/hyperv/

coco.rst
    25: * AMD processor with SEV-SNP. Hyper-V does not run guest VMs with AMD SME,
    40: * Fully-enlightened mode. In this mode, the guest operating system is
    43: * Paravisor mode. In this mode, a paravisor layer between the guest and the
    44: host provides some operations needed to run as a CoCo VM. The guest operating
    49: points on a spectrum spanning the degree of guest enlightenment needed to run
    53: guest OS with no knowledge of memory encryption or other aspects of CoCo VMs
    56: aspects of CoCo VMs are handled by the Hyper-V paravisor while the guest OS
    59: paravisor, and there is no standardized mechanism for a guest OS to query the
    61: the paravisor provides is hard-coded in the guest OS.
    64: a limited paravisor to provide services to the guest such as a virtual TPM.
    [all …]

vpci.rst
     5: In a Hyper-V guest VM, PCI pass-thru devices (also called
    12: hypervisor. The device should appear to the guest just as it
    24: and produces the same benefits by allowing a guest device
    55: do not appear in the Linux guest's ACPI tables. vPCI devices
    68: in the guest, or if the vPCI device is removed from
    95: hv_pci_probe() allocates a guest MMIO range to be used as PCI
    99: hv_pci_enter_d0(). When the guest subsequently accesses this
   118: guest VM at any time during the life of the VM. The removal
   120: is not under the control of the guest OS.
   122: A guest VM is notified of the removal by an unsolicited
    [all …]

/linux/tools/perf/Documentation/

perf-kvm.txt
     6: perf-kvm - Tool to trace/measure kvm guest os
    11: 'perf kvm' [--host] [--guest] [--guestmount=<path>
    14: 'perf kvm' [--host] [--guest] [--guestkallsyms=<path> --guestmodules=<path>
    23: a performance counter profile of guest os in realtime
    28: default behavior of perf kvm as --guest, so if neither --host nor --guest
    29: is specified, the perf data file name is perf.data.guest. If --host is specified,
    31: perf.data.host, please specify --host --no-guest. The behaviors are shown as
    33: Default('')     -> perf.data.guest
    35: --guest         -> perf.data.guest
    36: --host --guest  -> perf.data.kvm
    [all …]

guest-files.txt
     4: Guest OS /proc/kallsyms file copy. perf reads it to get guest
     5: kernel symbols. Users copy it out from guest OS.
     8: Guest OS /proc/modules file copy. perf reads it to get guest
     9: kernel module information. Users copy it out from guest OS.
    14: --guest-code::
    15: Indicate that guest code can be found in the hypervisor process,

/linux/Documentation/ABI/testing/

sysfs-hypervisor-xen
     6: Type of guest:
     7: "Xen":  standard guest type on arm
     8: "HVM":  fully virtualized guest (x86)
     9: "PV":   paravirtualized guest (x86)
    10: "PVH":  fully virtualized guest without legacy emulation (x86)
    22: "self"  The guest can profile itself
    23: "hv"    The guest can profile itself and, if it is
    25: "all"   The guest can profile itself, the hypervisor

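These values are exposed through the Xen sysfs tree; the guest-type attribute documented here lives at /sys/hypervisor/guest_type (an assumption based on this ABI file; adjust the path if your tree differs). A small C sketch of reading it:

    /* Read the Xen guest-type attribute described above. The path is an
     * assumption based on the ABI file; it exists only under Xen. */
    #include <stdio.h>

    int main(void)
    {
        char buf[32] = { 0 };
        FILE *f = fopen("/sys/hypervisor/guest_type", "r");

        if (!f) {
            perror("not a Xen guest (or sysfs not mounted)");
            return 1;
        }
        if (fgets(buf, sizeof(buf), f))
            printf("guest type: %s", buf); /* e.g. "PV", "PVH", "HVM" */
        fclose(f);
        return 0;
    }
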
/linux/Documentation/virt/kvm/s390/

s390-pv.rst
    10: access VM state like guest memory or guest registers. Instead, the
    15: Each guest starts in non-protected mode and then may make a request to
    16: transition into protected mode. On transition, KVM registers the guest
    20: The Ultravisor will secure and decrypt the guest's boot memory
    22: starts/stops and injected interrupts while the guest is running.
    24: As access to the guest's state, such as the SIE state description, is
    29: reduce exposed guest state.
    40: field (offset 0x54). If the guest cpu is not enabled for the interrupt
    50: access to the guest memory.
    84: instruction text, in order not to leak guest instruction text.
    [all …]

/linux/Documentation/arch/s390/

vfio-ap.rst
   122: Let's now take a look at how AP instructions executed on a guest are interpreted
   128: control domains assigned to the KVM guest:
   131: to the KVM guest. Each bit in the mask, from left to right, corresponds to
   133: use by the KVM guest.
   136: assigned to the KVM guest. Each bit in the mask, from left to right,
   138: corresponding queue is valid for use by the KVM guest.
   141: assigned to the KVM guest. The ADM bit mask controls which domains can be
   143: guest. Each bit in the mask, from left to right, corresponds to a domain from
   153: adapters 1 and 2 and usage domains 5 and 6 are assigned to a guest, the APQNs
   154: (1,5), (1,6), (2,5) and (2,6) will be valid for the guest.
    [all …]

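Lines 153-154 state the cross-product rule: a queue (APQN) is usable when its adapter bit is set in the adapter mask and its domain bit is set in the usage-domain mask, with bit 0 being the leftmost bit. A small sketch reproducing that example (adapters 1-2 crossed with domains 5-6), using 64-bit masks for brevity where the real masks are 256 bits wide:

    /* Cross product of adapter mask (APM) and usage-domain mask (AQM):
     * APQN (a,d) is valid iff both bits are set. Bit 0 is the leftmost
     * bit, per the text. */
    #include <stdint.h>
    #include <stdio.h>

    #define MASK_BITS 64 /* brevity; real AP masks are 256 bits */

    static int bit_set(uint64_t mask, unsigned int n) /* n from the left */
    {
        return (int)((mask >> (MASK_BITS - 1 - n)) & 1);
    }

    static uint64_t bit(unsigned int n)
    {
        return 1ULL << (MASK_BITS - 1 - n);
    }

    int main(void)
    {
        uint64_t apm = bit(1) | bit(2); /* adapters 1 and 2 assigned */
        uint64_t aqm = bit(5) | bit(6); /* usage domains 5 and 6 assigned */

        for (unsigned int a = 0; a < MASK_BITS; a++)
            for (unsigned int d = 0; d < MASK_BITS; d++)
                if (bit_set(apm, a) && bit_set(aqm, d))
                    printf("APQN (%u,%u) valid for the guest\n", a, d);
        return 0; /* prints (1,5) (1,6) (2,5) (2,6) */
    }
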
/linux/drivers/virt/coco/

Makefile
     7: obj-$(CONFIG_ARM_PKVM_GUEST) += pkvm-guest/
     8: obj-$(CONFIG_SEV_GUEST) += sev-guest/
     9: obj-$(CONFIG_INTEL_TDX_GUEST) += tdx-guest/
    10: obj-$(CONFIG_ARM_CCA_GUEST) += arm-cca-guest/

Kconfig
    12: source "drivers/virt/coco/pkvm-guest/Kconfig"
    14: source "drivers/virt/coco/sev-guest/Kconfig"
    16: source "drivers/virt/coco/tdx-guest/Kconfig"
    18: source "drivers/virt/coco/arm-cca-guest/Kconfig"

/linux/Documentation/arch/x86/

tdx.rst
     7: Intel's Trust Domain Extensions (TDX) protect confidential guest VMs from
     8: the host and physical attacks by isolating the guest register state and by
     9: encrypting the guest memory. In TDX, a special module running in a special
    10: mode sits between the host and the guest and manages the guest/host
   198: Since the host cannot directly access guest registers or memory, much
   199: normal functionality of a hypervisor must be moved into the guest. This is
   201: guest kernel. A #VE is handled entirely inside the guest kernel, but some
   205: guest to the hypervisor or the TDX module.
   249: indicates a bug in the guest. The guest may try to handle the #GP with a
   255: The "just works" MSRs do not need any special guest handling. They might
    [all …]

/linux/arch/x86/xen/

Kconfig
     7: bool "Xen guest support"
    20: bool "Xen PV guest support"
    29: Support running as a Xen PV guest.
    61: bool "Xen PVHVM guest support"
    65: Support running as a Xen PVHVM guest.
    81: bool "Xen PVH guest support"
    85: Support for running as a Xen PVH guest.
    94: Support running as a Xen Dom0 guest.

/linux/Documentation/virt/kvm/

vcpu-requests.rst
    48: The goal of a VCPU kick is to bring a VCPU thread out of guest mode in
    50: a guest mode exit. However, a VCPU thread may not be in guest mode at the
    55: 1) Send an IPI. This forces a guest mode exit.
    56: 2) Waking a sleeping VCPU. Sleeping VCPUs are VCPU threads outside guest
    60: 3) Nothing. When the VCPU is not in guest mode and the VCPU thread is not
    67: guest is running in guest mode or not, as well as some specific
    68: outside guest mode states. The architecture may use ``vcpu->mode`` to
    76: The VCPU thread is outside guest mode.
    80: The VCPU thread is in guest mode.
    89: The VCPU thread is outside guest mode, but it wants the sender of
    [all …]

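The three kick cases in the excerpt (IPI, wake-up, or nothing) reduce to a small decision on the target VCPU's state. An illustrative sketch of that decision; the types and helpers are stand-ins, not KVM's actual code, though the mode names mirror the states described above:

    /* Illustrative decision tree for a VCPU kick, following the three
     * cases above. Types and helpers are stand-ins, not KVM's own. */
    #include <stdio.h>

    enum vcpu_mode { OUTSIDE_GUEST_MODE, IN_GUEST_MODE, EXITING_GUEST_MODE };

    struct vcpu {
        enum vcpu_mode mode; /* mirrors the vcpu->mode states above */
        int sleeping;        /* blocked on its waitqueue */
        int cpu;             /* physical CPU it last ran on */
    };

    static void wake_up_vcpu(struct vcpu *v) { printf("wake vcpu on cpu %d\n", v->cpu); }
    static void send_ipi(int cpu)            { printf("IPI -> cpu %d\n", cpu); }

    static void kick_vcpu(struct vcpu *v)
    {
        if (v->sleeping) {    /* case 2: wake a sleeping VCPU */
            wake_up_vcpu(v);
            return;
        }
        if (v->mode == IN_GUEST_MODE) {
            send_ipi(v->cpu); /* case 1: the IPI forces a guest mode exit */
            return;
        }
        /* case 3: outside guest mode and not sleeping -> nothing to do;
         * the VCPU will notice the pending request on its next entry. */
    }

    int main(void)
    {
        struct vcpu v = { IN_GUEST_MODE, 0, 3 };

        kick_vcpu(&v); /* prints "IPI -> cpu 3" */
        return 0;
    }
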
/linux/tools/virtio/virtio-trace/

README
     4: Trace agent is a user tool for sending trace data of a guest to a Host in low
    48: For example, if a guest uses three CPUs, the names are
    83: example, if a guest uses three CPUs, chardev names should be trace-path-cpu0,
    86: 3) Boot the guest
    87: You can find some chardev in /dev/virtio-ports/ in the guest.
    93: 0) Build trace agent in a guest
    96: 1) Enable ftrace in the guest
   100: 2) Run trace agent in the guest
   104: option, trace data are output via stdout in the guest.
   109: the guest will stop, per the chardev specification in QEMU. This blocking mode may
    [all …]