| /linux/arch/x86/kvm/svm/ |
| sev.c |
    sev_vcpu_has_debug_swap():
        152  static bool sev_vcpu_has_debug_swap(struct vcpu_svm *svm)
        154          struct kvm_vcpu *vcpu = &svm->vcpu;
    sev_es_sync_vmsa():
        868  static int sev_es_sync_vmsa(struct vcpu_svm *svm)
        870          struct kvm_vcpu *vcpu = &svm->vcpu;
        872          struct sev_es_save_area *save = svm->sev_es.vmsa;
        879          if (svm->vcpu.guest_debug || (svm->vmcb->save.dr7 & ~DR7_FIXED_1))
        888          memcpy(save, &svm->vmcb->save, sizeof(svm->vmcb->save));
        891          save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX];
        892          save->rbx = svm->vcpu.arch.regs[VCPU_REGS_RBX];
        893          save->rcx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
    [all …]
|
| avic.c |
    avic_set_x2apic_msr_interception():
        111  static void avic_set_x2apic_msr_interception(struct vcpu_svm *svm,
        148          if (intercept == svm->x2avic_msrs_intercepted)
        155                  svm_set_intercept_for_msr(&svm->vcpu, x2avic_passthrough_msrs[i],
        158          svm->x2avic_msrs_intercepted = intercept;
    avic_activate_vmcb():
        186  static void avic_activate_vmcb(struct vcpu_svm *svm)
        188          struct vmcb *vmcb = svm->vmcb01.ptr;
        189          struct kvm_vcpu *vcpu = &svm->vcpu;
        205          if (x2avic_enabled && apic_x2apic_mode(svm->vcpu.arch.apic)) {
        209                  avic_set_x2apic_msr_interception(svm, false);
        215          kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, &svm->vcpu);
    [all …]
|
| hyperv.c |
    svm_hv_inject_synthetic_vmexit_post_tlb_flush():
        11           struct vcpu_svm *svm = to_svm(vcpu);
        13           svm->vmcb->control.exit_code = HV_SVM_EXITCODE_ENL;
        14           svm->vmcb->control.exit_code_hi = 0;
        15           svm->vmcb->control.exit_info_1 = HV_SVM_ENL_EXITCODE_TRAP_AFTER_FLUSH;
        16           svm->vmcb->control.exit_info_2 = 0;
        17           nested_svm_vmexit(svm);
|
| hyperv.h |
    nested_svm_hv_update_vm_vp_ids():
        17           struct vcpu_svm *svm = to_svm(vcpu);
        18           struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments;
    nested_svm_l2_tlb_flush_enabled():
        31           struct vcpu_svm *svm = to_svm(vcpu);
        32           struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments;
|
| /linux/drivers/gpu/drm/nouveau/ |
| nouveau_svm.c |
    nouveau_ivmm_find():
        94   nouveau_ivmm_find(struct nouveau_svm *svm, u64 inst)
        97           list_for_each_entry(ivmm, &svm->inst, head) {
    nouveau_svmm_bind():
        169          if (!cli->svm.svmm) {
        186                  nouveau_dmem_migrate_vma(cli->drm, cli->svm.svmm, vma, addr,
    nouveau_svmm_part():
        210          mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
        211          ivmm = nouveau_ivmm_find(svmm->vmm->cli->drm->svm, inst);
        216          mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
    nouveau_svmm_join():
        231          mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
        232          list_add(&ivmm->head, &svmm->vmm->cli->drm->svm->inst);
        233          mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
    [all …]
|
| /linux/tools/testing/selftests/kvm/x86/ |
| nested_invalid_cr3_test.c |
    l1_svm_code():
        21   static void l1_svm_code(struct svm_test_data *svm)
        26           generic_svm_setup(svm, l2_guest_code,
        30           save_cr3 = svm->vmcb->save.cr3;
        31           svm->vmcb->save.cr3 = -1ull;
        32           run_guest(svm->vmcb, svm->vmcb_gpa);
        33           GUEST_ASSERT(svm->vmcb->control.exit_code == SVM_EXIT_ERR);
        36           svm->vmcb->save.cr3 = save_cr3;
        37           run_guest(svm->vmcb, svm->vmcb_gpa);
        38           GUEST_ASSERT(svm->vmcb->control.exit_code == SVM_EXIT_VMMCALL);
|
| nested_exceptions_test.c |
    svm_run_l2():
        74   static void svm_run_l2(struct svm_test_data *svm, void *l2_code, int vector,
        77           struct vmcb *vmcb = svm->vmcb;
        81           run_guest(vmcb, svm->vmcb_gpa);
    l1_svm_code():
        91   static void l1_svm_code(struct svm_test_data *svm)
        93           struct vmcb_control_area *ctrl = &svm->vmcb->control;
        96           generic_svm_setup(svm, NULL, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
        97           svm->vmcb->save.idtr.limit = 0;
        101          svm_run_l2(svm, l2_ss_pending_test, SS_VECTOR, SS_ERROR_CODE);
        102          svm_run_l2(svm, l2_ss_injected_gp_test, GP_VECTOR, GP_ERROR_CODE_AMD);
        105          svm_run_l2(svm, l2_ss_injected_df_test, DF_VECTOR, DF_ERROR_CODE);
    [all …]
|
| nested_tsc_adjust_test.c |
    l1_guest_code():
        106          struct svm_test_data *svm = data;
        108          generic_svm_setup(svm, l2_guest_code,
        111          svm->vmcb->control.tsc_offset = TSC_OFFSET_VALUE;
        112          run_guest(svm->vmcb, svm->vmcb_gpa);
        113          GUEST_ASSERT(svm->vmcb->control.exit_code == SVM_EXIT_VMMCALL);
|
| nested_close_kvm_test.c |
    l1_svm_code():
        48   static void l1_svm_code(struct svm_test_data *svm)
        53           generic_svm_setup(svm, l2_guest_code,
        56           run_guest(svm->vmcb, svm->vmcb_gpa);
|
| nested_tsc_scaling_test.c |
    l1_svm_code():
        83   static void l1_svm_code(struct svm_test_data *svm)
        90           generic_svm_setup(svm, l2_guest_code,
        97           run_guest(svm->vmcb, svm->vmcb_gpa);
        98           GUEST_ASSERT(svm->vmcb->control.exit_code == SVM_EXIT_VMMCALL);
|
| state_test.c |
    svm_l1_guest_code():
        34   static void svm_l1_guest_code(struct svm_test_data *svm)
        37           struct vmcb *vmcb = svm->vmcb;
        39           GUEST_ASSERT(svm->vmcb_gpa);
        41           generic_svm_setup(svm, svm_l2_guest_code,
        45           run_guest(vmcb, svm->vmcb_gpa);
        49           run_guest(vmcb, svm->vmcb_gpa);
|
| aperfmperf_test.c |
    l1_svm_code():
        65   static void l1_svm_code(struct svm_test_data *svm)
        68           struct vmcb *vmcb = svm->vmcb;
        70           generic_svm_setup(svm, l2_guest_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
        71           run_guest(vmcb, svm->vmcb_gpa);
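The KVM selftests listed above share one L1 pattern: generic_svm_setup() builds the VMCB, run_guest() does VMRUN, and the resulting exit code is checked with GUEST_ASSERT(). A minimal sketch of that pattern, assuming the selftest harness headers (svm_util.h, processor.h); the stack size and L2 guest body are illustrative, not taken from any one test:

    /*
     * Minimal sketch of the shared L1 pattern; stack size and L2 body are
     * illustrative assumptions.
     */
    #include "svm_util.h"
    #include "processor.h"

    #define L2_GUEST_STACK_SIZE 64

    static void l2_guest_code(void)
    {
            vmmcall();      /* force a #VMEXIT back to L1 with SVM_EXIT_VMMCALL */
    }

    static void l1_svm_code(struct svm_test_data *svm)
    {
            unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

            /* Point the VMCB at l2_guest_code with a fresh stack. */
            generic_svm_setup(svm, l2_guest_code,
                              &l2_guest_stack[L2_GUEST_STACK_SIZE]);

            /* VMRUN, then check why L2 exited. */
            run_guest(svm->vmcb, svm->vmcb_gpa);
            GUEST_ASSERT(svm->vmcb->control.exit_code == SVM_EXIT_VMMCALL);

            /* The real tests then report back to the host via ucall (GUEST_SYNC/GUEST_DONE). */
    }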
|
| /linux/arch/x86/kvm/ |
| Makefile |
    24   kvm-amd-y += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o
    26   kvm-amd-$(CONFIG_KVM_AMD_SEV) += svm/sev.o
    27   kvm-amd-$(CONFIG_KVM_HYPERV) += svm/hyperv.o
    32   kvm-amd-y += svm/svm_onhyperv.o
    40   $(obj)/svm/vmenter.o: $(obj)/kvm-asm-offsets.h
|
| /linux/arch/arm/mm/ |
| ioremap.c |
    find_static_vm_paddr():
        53           struct static_vm *svm;
        56           list_for_each_entry(svm, &static_vmlist, list) {
        57                   vm = &svm->vm;
        67           return svm;
    find_static_vm_vaddr():
        75           struct static_vm *svm;
        78           list_for_each_entry(svm, &static_vmlist, list) {
        79                   vm = &svm->vm;
        86           return svm;
    add_static_vm_early():
        92   void __init add_static_vm_early(struct static_vm *svm)
        98           vm = &svm->vm;
    [all …]
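For context (the excerpt above is truncated), a paraphrased sketch of the lookup pattern these helpers implement: struct static_vm embeds a struct vm_struct plus a list node, and static_vmlist is kept sorted by virtual address. This is a sketch, not the verbatim kernel code:

    /* Sketch only; field names follow arch/arm/mm/mm.h. */
    #include <linux/list.h>
    #include <linux/vmalloc.h>

    struct static_vm {
            struct vm_struct vm;
            struct list_head list;
    };

    static LIST_HEAD(static_vmlist);

    static struct static_vm *find_static_vm_vaddr(void *vaddr)
    {
            struct static_vm *svm;
            struct vm_struct *vm;

            list_for_each_entry(svm, &static_vmlist, list) {
                    vm = &svm->vm;

                    /* The list is sorted, so stop once we have passed vaddr. */
                    if (vm->addr > vaddr)
                            break;

                    if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
                            return svm;
            }

            return NULL;
    }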
|
| mmu.c |
    iotable_init():
        1020         struct static_vm *svm;
        1025         svm = memblock_alloc_or_panic(sizeof(*svm) * nr, __alignof__(*svm));
        1030                 vm = &svm->vm;
        1037                 add_static_vm_early(svm++);
    vm_reserve_area_early():
        1045         struct static_vm *svm;
        1047         svm = memblock_alloc_or_panic(sizeof(*svm), __alignof__(*svm));
        1049         vm = &svm->vm;
        1054         add_static_vm_early(svm);
    fill_pmd_gaps():
        1079         struct static_vm *svm;
        1084         list_for_each_entry(svm, &static_vmlist, list) {
    [all …]
|
| mm.h |
    75   extern __init void add_static_vm_early(struct static_vm *svm);
|
| /linux/drivers/gpu/drm/xe/ |
| xe_svm.c |
    gpusvm_to_vm():
        45           return container_of(gpusvm, struct xe_vm, svm.gpusvm);
    xe_svm_garbage_collector_add_range():
        101          spin_lock(&vm->svm.garbage_collector.lock);
        104                        &vm->svm.garbage_collector.range_list);
        105          spin_unlock(&vm->svm.garbage_collector.lock);
        107          queue_work(xe->usm.pf_wq, &vm->svm.garbage_collector.work);
    xe_svm_range_notifier_event_end():
        173          drm_gpusvm_range_unmap_pages(&vm->svm.gpusvm, r, &ctx);
    __xe_svm_garbage_collector():
        283          drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base);
    xe_svm_garbage_collector():
        350          spin_lock(&vm->svm.garbage_collector.lock);
        351          range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
        361          spin_unlock(&vm->svm.garbage_collector.lock);
    [all …]
|
| /linux/tools/testing/vsock/ |
| vsock_perf.c |
    vsock_connect():
        104                  struct sockaddr_vm svm;
        106                  .svm = {
        121          if (connect(fd, &addr.sa, sizeof(addr.svm)) < 0) {
    run_receiver():
        147                  struct sockaddr_vm svm;
        149                  .svm = {
        157                  struct sockaddr_vm svm;
        160          socklen_t clientaddr_len = sizeof(clientaddr.svm);
        173          if (bind(fd, &addr.sa, sizeof(addr.svm)) < 0)
|
| vsock_diag_test.c |
    test_listen_socket_server():
        343                  struct sockaddr_vm svm;
        345                  .svm = {
        357          if (bind(fd, &addr.sa, sizeof(addr.svm)) < 0) {
|
| util.c |
    vsock_accept():
        273                  struct sockaddr_vm svm;
        275          socklen_t clientaddr_len = sizeof(clientaddr.svm);
        296          if (clientaddr_len != sizeof(clientaddr.svm)) {
        308          *clientaddrp = clientaddr.svm;
|
| vsock_test.c |
    test_stream_connection_reset():
        42                   struct sockaddr_vm svm;
        44                   .svm = {
        57           ret = connect(fd, &addr.sa, sizeof(addr.svm));
    test_stream_bind_only_client():
        78                   struct sockaddr_vm svm;
        80                   .svm = {
        96           ret = connect(fd, &addr.sa, sizeof(addr.svm));
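The vsock tests above share one idiom: a union of struct sockaddr and struct sockaddr_vm, filled in through the .svm member and passed to connect()/bind() through the .sa member. A small connect helper in that style; the argument names and error handling are illustrative, not copied from the tests:

    /* Connect helper in the style of the vsock selftests above. */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/vm_sockets.h>

    static int vsock_stream_connect_example(unsigned int cid, unsigned int port)
    {
            union {
                    struct sockaddr sa;
                    struct sockaddr_vm svm;
            } addr = {
                    .svm = {
                            .svm_family = AF_VSOCK,
                            .svm_port = port,
                            .svm_cid = cid,
                    },
            };
            int fd;

            fd = socket(AF_VSOCK, SOCK_STREAM, 0);
            if (fd < 0) {
                    perror("socket");
                    return -1;
            }

            /* The union lets the vsock address be passed as a generic sockaddr. */
            if (connect(fd, &addr.sa, sizeof(addr.svm)) < 0) {
                    perror("connect");
                    close(fd);
                    return -1;
            }

            return fd;
    }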
|
| /linux/arch/x86/virt/ |
| Makefile |
    2    obj-y += svm/ vmx/
|
| /linux/drivers/iommu/intel/ |
| Makefile |
    6    obj-$(CONFIG_INTEL_IOMMU_SVM) += svm.o
|
| /linux/arch/powerpc/platforms/pseries/ |
| Makefile |
    31   obj-$(CONFIG_PPC_SVM) += svm.o
|
| /linux/arch/x86/include/uapi/asm/ |
| kvm.h |
    543          struct kvm_svm_nested_state_hdr svm;                               (struct member)
    556          __DECLARE_FLEX_ARRAY(struct kvm_svm_nested_state_data, svm);       (struct member)
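These two members are the SVM parts of the kvm_nested_state UAPI (the header union and the flex-array data union). A hedged sketch of how a VMM might read them with KVM_GET_NESTED_STATE; the extra buffer size and error handling are assumptions, not taken from the header:

    /* Hedged sketch: read nested SVM state through KVM_GET_NESTED_STATE. */
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static struct kvm_nested_state *get_nested_state(int vcpu_fd)
    {
            /* Extra room for the flex-array data; 8 KiB is an assumption. */
            size_t size = sizeof(struct kvm_nested_state) + 0x2000;
            struct kvm_nested_state *state = calloc(1, size);

            if (!state)
                    return NULL;

            state->size = size;
            if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, state) < 0) {
                    free(state);
                    return NULL;
            }

            /* For AMD guests, the header union member shown above is used. */
            if (state->format == KVM_STATE_NESTED_FORMAT_SVM)
                    (void)state->hdr.svm.vmcb_pa;   /* guest physical address of vmcb12 */

            return state;
    }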
|