/linux/arch/powerpc/include/asm/ |
H A D | kvm_book3s_uvmem.h | 9 int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot); 10 void kvmppc_uvmem_slot_free(struct kvm *kvm, 12 unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, 16 unsigned long kvmppc_h_svm_page_out(struct kvm *kvm, 20 unsigned long kvmppc_h_svm_init_start(struct kvm *kvm); 21 unsigned long kvmppc_h_svm_init_done(struct kvm *kvm); 22 int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn); 23 unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm); 25 struct kvm *kvm, bool skip_page_out); 26 int kvmppc_uvmem_memslot_create(struct kvm *kvm, [all …]
|
H A D | kvm_ppc.h | 169 extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info); 170 extern int kvmppc_alloc_reset_hpt(struct kvm *kvm, int order); 172 extern void kvmppc_rmap_reset(struct kvm *kvm); 176 extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd, 178 extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm, 180 extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm); 181 extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm); 182 extern void kvmppc_setup_partition_table(struct kvm *kvm); 184 extern int kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, 202 extern int kvmppc_core_init_vm(struct kvm *kvm); [all …]
|
/linux/arch/x86/kvm/mmu/ |
H A D | page_track.c | 23 static bool kvm_external_write_tracking_enabled(struct kvm *kvm) in kvm_external_write_tracking_enabled() argument 30 return smp_load_acquire(&kvm->arch.external_write_tracking_enabled); in kvm_external_write_tracking_enabled() 36 bool kvm_page_track_write_tracking_enabled(struct kvm *kvm) in kvm_page_track_write_tracking_enabled() argument 38 return kvm_external_write_tracking_enabled(kvm) || in kvm_page_track_write_tracking_enabled() 39 kvm_shadow_root_allocated(kvm) || !tdp_enabled; in kvm_page_track_write_tracking_enabled() 60 int kvm_page_track_create_memslot(struct kvm *kvm, in kvm_page_track_create_memslot() argument 64 if (!kvm_page_track_write_tracking_enabled(kvm)) in kvm_page_track_create_memslot() 90 void __kvm_write_track_add_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, in __kvm_write_track_add_gfn() argument 93 lockdep_assert_held_write(&kvm->mmu_lock); in __kvm_write_track_add_gfn() 95 lockdep_assert_once(lockdep_is_held(&kvm->slots_lock) || in __kvm_write_track_add_gfn() [all …]
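The page_track.c snippet above shows the locking contract of __kvm_write_track_add_gfn(): mmu_lock held for write, plus either kvm->slots_lock or a registered external tracking user. A minimal sketch of a conforming caller, assuming that second condition is already satisfied (track_one_gfn() is a hypothetical helper, not kernel code):

    /* Hypothetical helper: start write-tracking one gfn. Assumes the
     * caller holds kvm->slots_lock or is an external tracking user. */
    static void track_one_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
                              gfn_t gfn)
    {
            if (!kvm_page_track_write_tracking_enabled(kvm))
                    return;
            write_lock(&kvm->mmu_lock);     /* x86 mmu_lock is an rwlock */
            __kvm_write_track_add_gfn(kvm, slot, gfn);
            write_unlock(&kvm->mmu_lock);
    }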
|
H A D | tdp_mmu.c | 15 void kvm_mmu_init_tdp_mmu(struct kvm *kvm) in kvm_mmu_init_tdp_mmu() argument 17 INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots); in kvm_mmu_init_tdp_mmu() 18 spin_lock_init(&kvm->arch.tdp_mmu_pages_lock); in kvm_mmu_init_tdp_mmu() 22 static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm, in kvm_lockdep_assert_mmu_lock_held() argument 26 lockdep_assert_held_read(&kvm->mmu_lock); in kvm_lockdep_assert_mmu_lock_held() 28 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_lockdep_assert_mmu_lock_held() 33 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) in kvm_mmu_uninit_tdp_mmu() argument 40 kvm_tdp_mmu_invalidate_roots(kvm, KVM_VALID_ROOTS); in kvm_mmu_uninit_tdp_mmu() 41 kvm_tdp_mmu_zap_invalidated_roots(kvm, false); in kvm_mmu_uninit_tdp_mmu() 44 KVM_MMU_WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages)); in kvm_mmu_uninit_tdp_mmu() [all …]
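kvm_lockdep_assert_mmu_lock_held() captures the TDP MMU's two locking modes: walkers that update SPTEs atomically take mmu_lock shared (read), while structural teardown takes it exclusive. Since the helper is file-local, only code inside tdp_mmu.c can use it; a walker there might document its mode like this (walk_tdp_roots() is hypothetical):

    /* Hypothetical tdp_mmu.c-internal walker: shared=true means the
     * caller holds mmu_lock for read and relies on atomic SPTE updates. */
    static void walk_tdp_roots(struct kvm *kvm, bool shared)
    {
            kvm_lockdep_assert_mmu_lock_held(kvm, shared);
            /* ... iterate kvm->arch.tdp_mmu_roots under RCU ... */
    }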
|
H A D | page_track.h | 10 bool kvm_page_track_write_tracking_enabled(struct kvm *kvm); 14 int kvm_page_track_create_memslot(struct kvm *kvm, 18 void __kvm_write_track_add_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, 20 void __kvm_write_track_remove_gfn(struct kvm *kvm, 23 bool kvm_gfn_is_write_tracked(struct kvm *kvm, 27 int kvm_page_track_init(struct kvm *kvm); 28 void kvm_page_track_cleanup(struct kvm *kvm); 30 void __kvm_page_track_write(struct kvm *kvm, gpa_t gpa, const u8 *new, int bytes); 31 void kvm_page_track_delete_slot(struct kvm *kvm, struct kvm_memory_slot *slot); 33 static inline bool kvm_page_track_has_external_user(struct kvm *kvm) in kvm_page_track_has_external_user() argument [all …]
|
H A D | mmu.c | 284 static void kvm_flush_remote_tlbs_sptep(struct kvm *kvm, u64 *sptep) in kvm_flush_remote_tlbs_sptep() argument 289 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level); in kvm_flush_remote_tlbs_sptep() 522 static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep) in mmu_spte_clear_track_bits() argument 536 kvm_update_page_stats(kvm, level, -1); in mmu_spte_clear_track_bits() 603 if (kvm_has_mirrored_tdp(vcpu->kvm)) { in mmu_topup_memory_caches() 749 static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) in account_shadowed() argument 755 kvm->arch.indirect_shadow_pages++; in account_shadowed() 766 slots = kvm_memslots_for_spte_role(kvm, sp->role); in account_shadowed() 771 return __kvm_write_track_add_gfn(kvm, slot, gfn); in account_shadowed() 775 if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K)) in account_shadowed() [all …]
|
/linux/include/linux/ |
H A D | kvm_host.h | 187 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, 189 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); 227 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 229 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 231 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, 273 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range); 274 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range); 275 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range); 325 struct kvm *kvm; member 668 struct kvm *kvm, int irq_source_id, int level, [all …]
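kvm_make_vcpus_request_mask() is the selective variant of kvm_make_all_cpus_request(): the bitmap indexes vCPU ids, and only the flagged vCPUs are kicked. A hedged usage sketch (the request bit and target set are assumptions):

    /* Sketch: post KVM_REQ_TLB_FLUSH to vCPUs 0 and 2 only. */
    unsigned long vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)] = {};

    __set_bit(0, vcpu_bitmap);
    __set_bit(2, vcpu_bitmap);
    kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH, vcpu_bitmap);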
|
/linux/arch/s390/kvm/ |
H A D | pv.c | 23 bool kvm_s390_pv_is_protected(struct kvm *kvm) in kvm_s390_pv_is_protected() argument 25 lockdep_assert_held(&kvm->lock); in kvm_s390_pv_is_protected() 26 return !!kvm_s390_pv_get_handle(kvm); in kvm_s390_pv_is_protected() 46 int kvm_s390_pv_make_secure(struct kvm *kvm, unsigned long gaddr, void *uvcb) in kvm_s390_pv_make_secure() argument 50 lockdep_assert_held(&kvm->srcu); in kvm_s390_pv_make_secure() 52 vmaddr = gfn_to_hva(kvm, gpa_to_gfn(gaddr)); in kvm_s390_pv_make_secure() 55 return make_hva_secure(kvm->mm, vmaddr, uvcb); in kvm_s390_pv_make_secure() 58 int kvm_s390_pv_convert_to_secure(struct kvm *kvm, unsigned long gaddr) in kvm_s390_pv_convert_to_secure() argument 63 .guest_handle = kvm_s390_pv_get_handle(kvm), in kvm_s390_pv_convert_to_secure() 67 return kvm_s390_pv_make_secure(kvm, gaddr, &uvcb); in kvm_s390_pv_convert_to_secure() [all …]
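kvm_s390_pv_make_secure() asserts lockdep_assert_held(&kvm->srcu) because gfn_to_hva() walks the SRCU-protected memslots. A sketch of a conforming caller (gaddr is assumed to be a valid guest address):

    /* Sketch: import one guest page into the protected VM. */
    int idx, rc;

    idx = srcu_read_lock(&kvm->srcu);   /* protects the memslot lookup */
    rc = kvm_s390_pv_convert_to_secure(kvm, gaddr);
    srcu_read_unlock(&kvm->srcu, idx);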
|
H A D | kvm-s390.c | 275 static int sca_switch_to_extended(struct kvm *kvm); 308 struct kvm *kvm; in kvm_clock_sync() local 313 list_for_each_entry(kvm, &vm_list, vm_list) { in kvm_clock_sync() 314 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_clock_sync() 317 kvm->arch.epoch = vcpu->arch.sie_block->epoch; in kvm_clock_sync() 318 kvm->arch.epdx = vcpu->arch.sie_block->epdx; in kvm_clock_sync() 576 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) in kvm_vm_ioctl_check_extension() argument 617 if (hpage && !(kvm && kvm_is_ucontrol(kvm))) in kvm_vm_ioctl_check_extension() 696 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) in kvm_arch_sync_dirty_log() argument 701 struct gmap *gmap = kvm->arch.gmap; in kvm_arch_sync_dirty_log() [all …]
|
/linux/virt/kvm/ |
H A D | eventfd.c | 36 kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args) in kvm_arch_irqfd_allowed() argument 46 struct kvm *kvm = irqfd->kvm; in irqfd_inject() local 49 kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1, in irqfd_inject() 51 kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0, in irqfd_inject() 54 kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, in irqfd_inject() 63 srcu_read_lock_held(&resampler->kvm->irq_srcu)) in irqfd_resampler_notify() 76 struct kvm *kvm; in irqfd_resampler_ack() local 81 kvm = resampler->kvm; in irqfd_resampler_ack() 83 kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, in irqfd_resampler_ack() 86 idx = srcu_read_lock(&kvm->irq_srcu); in irqfd_resampler_ack() [all …]
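irqfd_inject() shows the kernel side of an irqfd firing: a 1-then-0 pulse on the bound GSI. The binding itself is made from userspace with the KVM_IRQFD ioctl; a minimal sketch (vm_fd and the GSI number are assumptions; needs <sys/eventfd.h>, <sys/ioctl.h> and <linux/kvm.h>):

    /* Sketch: wire an eventfd to guest GSI 5. Every write to .fd now
     * runs irqfd_inject() and pulses the line. */
    struct kvm_irqfd irqfd = {
            .fd  = eventfd(0, EFD_CLOEXEC),
            .gsi = 5,
    };

    ioctl(vm_fd, KVM_IRQFD, &irqfd);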
|
H A D | kvm_main.c | 151 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm); 157 __weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm) in kvm_arch_guest_memory_reclaimed() argument 244 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, in kvm_make_vcpus_request_mask() argument 258 vcpu = kvm_get_vcpu(kvm, i); in kvm_make_vcpus_request_mask() 270 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req) in kvm_make_all_cpus_request() argument 283 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_make_all_cpus_request() 293 void kvm_flush_remote_tlbs(struct kvm *kvm) in kvm_flush_remote_tlbs() argument 295 ++kvm->stat.generic.remote_tlb_flush_requests; in kvm_flush_remote_tlbs() 308 if (!kvm_arch_flush_remote_tlbs(kvm) in kvm_flush_remote_tlbs() 309 || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) in kvm_flush_remote_tlbs() [all …]
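The kvm_flush_remote_tlbs() lines rely on short-circuit evaluation: kvm_arch_flush_remote_tlbs() returns 0 when the architecture flushed everything itself, and only then is the per-vCPU broadcast skipped. Paraphrased (not a drop-in replacement):

    /* Fallback chain: arch-wide flush first, else kick every vCPU. */
    if (kvm_arch_flush_remote_tlbs(kvm))   /* non-zero: arch could not flush */
            kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH);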
|
H A D | kvm_mm.h | 14 #define KVM_MMU_LOCK_INIT(kvm) rwlock_init(&(kvm)->mmu_lock) argument 15 #define KVM_MMU_LOCK(kvm) write_lock(&(kvm)->mmu_lock) argument 16 #define KVM_MMU_UNLOCK(kvm) write_unlock(&(kvm)->mmu_lock) argument 18 #define KVM_MMU_LOCK_INIT(kvm) spin_lock_init(&(kvm)->mmu_lock) argument 19 #define KVM_MMU_LOCK(kvm) spin_lock(&(kvm)->mmu_lock) argument 20 #define KVM_MMU_UNLOCK(kvm) spin_unlock(&(kvm)->mmu_lock) argument 31 gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start, unsigned long end) in gfn_to_pfn_cache_invalidate_start() argument 50 kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot, unsigned int fd, loff_t offset) in kvm_gmem_bind() argument [all …]
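kvm_mm.h hides the mmu_lock flavor behind these macros: architectures that define the lock as an rwlock get write_lock()/write_unlock(), the rest get a spinlock. Generic code can then stay flavor-agnostic, e.g. (sketch):

    /* Sketch: generic MMU code locking without knowing the lock type. */
    KVM_MMU_LOCK(kvm);
    /* ... mutate stage-2 / shadow page tables ... */
    KVM_MMU_UNLOCK(kvm);
|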
H A D | irqchip.c | 21 int kvm_irq_map_gsi(struct kvm *kvm, in kvm_irq_map_gsi() argument 28 irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu, in kvm_irq_map_gsi() 29 lockdep_is_held(&kvm->irq_lock)); in kvm_irq_map_gsi() 40 int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin) in kvm_irq_map_chip_pin() argument 44 irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu); in kvm_irq_map_chip_pin() 48 int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi) in kvm_send_userspace_msi() argument 52 if (!kvm_arch_irqchip_in_kernel(kvm) || (msi->flags & ~KVM_MSI_VALID_DEVID)) in kvm_send_userspace_msi() 61 return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, false); in kvm_send_userspace_msi() 70 int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, in kvm_set_irq() argument 82 idx = srcu_read_lock(&kvm->irq_srcu); in kvm_set_irq() [all …]
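kvm_send_userspace_msi() is the kernel half of the KVM_SIGNAL_MSI ioctl: it builds a one-off routing entry and delivers it through kvm_set_msi(). The matching userspace call might look like this (vm_fd and the x86 address/data encoding are assumptions):

    /* Sketch: inject one MSI directly, bypassing the routing table. */
    struct kvm_msi msi = {
            .address_lo = 0xfee00000,   /* assumed x86 MSI doorbell */
            .data       = 0x0041,       /* assumed vector/delivery bits */
    };

    ioctl(vm_fd, KVM_SIGNAL_MSI, &msi);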
|
/linux/arch/riscv/kvm/ |
H A D | mmu.c | 19 static void mmu_wp_memory_region(struct kvm *kvm, int slot) in mmu_wp_memory_region() argument 21 struct kvm_memslots *slots = kvm_memslots(kvm); in mmu_wp_memory_region() 27 gstage.kvm = kvm; in mmu_wp_memory_region() 29 gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid); in mmu_wp_memory_region() 30 gstage.pgd = kvm->arch.pgd; in mmu_wp_memory_region() 32 spin_lock(&kvm->mmu_lock); in mmu_wp_memory_region() 34 spin_unlock(&kvm->mmu_lock); in mmu_wp_memory_region() 35 kvm_flush_remote_tlbs_memslot(kvm, memslot); in mmu_wp_memory_region() 38 int kvm_riscv_mmu_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa, in kvm_riscv_mmu_ioremap() argument 52 gstage.kvm = kvm; in kvm_riscv_mmu_ioremap() [all …]
|
H A D | Makefile | 8 include $(srctree)/virt/kvm/Makefile.kvm 10 obj-$(CONFIG_KVM) += kvm.o 13 kvm-y += aia.o 14 kvm-y += aia_aplic.o 15 kvm-y += aia_device.o 16 kvm-y += aia_imsic.o 17 kvm-y += gstage.o 18 kvm-y += main.o 19 kvm-y += mmu.o 20 kvm-y += nacl.o [all …]
|
H A D | aia_device.c | 19 struct kvm *kvm = dev->kvm; in aia_create() local 22 if (irqchip_in_kernel(kvm)) in aia_create() 26 if (kvm_trylock_all_vcpus(kvm)) in aia_create() 29 kvm_for_each_vcpu(i, vcpu, kvm) { in aia_create() 35 kvm->arch.aia.in_kernel = true; in aia_create() 38 kvm_unlock_all_vcpus(kvm); in aia_create() 47 static int aia_config(struct kvm *kvm, unsigned long type, in aia_config() argument 50 struct kvm_aia *aia = &kvm->arch.aia; in aia_config() 53 if (write && kvm_riscv_aia_initialized(kvm)) in aia_config() 141 static int aia_aplic_addr(struct kvm *kvm, u64 *addr, bool write) in aia_aplic_addr() argument [all …]
|
H A D | vm.c | 31 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) in kvm_arch_init_vm() argument 35 r = kvm_riscv_mmu_alloc_pgd(kvm); in kvm_arch_init_vm() 39 r = kvm_riscv_gstage_vmid_init(kvm); in kvm_arch_init_vm() 41 kvm_riscv_mmu_free_pgd(kvm); in kvm_arch_init_vm() 45 kvm_riscv_aia_init_vm(kvm); in kvm_arch_init_vm() 47 kvm_riscv_guest_timer_init(kvm); in kvm_arch_init_vm() 52 void kvm_arch_destroy_vm(struct kvm *kvm) in kvm_arch_destroy_vm() argument 54 kvm_destroy_vcpus(kvm); in kvm_arch_destroy_vm() 56 kvm_riscv_aia_destroy_vm(kvm); in kvm_arch_destroy_vm() 59 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irql, in kvm_vm_ioctl_irq_line() argument [all …]
|
/linux/arch/arm64/kvm/vgic/ |
H A D | vgic-init.c | 52 void kvm_vgic_early_init(struct kvm *kvm) in kvm_vgic_early_init() argument 54 struct vgic_dist *dist = &kvm->arch.vgic; in kvm_vgic_early_init() 71 int kvm_vgic_create(struct kvm *kvm, u32 type) in kvm_vgic_create() argument 95 lockdep_assert_held(&kvm->lock); in kvm_vgic_create() 102 if (kvm_trylock_all_vcpus(kvm)) in kvm_vgic_create() 109 mutex_lock(&kvm->arch.config_lock); in kvm_vgic_create() 118 if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus)) in kvm_vgic_create() 121 if (irqchip_in_kernel(kvm)) { in kvm_vgic_create() 126 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_vgic_create() 133 kvm->max_vcpus = VGIC_V2_MAX_CPUS; in kvm_vgic_create() [all …]
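kvm_vgic_create() (like aia_create() in the RISC-V section above) uses the lock-all-vcpus guard: with kvm->lock already held, it also takes every vCPU mutex so no vCPU can enter the run loop while the irqchip is being created. The skeleton of the pattern (extracted, not a verbatim kernel function):

    lockdep_assert_held(&kvm->lock);
    if (kvm_trylock_all_vcpus(kvm))
            return -EBUSY;          /* some vCPU mutex is contended */
    /* ... verify no vCPU has run, then install the irqchip ... */
    kvm_unlock_all_vcpus(kvm);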
|
H A D | vgic.h | 123 return vcpu->kvm->arch.vgic.implementation_rev; in vgic_get_implementation_rev() 154 static inline int vgic_write_guest_lock(struct kvm *kvm, gpa_t gpa, in vgic_write_guest_lock() argument 157 struct vgic_dist *dist = &kvm->arch.vgic; in vgic_write_guest_lock() 161 ret = kvm_write_guest_lock(kvm, gpa, data, len); in vgic_write_guest_lock() 230 struct vgic_irq *vgic_get_irq(struct kvm *kvm, u32 intid); 232 void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq); 236 bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq, 238 void vgic_kick_vcpus(struct kvm *kvm); 242 int vgic_check_iorange(struct kvm *kvm, phys_addr_t ioaddr, 259 int vgic_v2_map_resources(struct kvm *kvm); [all …]
|
/linux/arch/loongarch/kvm/ |
H A D | vm.c | 27 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) in kvm_arch_init_vm() argument 32 kvm->arch.pgd = kvm_pgd_alloc(); in kvm_arch_init_vm() 33 if (!kvm->arch.pgd) in kvm_arch_init_vm() 36 kvm->arch.phyid_map = kvzalloc(sizeof(struct kvm_phyid_map), GFP_KERNEL_ACCOUNT); in kvm_arch_init_vm() 37 if (!kvm->arch.phyid_map) { in kvm_arch_init_vm() 38 free_page((unsigned long)kvm->arch.pgd); in kvm_arch_init_vm() 39 kvm->arch.pgd = NULL; in kvm_arch_init_vm() 42 spin_lock_init(&kvm->arch.phyid_map_lock); in kvm_arch_init_vm() 44 kvm_init_vmcs(kvm); in kvm_arch_init_vm() 47 kvm->arch.pv_features = BIT(KVM_FEATURE_IPI); in kvm_arch_init_vm() [all …]
|
H A D | mmu.c | 26 static inline void kvm_ptw_prepare(struct kvm *kvm, kvm_ptw_ctx *ctx) in kvm_ptw_prepare() argument 28 ctx->level = kvm->arch.root_level; in kvm_ptw_prepare() 30 ctx->invalid_ptes = kvm->arch.invalid_ptes; in kvm_ptw_prepare() 31 ctx->pte_shifts = kvm->arch.pte_shifts; in kvm_ptw_prepare() 34 ctx->opaque = kvm; in kvm_ptw_prepare() 90 struct kvm *kvm; in kvm_flush_pte() local 92 kvm = ctx->opaque; in kvm_flush_pte() 94 kvm->stat.hugepages--; in kvm_flush_pte() 96 kvm->stat.pages--; in kvm_flush_pte() 149 static kvm_pte_t *kvm_populate_gpa(struct kvm *kvm, in kvm_populate_gpa() argument [all …]
|
/linux/arch/x86/kvm/ |
H A D | irq.h | 27 struct kvm; 55 struct kvm *kvm; member 64 int kvm_pic_init(struct kvm *kvm); 65 void kvm_pic_destroy(struct kvm *kvm); 66 int kvm_pic_read_irq(struct kvm *kvm); 68 int kvm_pic_set_irq(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, 71 int kvm_setup_default_ioapic_and_pic_routing(struct kvm *kvm); 73 int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip); 74 int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip); 76 static inline int irqchip_full(struct kvm *kvm) in irqchip_full() argument [all …]
|
H A D | irq.c | 82 if (pic_in_kernel(v->kvm)) in kvm_cpu_has_extint() 83 return v->kvm->arch.vpic->output; in kvm_cpu_has_extint() 86 WARN_ON_ONCE(!irqchip_split(v->kvm)); in kvm_cpu_has_extint() 140 return v->kvm->arch.xen.upcall_vector; in kvm_cpu_get_extint() 144 if (pic_in_kernel(v->kvm)) in kvm_cpu_get_extint() 145 return kvm_pic_read_irq(v->kvm); /* PIC */ in kvm_cpu_get_extint() 148 WARN_ON_ONCE(!irqchip_split(v->kvm)); in kvm_cpu_get_extint() 186 bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args) in kvm_arch_irqfd_allowed() argument 190 return resample ? irqchip_full(kvm) : irqchip_in_kernel(kvm); in kvm_arch_irqfd_allowed() 193 bool kvm_arch_irqchip_in_kernel(struct kvm *kvm) in kvm_arch_irqchip_in_kernel() argument [all …]
|
/linux/arch/powerpc/kvm/ |
H A D | Makefile | 6 ccflags-y := -Ivirt/kvm -Iarch/powerpc/kvm 8 include $(srctree)/virt/kvm/Makefile.kvm 16 kvm-e500-objs := \ 26 kvm-objs-$(CONFIG_KVM_E500V2) := $(kvm-e500-objs) 28 kvm-e500mc-objs := \ 38 kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs) 40 kvm-pr-y := \ 53 kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \ 58 kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \ 62 kvm-hv-y += \ [all …]
|
/linux/tools/perf/ |
H A D | builtin-kvm.c | 513 static void print_result(struct perf_kvm_stat *kvm); 582 static void kvm_display(struct perf_kvm_stat *kvm) in kvm_display() argument 585 print_result(kvm); in kvm_display() 592 static void kvm_display(struct perf_kvm_stat *kvm) in kvm_display() argument 595 print_result(kvm); in kvm_display() 618 static bool register_kvm_events_ops(struct perf_kvm_stat *kvm) in register_kvm_events_ops() argument 623 if (!strcmp(events_ops->name, kvm->report_event)) { in register_kvm_events_ops() 624 kvm->events_ops = events_ops->ops; in register_kvm_events_ops() 720 static struct kvm_event *find_create_kvm_event(struct perf_kvm_stat *kvm, in find_create_kvm_event() argument 736 kvm->events_ops->decode_key(kvm, key, ki->name); in find_create_kvm_event() [all …]
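register_kvm_events_ops() matches kvm->report_event against the handler table, which is what the --event option of perf kvm stat selects. Typical usage (flag spellings per the perf-kvm man page; adjust for your perf version):

    # record guest exit events for a running VMM, then summarize by exit reason
    perf kvm stat record -p <vmm-pid>
    perf kvm stat report --event=vmexit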
|