Searched refs:pv_ops (Results 1 – 14 of 14) sorted by relevance
22 (hypervisor, hereafter simply "hypervisor") required a different kernel binary; this restriction has been
23 removed by pv_ops. Linux pv_ops is a virtualization API which can support different hypervisors. It allows each hypervisor
27 pv_ops provides a set of function pointers representing operations that correspond to low-level critical instructions and high-level functionality in various areas.
30 pv_ops operations are classified into three categories:
138 pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath; in xen_init_spinlocks()
139 pv_ops.lock.queued_spin_unlock = in xen_init_spinlocks()
141 pv_ops.lock.wait = xen_qlock_wait; in xen_init_spinlocks()
142 pv_ops.lock.kick = xen_qlock_kick; in xen_init_spinlocks()
143 pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen); in xen_init_spinlocks()
43 static const typeof(pv_ops) xen_irq_ops __initconst = {
57 pv_ops.irq = xen_irq_ops.irq; in xen_init_irq_ops()
65 pv_ops.mmu.exit_mmap = xen_hvm_exit_mmap; in xen_hvm_init_mmu_ops()
9 different hypervisors; this restriction was removed with pv_ops.
10 Linux pv_ops is a virtualization API which enables support for different
15 pv_ops provides a set of function pointers which represent operations
17 functionalities in various areas. pv_ops allows for optimizations at run
21 pv_ops operations are classified into three categories:
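The documentation hits above describe the pattern the remaining code hits illustrate: a single global table of function pointers, filled with native defaults, whose individual slots a hypervisor's init code overwrites at boot. Below is a minimal userspace sketch of that pattern; every identifier in it (struct pv_template, the hv_* functions, hv_init_spinlocks()) is an illustrative stand-in, not a kernel name.

#include <stdio.h>

/* Illustrative ops table, loosely modeled on the kernel's
 * struct paravirt_patch_template / pv_ops. */
struct lock_ops {
    void (*wait)(int *lock);
    void (*kick)(int cpu);
};

struct pv_template {
    struct lock_ops lock;
};

/* Native defaults, used on bare metal. */
static void native_wait(int *lock) { while (*lock) ; /* just spin */ }
static void native_kick(int cpu)   { (void)cpu; /* nothing to do */ }

static struct pv_template ops = {
    .lock = { .wait = native_wait, .kick = native_kick },
};

/* A hypervisor's init path overwrites only the slots it cares about,
 * as the xen_init_spinlocks()/kvm_spinlock_init() hits show. */
static void hv_wait(int *lock) { (void)lock; puts("hypercall: block this vCPU"); }
static void hv_kick(int cpu)   { printf("hypercall: wake vCPU %d\n", cpu); }

static void hv_init_spinlocks(void)
{
    ops.lock.wait = hv_wait;
    ops.lock.kick = hv_kick;
}

int main(void)
{
    hv_init_spinlocks();      /* would run only if a hypervisor was detected */
    int lock = 0;
    ops.lock.wait(&lock);     /* call sites always go through the table */
    ops.lock.kick(1);
    return 0;
}

Because every call site goes through the table, one kernel binary runs both natively and under any supported hypervisor; the real kernel additionally binary-patches the hottest slots at boot, which this sketch omits.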
53 if (!f->pv_ops) { in objtool_pv_add()
70 list_add(&func->pv_target, &f->pv_ops[idx].targets); in objtool_pv_add()
71 f->pv_ops[idx].clean = false; in objtool_pv_add()
578 const char *pv_ops; in init_pv_ops() local
585 file->pv_ops = NULL; in init_pv_ops()
592 file->pv_ops = calloc(nr, sizeof(struct pv_state)); in init_pv_ops()
593 if (!file->pv_ops) { in init_pv_ops()
599 INIT_LIST_HEAD(&file->pv_ops[idx].targets); in init_pv_ops()
601 for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++) { in init_pv_ops()
602 if (add_pv_ops(file, pv_ops)) in init_pv_ops()
3421 if (file->pv_ops[idx].clean) in pv_call_dest()
3424 file->pv_ops[idx].clean = true; in pv_call_dest()
3426 list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) { in pv_call_dest()
[all …]
20 return pv_ops.lock.queued_spin_unlock.func == in pv_is_native_spin_unlock()
32 return pv_ops.lock.vcpu_is_preempted.func == in pv_is_native_vcpu_is_preempted()
316 pv_ops.cpu.io_delay = kvm_io_delay; in paravirt_ops_setup()
831 pv_ops.lock.vcpu_is_preempted = in kvm_guest_init()
845 pv_ops.mmu.flush_tlb_multi = kvm_flush_tlb_multi; in kvm_guest_init()
958 pv_ops.mmu.notify_page_enc_status_changed = in kvm_init_platform()
1128 pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath; in kvm_spinlock_init()
1129 pv_ops.lock.queued_spin_unlock = in kvm_spinlock_init()
1131 pv_ops.lock.wait = kvm_wait; in kvm_spinlock_init()
1132 pv_ops.lock.kick = kvm_kick_cpu; in kvm_spinlock_init()
236 extern struct paravirt_patch_template pv_ops;
238 #define paravirt_ptr(op) [paravirt_opptr] "m" (pv_ops.op)
364 #define PVOP_TEST_NULL(op) BUG_ON(pv_ops.op == NULL)
366 #define PVOP_TEST_NULL(op) ((void)pv_ops.op)
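The two PVOP_TEST_NULL definitions in the hits above are the debug and regular variants (in the kernel they sit under an #ifdef CONFIG_PARAVIRT_DEBUG that the search does not show): debug builds catch a NULL slot with BUG_ON() before the indirect call, while regular builds merely evaluate the pointer. A minimal sketch of that guard pattern follows; PV_DEBUG stands in for CONFIG_PARAVIRT_DEBUG and the rest of the names are illustrative, not the kernel's.

#include <assert.h>
#include <stdio.h>

/* Illustrative table; "halt" stands in for any pv_ops slot. */
struct ops { void (*halt)(void); };
static struct ops pv_ops;              /* slots start out NULL */

/* Debug builds trap a missing slot before the indirect call;
 * regular builds only evaluate the pointer (cf. the two hits). */
#ifdef PV_DEBUG
#define PVOP_TEST_NULL(op) assert(pv_ops.op != NULL)
#else
#define PVOP_TEST_NULL(op) ((void)pv_ops.op)
#endif

static void native_halt(void) { puts("native halt"); }

int main(void)
{
    pv_ops.halt = native_halt;   /* an init path fills the slot */
    PVOP_TEST_NULL(halt);        /* guard, then call through the table */
    pv_ops.halt();
    return 0;
}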
48 pv_ops.cpu.load_gs_index = native_lkgs; in lkgs_init()
39 struct pv_state *pv_ops; member
1181 pv_ops.irq.safe_halt = tdx_safe_halt; in tdx_early_init()
1182 pv_ops.irq.halt = tdx_halt; in tdx_early_init()
341 pv_ops.cpu.io_delay = paravirt_nop; in vmware_paravirt_ops_setup()