
Searched full:vm (Results 1 – 25 of 1005) sorted by relevance


/linux/drivers/gpu/drm/xe/
xe_vm.h
34 static inline struct xe_vm *xe_vm_get(struct xe_vm *vm) in xe_vm_get() argument
36 drm_gpuvm_get(&vm->gpuvm); in xe_vm_get()
37 return vm; in xe_vm_get()
40 static inline void xe_vm_put(struct xe_vm *vm) in xe_vm_put() argument
42 drm_gpuvm_put(&vm->gpuvm); in xe_vm_put()
45 int xe_vm_lock(struct xe_vm *vm, bool intr);
47 void xe_vm_unlock(struct xe_vm *vm);
49 static inline bool xe_vm_is_closed(struct xe_vm *vm) in xe_vm_is_closed() argument
51 /* Only guaranteed not to change when vm->lock is held */ in xe_vm_is_closed()
52 return !vm->size; in xe_vm_is_closed()
[all …]
xe_vm.c
45 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm) in xe_vm_obj() argument
47 return vm->gpuvm.r_obj; in xe_vm_obj()
56 * without the vm->userptr.notifier_lock held. There is no guarantee that the
72 struct xe_vm *vm = xe_vma_vm(vma); in xe_vma_userptr_pin_pages() local
73 struct xe_device *xe = vm->xe; in xe_vma_userptr_pin_pages()
75 lockdep_assert_held(&vm->lock); in xe_vma_userptr_pin_pages()
81 static bool preempt_fences_waiting(struct xe_vm *vm) in preempt_fences_waiting() argument
85 lockdep_assert_held(&vm->lock); in preempt_fences_waiting()
86 xe_vm_assert_held(vm); in preempt_fences_waiting()
88 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) { in preempt_fences_waiting()
[all …]
xe_svm.c
112 xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range, in xe_svm_garbage_collector_add_range() argument
115 struct xe_device *xe = vm->xe; in xe_svm_garbage_collector_add_range()
121 spin_lock(&vm->svm.garbage_collector.lock); in xe_svm_garbage_collector_add_range()
124 &vm->svm.garbage_collector.range_list); in xe_svm_garbage_collector_add_range()
125 spin_unlock(&vm->svm.garbage_collector.lock); in xe_svm_garbage_collector_add_range()
128 &vm->svm.garbage_collector.work); in xe_svm_garbage_collector_add_range()
132 xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r, in xe_svm_range_notifier_event_begin() argument
137 struct xe_device *xe = vm->xe; in xe_svm_range_notifier_event_begin()
142 xe_svm_assert_in_notifier(vm); in xe_svm_range_notifier_event_begin()
162 if (xe_pt_zap_ptes_range(tile, vm, range)) { in xe_svm_range_notifier_event_begin()
[all …]
/linux/tools/testing/selftests/kvm/lib/arm64/
processor.c
23 static uint64_t page_align(struct kvm_vm *vm, uint64_t v) in page_align() argument
25 return (v + vm->page_size) & ~(vm->page_size - 1); in page_align()
28 static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva) in pgd_index() argument
30 unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift; in pgd_index()
31 uint64_t mask = (1UL << (vm->va_bits - shift)) - 1; in pgd_index()
36 static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva) in pud_index() argument
38 unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift; in pud_index()
39 uint64_t mask = (1UL << (vm->page_shift - 3)) - 1; in pud_index()
41 TEST_ASSERT(vm->pgtable_levels == 4, in pud_index()
42 "Mode %d does not have 4 page table levels", vm->mode); in pud_index()
[all …]
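A quick worked example of the index arithmetic in the arm64 processor.c hit above. The values are assumptions for illustration (4 page-table levels, 4 KiB pages, 48-bit guest VAs), not taken from the selftest; the truncated snippet does not show it, but the natural use of the computed shift and mask is (gva >> shift) & mask.

/* Standalone sketch, not from the kernel tree: evaluates the pgd_index()
 * shift/mask formulas from the snippet for an assumed configuration of
 * 4 levels, 4 KiB pages (page_shift = 12) and 48-bit virtual addresses. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int pgtable_levels = 4;        /* assumed */
	unsigned int page_shift = 12;           /* assumed: 4 KiB pages */
	unsigned int va_bits = 48;              /* assumed */
	uint64_t gva = 0x0000aabbccddee00ULL;   /* arbitrary example GVA */

	unsigned int shift = (pgtable_levels - 1) * (page_shift - 3) + page_shift;
	uint64_t mask = (1ULL << (va_bits - shift)) - 1;

	/* shift = 3 * 9 + 12 = 39, mask = (1 << 9) - 1 = 0x1ff */
	printf("shift=%u mask=%#llx pgd_index=%llu\n", shift,
	       (unsigned long long)mask, (unsigned long long)((gva >> shift) & mask));
	return 0;
}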
/linux/tools/testing/selftests/kvm/lib/loongarch/
processor.c
15 static uint64_t virt_pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level) in virt_pte_index() argument
20 shift = level * (vm->page_shift - 3) + vm->page_shift; in virt_pte_index()
21 mask = (1UL << (vm->page_shift - 3)) - 1; in virt_pte_index()
25 static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry) in pte_addr() argument
27 return entry & ~((0x1UL << vm->page_shift) - 1); in pte_addr()
30 static uint64_t ptrs_per_pte(struct kvm_vm *vm) in ptrs_per_pte() argument
32 return 1 << (vm->page_shift - 3); in ptrs_per_pte()
35 static void virt_set_pgtable(struct kvm_vm *vm, vm_paddr_t table, vm_paddr_t child) in virt_set_pgtable() argument
40 ptep = addr_gpa2hva(vm, table); in virt_set_pgtable()
41 ptrs_per_pte = 1 << (vm->page_shift - 3); in virt_set_pgtable()
[all …]
/linux/drivers/gpu/drm/lima/
lima_vm.c
18 struct lima_vm *vm; member
35 static void lima_vm_unmap_range(struct lima_vm *vm, u32 start, u32 end) in lima_vm_unmap_range() argument
43 vm->bts[pbe].cpu[bte] = 0; in lima_vm_unmap_range()
47 static int lima_vm_map_page(struct lima_vm *vm, dma_addr_t pa, u32 va) in lima_vm_map_page() argument
52 if (!vm->bts[pbe].cpu) { in lima_vm_map_page()
57 vm->bts[pbe].cpu = dma_alloc_wc( in lima_vm_map_page()
58 vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT, in lima_vm_map_page()
59 &vm->bts[pbe].dma, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); in lima_vm_map_page()
60 if (!vm->bts[pbe].cpu) in lima_vm_map_page()
63 pts = vm->bts[pbe].dma; in lima_vm_map_page()
[all …]
/linux/drivers/virt/acrn/
vm.c
21 * is wrote in VM creation ioctl. Use the rwlock mechanism to protect it.
25 struct acrn_vm *acrn_vm_create(struct acrn_vm *vm, in acrn_vm_create() argument
33 "Failed to create VM! Error: %d\n", ret); in acrn_vm_create()
37 mutex_init(&vm->regions_mapping_lock); in acrn_vm_create()
38 INIT_LIST_HEAD(&vm->ioreq_clients); in acrn_vm_create()
39 spin_lock_init(&vm->ioreq_clients_lock); in acrn_vm_create()
40 vm->vmid = vm_param->vmid; in acrn_vm_create()
41 vm->vcpu_num = vm_param->vcpu_num; in acrn_vm_create()
43 if (acrn_ioreq_init(vm, vm_param->ioreq_buf) < 0) { in acrn_vm_create()
45 vm->vmid = ACRN_INVALID_VMID; in acrn_vm_create()
[all …]
irqfd.c
21 * @vm: Associated VM pointer
25 * @list: Entry within &acrn_vm.irqfds of irqfds of a VM
30 struct acrn_vm *vm; member
41 struct acrn_vm *vm = irqfd->vm; in acrn_irqfd_inject() local
43 acrn_msi_inject(vm, irqfd->msi.msi_addr, in acrn_irqfd_inject()
51 lockdep_assert_held(&irqfd->vm->irqfds_lock); in hsm_irqfd_shutdown()
63 struct acrn_vm *vm; in hsm_irqfd_shutdown_work() local
66 vm = irqfd->vm; in hsm_irqfd_shutdown_work()
67 mutex_lock(&vm->irqfds_lock); in hsm_irqfd_shutdown_work()
70 mutex_unlock(&vm->irqfds_lock); in hsm_irqfd_shutdown_work()
[all …]
acrn_drv.h
28 * @user_vm_pa: Physical address of User VM to be mapped.
29 * @service_vm_pa: Physical address of Service VM to be mapped.
33 * to manage the EPT mappings of a single memory region of the User VM. Several
47 * @vmid: A User VM ID.
54 * multiple memory regions of a User VM. A &struct vm_memory_region_batch
67 * struct vm_memory_mapping - Memory map between a User VM and the Service VM
68 * @pages: Pages in Service VM kernel.
70 * @service_vm_va: Virtual address in Service VM kernel.
71 * @user_vm_pa: Physical address in User VM.
74 * HSM maintains memory mappings between a User VM GPA and the Service VM
[all …]
mm.c
19 static int modify_region(struct acrn_vm *vm, struct vm_memory_region_op *region) in modify_region() argument
28 regions->vmid = vm->vmid; in modify_region()
35 "Failed to set memory region for VM[%u]!\n", vm->vmid); in modify_region()
43 * @vm: User VM.
44 * @user_gpa: A GPA of User VM.
45 * @service_gpa: A GPA of Service VM.
52 int acrn_mm_region_add(struct acrn_vm *vm, u64 user_gpa, u64 service_gpa, in acrn_mm_region_add() argument
68 ret = modify_region(vm, region); in acrn_mm_region_add()
79 * @vm: User VM.
80 * @user_gpa: A GPA of the User VM.
[all …]
ioeventfd.c
19 * @list: Entry within &acrn_vm.ioeventfds of ioeventfds of a VM
43 static void acrn_ioeventfd_shutdown(struct acrn_vm *vm, struct hsm_ioeventfd *p) in acrn_ioeventfd_shutdown() argument
45 lockdep_assert_held(&vm->ioeventfds_lock); in acrn_ioeventfd_shutdown()
52 static bool hsm_ioeventfd_is_conflict(struct acrn_vm *vm, in hsm_ioeventfd_is_conflict() argument
57 lockdep_assert_held(&vm->ioeventfds_lock); in hsm_ioeventfd_is_conflict()
60 list_for_each_entry(p, &vm->ioeventfds, list) in hsm_ioeventfd_is_conflict()
72 * Assign an eventfd to a VM and create a HSM ioeventfd associated with the
76 static int acrn_ioeventfd_assign(struct acrn_vm *vm, in acrn_ioeventfd_assign() argument
121 mutex_lock(&vm->ioeventfds_lock); in acrn_ioeventfd_assign()
123 if (hsm_ioeventfd_is_conflict(vm, p)) { in acrn_ioeventfd_assign()
[all …]
hypercall.h
53 * hcall_sos_remove_cpu() - Remove a vCPU of Service VM
64 * hcall_create_vm() - Create a User VM
65 * @vminfo: Service VM GPA of info of User VM creation
75 * hcall_start_vm() - Start a User VM
76 * @vmid: User VM ID
86 * hcall_pause_vm() - Pause a User VM
87 * @vmid: User VM ID
97 * hcall_destroy_vm() - Destroy a User VM
98 * @vmid: User VM ID
108 * hcall_reset_vm() - Reset a User VM
[all …]
ioreq.c
39 static int ioreq_complete_request(struct acrn_vm *vm, u16 vcpu, in ioreq_complete_request() argument
52 * in which User VMs and Service VM are bound to dedicated CPU cores. in ioreq_complete_request()
64 ret = hcall_notify_req_finish(vm->vmid, vcpu); in ioreq_complete_request()
79 if (vcpu >= client->vm->vcpu_num) in acrn_ioreq_complete_request()
84 acrn_req = (struct acrn_io_request *)client->vm->ioreq_buf; in acrn_ioreq_complete_request()
88 ret = ioreq_complete_request(client->vm, vcpu, acrn_req); in acrn_ioreq_complete_request()
93 int acrn_ioreq_request_default_complete(struct acrn_vm *vm, u16 vcpu) in acrn_ioreq_request_default_complete() argument
97 spin_lock_bh(&vm->ioreq_clients_lock); in acrn_ioreq_request_default_complete()
98 if (vm->default_client) in acrn_ioreq_request_default_complete()
99 ret = acrn_ioreq_complete_request(vm->default_client, in acrn_ioreq_request_default_complete()
[all …]
hsm.c
25 * represent a VM instance and continues to be associated with the opened file
27 * the VM instance. Release of this file descriptor will destroy the object.
31 struct acrn_vm *vm; in acrn_dev_open() local
33 vm = kzalloc(sizeof(*vm), GFP_KERNEL); in acrn_dev_open()
34 if (!vm) in acrn_dev_open()
37 vm->vmid = ACRN_INVALID_VMID; in acrn_dev_open()
38 filp->private_data = vm; in acrn_dev_open()
110 struct acrn_vm *vm = filp->private_data; in acrn_dev_ioctl() local
126 if (vm->vmid == ACRN_INVALID_VMID && cmd != ACRN_IOCTL_CREATE_VM) { in acrn_dev_ioctl()
128 "ioctl 0x%x: Invalid VM state!\n", cmd); in acrn_dev_ioctl()
[all …]
/linux/tools/testing/selftests/kvm/include/
kvm_util.h
59 struct kvm_vm *vm; member
141 #define kvm_for_each_vcpu(vm, i, vcpu) \ argument
142 for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++) \
143 if (!((vcpu) = vm->vcpus[i])) \
148 memslot2region(struct kvm_vm *vm, uint32_t memslot);
150 static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm, in vm_get_mem_region() argument
154 return memslot2region(vm, vm->memslots[type]); in vm_get_mem_region()
303 static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { } in static_assert_is_vm() argument
305 #define __vm_ioctl(vm, cm …
319 __TEST_ASSERT_VM_VCPU_IOCTL(cond, name, ret, vm)
337 TEST_ASSERT_VM_VCPU_IOCTL(cond, cmd, ret, vm)
340 vm_ioctl(vm, cmd, arg)
366 vm_check_cap(struct kvm_vm *vm, long cap)
374 __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
380 vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
387 vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa, uint64_t size, uint64_t attributes)
408 vm_mem_set_private(struct kvm_vm *vm, uint64_t gpa, uint64_t size)
414 vm_mem_set_shared(struct kvm_vm *vm, uint64_t gpa, uint64_t size)
423 vm_guest_mem_punch_hole(struct kvm_vm *vm, uint64_t gpa, uint64_t size)
429 vm_guest_mem_allocate(struct kvm_vm *vm, uint64_t gpa, uint64_t size)
446 kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
453 kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log, uint64_t first_page, uint32_t num_pages)
466 kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
471 kvm_vm_register_coalesced_io(struct kvm_vm *vm, uint64_t address, uint64_t size, bool pio)
484 kvm_vm_unregister_coalesced_io(struct kvm_vm *vm, uint64_t address, uint64_t size, bool pio)
497 vm_get_stats_fd(struct kvm_vm *vm)
555 vm_get_stat(vm, stat)
560 __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size, uint64_t flags)
571 vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size, uint64_t flags)
600 vm_arch_has_protected_memory(struct kvm_vm *vm)
634 vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa)
843 kvm_create_device(struct kvm_vm *vm, uint64_t type)
892 vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, vm_paddr_t paddr_min, uint32_t memslot)
1002 sync_global_to_guest(vm, g)
1007 sync_global_from_guest(vm, g)
1018 write_guest_global(vm, g, val)
1046 vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, void *guest_code)
1059 vm_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id)
1069 virt_pgd_alloc(struct kvm_vm *vm)
1092 virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
1115 addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
1137 virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
1143 __vm_disable_nx_huge_pages(struct kvm_vm *vm)
[all …]
/linux/tools/testing/selftests/kvm/lib/
kvm_util.c
183 void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size) in vm_enable_dirty_ring() argument
185 if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL)) in vm_enable_dirty_ring()
186 vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size); in vm_enable_dirty_ring()
188 vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size); in vm_enable_dirty_ring()
189 vm->dirty_ring_size = ring_size; in vm_enable_dirty_ring()
192 static void vm_open(struct kvm_vm *vm) in vm_open() argument
194 vm->kvm_fd = _open_kvm_dev_path_or_exit(O_RDWR); in vm_open()
198 vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, (void *)vm->type); in vm_open()
199 TEST_ASSERT(vm->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm->fd)); in vm_open()
202 vm->stats.fd = vm_get_stats_fd(vm); in vm_open()
[all …]
/linux/drivers/gpu/drm/radeon/
radeon_vm.c
37 * for the entire GPU, there are multiple VM page tables active
38 * at any given time. The VM page tables can contain a mix
42 * Each VM has an ID associated with it and there is a page table
78 * radeon_vm_manager_init - init the vm manager
82 * Init the vm manager (cayman+).
100 * radeon_vm_manager_fini - tear down the vm manager
104 * Tear down the VM manager (cayman+).
120 * radeon_vm_get_bos - add the vm BOs to a validation list
123 * @vm: vm providing the BOs
130 struct radeon_vm *vm, in radeon_vm_get_bos() argument
[all …]
/linux/tools/testing/selftests/kvm/s390/
cmma_test.c
97 static void create_main_memslot(struct kvm_vm *vm) in create_main_memslot() argument
101 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, MAIN_PAGE_COUNT, 0); in create_main_memslot()
104 vm->memslots[i] = 0; in create_main_memslot()
107 static void create_test_memslot(struct kvm_vm *vm) in create_test_memslot() argument
109 vm_userspace_mem_region_add(vm, in create_test_memslot()
111 TEST_DATA_START_GFN << vm->page_shift, in create_test_memslot()
116 vm->memslots[MEM_REGION_TEST_DATA] = TEST_DATA_MEMSLOT; in create_test_memslot()
119 static void create_memslots(struct kvm_vm *vm) in create_memslots() argument
122 * Our VM has the following memory layout: in create_memslots()
135 create_main_memslot(vm); in create_memslots()
[all …]
/linux/drivers/gpu/drm/i915/gt/
gen8_ppgtt.c
90 struct drm_i915_private *i915 = ppgtt->vm.i915; in gen8_ppgtt_notify_vgt()
91 struct intel_uncore *uncore = ppgtt->vm.gt->uncore; in gen8_ppgtt_notify_vgt()
102 if (i915_vm_is_4lvl(&ppgtt->vm)) { in gen8_ppgtt_notify_vgt()
180 static unsigned int gen8_pd_top_count(const struct i915_address_space *vm) in gen8_pd_top_count() argument
182 unsigned int shift = __gen8_pte_shift(vm->top); in gen8_pd_top_count()
184 return (vm->total + (1ull << shift) - 1) >> shift; in gen8_pd_top_count()
188 gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx) in gen8_pdp_for_page_index() argument
190 struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm); in gen8_pdp_for_page_index()
192 if (vm->top == 2) in gen8_pdp_for_page_index()
195 return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top)); in gen8_pdp_for_page_index()
[all …]
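The gen8_pd_top_count() hit above is the usual round-up-then-shift way of counting how many top-level entries cover an address space; a standalone sketch with assumed values (not taken from i915):

/* Standalone sketch, assumed values: a 39-bit address space whose top-level
 * entries each cover 1 GiB (shift = 30), mirroring the round-up idiom
 * (total + (1 << shift) - 1) >> shift from the snippet. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t total = 1ULL << 39;   /* assumed address-space size */
	unsigned int shift = 30;       /* assumed per-entry coverage: 1 GiB */

	uint64_t count = (total + (1ULL << shift) - 1) >> shift;

	printf("top-level entries: %llu\n", (unsigned long long)count); /* 512 */
	return 0;
}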
gen6_ppgtt.c
23 dma_addr_t addr = pt ? px_dma(pt) : px_dma(ppgtt->base.vm.scratch[1]); in gen6_write_pde()
74 static void gen6_ppgtt_clear_range(struct i915_address_space *vm, in gen6_ppgtt_clear_range() argument
77 struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); in gen6_ppgtt_clear_range()
79 const gen6_pte_t scratch_pte = vm->scratch[0]->encode; in gen6_ppgtt_clear_range()
110 static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, in gen6_ppgtt_insert_entries() argument
115 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); in gen6_ppgtt_insert_entries()
120 const u32 pte_encode = vm->pte_encode(0, pat_index, flags); in gen6_ppgtt_insert_entries()
166 gen6_ggtt_invalidate(ppgtt->base.vm.gt->ggtt); in gen6_flush_pd()
172 static void gen6_alloc_va_range(struct i915_address_space *vm, in gen6_alloc_va_range() argument
176 struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); in gen6_alloc_va_range()
[all …]
/linux/drivers/gpu/drm/i915/selftests/
mock_gtt.c
27 static void mock_insert_page(struct i915_address_space *vm, in mock_insert_page() argument
35 static void mock_insert_entries(struct i915_address_space *vm, in mock_insert_entries() argument
41 static void mock_bind_ppgtt(struct i915_address_space *vm, in mock_bind_ppgtt() argument
51 static void mock_unbind_ppgtt(struct i915_address_space *vm, in mock_unbind_ppgtt() argument
56 static void mock_cleanup(struct i915_address_space *vm) in mock_cleanup() argument
60 static void mock_clear_range(struct i915_address_space *vm, in mock_clear_range() argument
73 ppgtt->vm.gt = to_gt(i915); in mock_ppgtt()
74 ppgtt->vm.i915 = i915; in mock_ppgtt()
75 ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE); in mock_ppgtt()
76 ppgtt->vm.dma = i915->drm.dev; in mock_ppgtt()
[all …]
i915_gem_gtt.c
170 if (!ppgtt->vm.allocate_va_range) in igt_ppgtt_alloc()
181 limit = min(ppgtt->vm.total, limit); in igt_ppgtt_alloc()
185 err = i915_vm_lock_objects(&ppgtt->vm, &ww); in igt_ppgtt_alloc()
193 err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size); in igt_ppgtt_alloc()
197 err = i915_vm_map_pt_stash(&ppgtt->vm, &stash); in igt_ppgtt_alloc()
199 i915_vm_free_pt_stash(&ppgtt->vm, &stash); in igt_ppgtt_alloc()
203 ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, size); in igt_ppgtt_alloc()
206 ppgtt->vm.clear_range(&ppgtt->vm, 0, size); in igt_ppgtt_alloc()
208 i915_vm_free_pt_stash(&ppgtt->vm, &stash); in igt_ppgtt_alloc()
215 err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size - last); in igt_ppgtt_alloc()
[all …]
/linux/sound/pci/ctxfi/
ctvmem.c
26 * Find or create vm block based on requested @size.
30 get_vm_block(struct ct_vm *vm, unsigned int size, struct ct_atc *atc) in get_vm_block() argument
36 if (size > vm->size) { in get_vm_block()
42 mutex_lock(&vm->lock); in get_vm_block()
43 list_for_each(pos, &vm->unused) { in get_vm_block()
48 if (pos == &vm->unused) in get_vm_block()
52 /* Move the vm node from unused list to used list directly */ in get_vm_block()
53 list_move(&entry->list, &vm->used); in get_vm_block()
54 vm->size -= size; in get_vm_block()
65 list_add(&block->list, &vm->used); in get_vm_block()
[all …]
/linux/tools/testing/selftests/kvm/lib/riscv/
processor.c
29 static uint64_t page_align(struct kvm_vm *vm, uint64_t v) in page_align() argument
31 return (v + vm->page_size) & ~(vm->page_size - 1); in page_align()
34 static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry) in pte_addr() argument
40 static uint64_t ptrs_per_pte(struct kvm_vm *vm) in ptrs_per_pte() argument
59 static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level) in pte_index() argument
63 TEST_ASSERT(level < vm->pgtable_levels, in pte_index()
69 void virt_arch_pgd_alloc(struct kvm_vm *vm) in virt_arch_pgd_alloc() argument
71 size_t nr_pages = page_align(vm, ptrs_per_pte(vm) * 8) / vm->page_size; in virt_arch_pgd_alloc()
73 if (vm->pgd_created) in virt_arch_pgd_alloc()
76 vm->pgd = vm_phy_pages_alloc(vm, nr_pages, in virt_arch_pgd_alloc()
[all …]
/linux/arch/x86/include/asm/
vmxfeatures.h
16 /* Pin-Based VM-Execution Controls, EPT/VPID, APIC and VM-Functions, word 0 */
17 #define VMX_FEATURE_INTR_EXITING ( 0*32+ 0) /* VM-Exit on vectored interrupts */
18 #define VMX_FEATURE_NMI_EXITING ( 0*32+ 3) /* VM-Exit on NMIs */
34 /* VM-Functions, shifted to bits 28-31 */
37 /* Primary Processor-Based VM-Execution Controls, word 1 */
38 #define VMX_FEATURE_INTR_WINDOW_EXITING ( 1*32+ 2) /* VM-Exit if INTRs are unblocked in guest */
40 #define VMX_FEATURE_HLT_EXITING ( 1*32+ 7) /* VM-Exit on HLT */
41 #define VMX_FEATURE_INVLPG_EXITING ( 1*32+ 9) /* VM-Exit on INVLPG */
42 #define VMX_FEATURE_MWAIT_EXITING ( 1*32+ 10) /* VM-Exit on MWAIT */
43 #define VMX_FEATURE_RDPMC_EXITING ( 1*32+ 11) /* VM-Exit on RDPMC */
[all …]
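The vmxfeatures.h hits above encode each VMX feature as (word * 32 + bit); a small standalone decode of that scheme, reusing two values that appear verbatim in the snippet:

#include <stdio.h>

/* Same (word * 32 + bit) encoding as the vmxfeatures.h snippet above. */
#define VMX_FEATURE_NMI_EXITING  (0 * 32 + 3)
#define VMX_FEATURE_HLT_EXITING  (1 * 32 + 7)

static void decode(const char *name, int feature)
{
	/* word selects the 32-bit capability word, bit the position inside it */
	printf("%s: word %d, bit %d\n", name, feature / 32, feature % 32);
}

int main(void)
{
	decode("VMX_FEATURE_NMI_EXITING", VMX_FEATURE_NMI_EXITING);  /* word 0, bit 3 */
	decode("VMX_FEATURE_HLT_EXITING", VMX_FEATURE_HLT_EXITING);  /* word 1, bit 7 */
	return 0;
}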
