
Searched refs:vm (Results 1 – 25 of 519) sorted by relevance

/linux/drivers/virtio/
virtio_mem.c
283 static void virtio_mem_retry(struct virtio_mem *vm);
284 static int virtio_mem_create_resource(struct virtio_mem *vm);
285 static void virtio_mem_delete_resource(struct virtio_mem *vm);
291 static int register_virtio_mem_device(struct virtio_mem *vm) in register_virtio_mem_device() argument
300 list_add_rcu(&vm->next, &virtio_mem_devices); in register_virtio_mem_device()
310 static void unregister_virtio_mem_device(struct virtio_mem *vm) in unregister_virtio_mem_device() argument
314 list_del_rcu(&vm->next); in unregister_virtio_mem_device()
341 static unsigned long virtio_mem_phys_to_bb_id(struct virtio_mem *vm, in virtio_mem_phys_to_bb_id() argument
344 return addr / vm->bbm.bb_size; in virtio_mem_phys_to_bb_id()
350 static uint64_t virtio_mem_bb_id_to_phys(struct virtio_mem *vm, in virtio_mem_bb_id_to_phys() argument
[all …]
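
Note: the two conversion helpers above (virtio_mem_phys_to_bb_id() and virtio_mem_bb_id_to_phys()) are plain integer division and multiplication over the device's big-block size, vm->bbm.bb_size. A minimal, runnable userspace sketch of the same round trip, using a hypothetical 2 GiB block size:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for vm->bbm.bb_size. */
#define BB_SIZE (2ULL << 30)

/* Mirrors virtio_mem_phys_to_bb_id(). */
static uint64_t phys_to_bb_id(uint64_t addr)  { return addr / BB_SIZE; }
/* Mirrors virtio_mem_bb_id_to_phys(). */
static uint64_t bb_id_to_phys(uint64_t bb_id) { return bb_id * BB_SIZE; }

int main(void)
{
	uint64_t addr = 5ULL << 30;        /* 5 GiB */
	uint64_t id = phys_to_bb_id(addr); /* big block 2 */

	printf("addr 0x%llx -> bb_id %llu -> base 0x%llx\n",
	       (unsigned long long)addr, (unsigned long long)id,
	       (unsigned long long)bb_id_to_phys(id));
	return 0;
}
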
/linux/tools/testing/selftests/kvm/lib/aarch64/
processor.c
23 static uint64_t page_align(struct kvm_vm *vm, uint64_t v) in page_align() argument
25 return (v + vm->page_size) & ~(vm->page_size - 1); in page_align()
28 static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva) in pgd_index() argument
30 unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift; in pgd_index()
31 uint64_t mask = (1UL << (vm->va_bits - shift)) - 1; in pgd_index()
36 static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva) in pud_index() argument
38 unsigned int shift = 2 * (vm in pud_index()
47 pmd_index(struct kvm_vm *vm, vm_vaddr_t gva) in pmd_index() argument
58 pte_index(struct kvm_vm *vm, vm_vaddr_t gva) in pte_index() argument
64 use_lpa2_pte_format(struct kvm_vm *vm) in use_lpa2_pte_format() argument
70 addr_pte(struct kvm_vm *vm, uint64_t pa, uint64_t attrs) in addr_pte() argument
88 pte_addr(struct kvm_vm *vm, uint64_t pte) in pte_addr() argument
104 ptrs_per_pgd(struct kvm_vm *vm) in ptrs_per_pgd() argument
110 ptrs_per_pte(struct kvm_vm *vm) in ptrs_per_pte() argument
115 virt_arch_pgd_alloc(struct kvm_vm *vm) in virt_arch_pgd_alloc() argument
128 _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, uint64_t flags) in _virt_pg_map() argument
173 virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) in virt_arch_pg_map() argument
180 virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva) in virt_get_pte_hva() argument
218 addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) in addr_arch_gva2gpa() argument
225 pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level) in pte_dump() argument
244 virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) in virt_arch_dump() argument
264 struct kvm_vm *vm = vcpu->vm; in aarch64_vcpu_setup() local
375 __aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, struct kvm_vcpu_init *init) in __aarch64_vcpu_add() argument
394 aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, struct kvm_vcpu_init *init, void *guest_code) in aarch64_vcpu_add() argument
404 vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) in vm_arch_vcpu_add() argument
496 vm_init_descriptor_tables(struct kvm_vm *vm) in vm_init_descriptor_tables() argument
504 vm_install_sync_handler(struct kvm_vm *vm, int vector, int ec, void (*handler)(struct ex_regs *)) in vm_install_sync_handler() argument
515 vm_install_exception_handler(struct kvm_vm *vm, int vector, void (*handler)(struct ex_regs *)) in vm_install_exception_handler() argument
633 vm_vaddr_populate_bitmap(struct kvm_vm *vm) in vm_vaddr_populate_bitmap() argument
[all …]
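
Note: pgd_index() above derives the top-level table index from the VM's page-table geometry: shift = (pgtable_levels - 1) * (page_shift - 3) + page_shift, with the index masked to the VA bits left above that shift. A runnable sketch under an assumed 4-level, 4 KiB-page, 48-bit-VA layout (the real values live in struct kvm_vm):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed geometry, not read from a real struct kvm_vm. */
	unsigned int page_shift = 12, pgtable_levels = 4, va_bits = 48;
	uint64_t gva = 0x0000aabbccddee00ULL;

	/* Each level resolves page_shift - 3 bits (8-byte entries per slot). */
	unsigned int shift = (pgtable_levels - 1) * (page_shift - 3) + page_shift; /* 39 */
	uint64_t mask = (1ULL << (va_bits - shift)) - 1;                           /* 9 index bits */

	printf("pgd index of 0x%llx = %llu\n",
	       (unsigned long long)gva,
	       (unsigned long long)((gva >> shift) & mask));
	return 0;
}
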
/linux/tools/testing/selftests/kvm/s390x/
cmma_test.c
97 static void create_main_memslot(struct kvm_vm *vm) in create_main_memslot() argument
101 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, MAIN_PAGE_COUNT, 0); in create_main_memslot()
104 vm->memslots[i] = 0; in create_main_memslot()
107 static void create_test_memslot(struct kvm_vm *vm) in create_test_memslot() argument
109 vm_userspace_mem_region_add(vm, in create_test_memslot()
111 TEST_DATA_START_GFN << vm->page_shift, in create_test_memslot()
116 vm->memslots[MEM_REGION_TEST_DATA] = TEST_DATA_MEMSLOT; in create_test_memslot()
119 static void create_memslots(struct kvm_vm *vm) in create_memslots() argument
135 create_main_memslot(vm); in create_memslots()
136 create_test_memslot(vm); in create_memslots()
[all …]
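
Note: create_memslots() above places guest main memory in slot 0 and the CMMA test data in a dedicated memslot so per-slot state can be exercised separately. A sketch of the same layout, assuming the kvm selftest harness; TEST_DATA_PAGE_COUNT is an assumed constant (only TEST_DATA_START_GFN and TEST_DATA_MEMSLOT appear in the excerpt):

#include "kvm_util.h"

static void create_memslots_sketch(struct kvm_vm *vm)
{
	/* Slot 0: main memory at GPA 0, MAIN_PAGE_COUNT pages. */
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0,
				    MAIN_PAGE_COUNT, 0);

	/* Separate slot for test data, starting at TEST_DATA_START_GFN. */
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
				    TEST_DATA_START_GFN << vm->page_shift,
				    TEST_DATA_MEMSLOT, TEST_DATA_PAGE_COUNT, 0);
	vm->memslots[MEM_REGION_TEST_DATA] = TEST_DATA_MEMSLOT;
}
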
/linux/drivers/gpu/drm/panthor/
panthor_mmu.c
46 struct panthor_vm *vm; member
101 } vm; member
405 struct panthor_vm *vm; member
437 struct panthor_vm *vm = cookie; in alloc_pt() local
441 if (unlikely(!vm->root_page_table)) { in alloc_pt()
444 drm_WARN_ON(&vm->ptdev->base, vm->op_ctx); in alloc_pt()
445 p = alloc_pages_node(dev_to_node(vm->ptdev->base.dev), in alloc_pt()
448 vm->root_page_table = page; in alloc_pt()
455 if (drm_WARN_ON(&vm->ptdev->base, size != SZ_4K)) in alloc_pt()
461 if (drm_WARN_ON(&vm->ptdev->base, !vm->op_ctx) || in alloc_pt()
[all …]
/linux/drivers/gpu/drm/amd/amdgpu/
amdgpu_vm.c
121 struct amdgpu_vm *vm; member
140 int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm, in amdgpu_vm_set_pasid() argument
145 if (vm->pasid == pasid) in amdgpu_vm_set_pasid()
148 if (vm->pasid) { in amdgpu_vm_set_pasid()
149 r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid)); in amdgpu_vm_set_pasid()
153 vm->pasid = 0; in amdgpu_vm_set_pasid()
157 r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm, in amdgpu_vm_set_pasid()
162 vm->pasid = pasid; in amdgpu_vm_set_pasid()
179 struct amdgpu_vm *vm = vm_bo->vm; in amdgpu_vm_bo_evicted() local
183 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_evicted()
[all …]
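
Note: amdgpu_vm_set_pasid() above is an erase-then-store update of the pasid-to-VM lookup held in an xarray, with xa_err() turning any error entry into an errno. A generic kernel-style sketch of that pattern; the names here are placeholders, not amdgpu API:

#include <linux/xarray.h>

static int set_id(struct xarray *xa, void *obj, u32 *cur_id, u32 new_id)
{
	int r;

	if (*cur_id == new_id)
		return 0; /* already mapped as requested */

	if (*cur_id) {
		/* Drop the old index before installing the new one. */
		r = xa_err(xa_erase_irq(xa, *cur_id));
		if (r < 0)
			return r;
		*cur_id = 0;
	}

	if (new_id) {
		r = xa_err(xa_store_irq(xa, new_id, obj, GFP_KERNEL));
		if (r < 0)
			return r;
	}

	*cur_id = new_id;
	return 0;
}
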
/linux/drivers/gpu/drm/xe/
xe_vm.h
32 static inline struct xe_vm *xe_vm_get(struct xe_vm *vm) in xe_vm_get() argument
34 drm_gpuvm_get(&vm->gpuvm); in xe_vm_get()
35 return vm; in xe_vm_get()
38 static inline void xe_vm_put(struct xe_vm *vm) in xe_vm_put() argument
40 drm_gpuvm_put(&vm->gpuvm); in xe_vm_put()
43 int xe_vm_lock(struct xe_vm *vm, bool intr);
45 void xe_vm_unlock(struct xe_vm *vm);
47 static inline bool xe_vm_is_closed(struct xe_vm *vm) in xe_vm_is_closed() argument
50 return !vm->size; in xe_vm_is_closed()
53 static inline bool xe_vm_is_banned(struct xe_vm *vm) in xe_vm_is_banned() argument
[all …]
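
Note: xe_vm_get()/xe_vm_put() above are thin reference-count wrappers over the embedded drm_gpuvm, and xe_vm_lock() takes an interruptible flag and returns an errno. A sketch of how they compose, assuming kernel context with xe_vm.h available; the work in the middle is a placeholder:

static int with_vm_locked(struct xe_vm *vm)
{
	int err;

	xe_vm_get(vm);              /* hold a reference across the critical section */
	err = xe_vm_lock(vm, true); /* true = interruptible */
	if (!err) {
		/* ... operate on vm while locked ... */
		xe_vm_unlock(vm);
	}
	xe_vm_put(vm);
	return err;
}
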
xe_vm.c
43 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm) in xe_vm_obj() argument
45 return vm->gpuvm.r_obj; in xe_vm_obj()
70 struct xe_vm *vm = xe_vma_vm(vma); in xe_vma_userptr_pin_pages() local
71 struct xe_device *xe = vm->xe; in xe_vma_userptr_pin_pages()
73 lockdep_assert_held(&vm->lock); in xe_vma_userptr_pin_pages()
79 static bool preempt_fences_waiting(struct xe_vm *vm) in preempt_fences_waiting() argument
83 lockdep_assert_held(&vm->lock); in preempt_fences_waiting()
84 xe_vm_assert_held(vm); in preempt_fences_waiting()
86 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) { in preempt_fences_waiting()
105 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list, in alloc_preempt_fences() argument
[all …]
/linux/drivers/gpu/drm/lima/
lima_vm.c
18 struct lima_vm *vm; member
35 static void lima_vm_unmap_range(struct lima_vm *vm, u32 start, u32 end) in lima_vm_unmap_range() argument
43 vm->bts[pbe].cpu[bte] = 0; in lima_vm_unmap_range()
47 static int lima_vm_map_page(struct lima_vm *vm, dma_addr_t pa, u32 va) in lima_vm_map_page() argument
52 if (!vm->bts[pbe].cpu) { in lima_vm_map_page()
57 vm->bts[pbe].cpu = dma_alloc_wc( in lima_vm_map_page()
58 vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT, in lima_vm_map_page()
59 &vm->bts[pbe].dma, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); in lima_vm_map_page()
60 if (!vm->bts[pbe].cpu) in lima_vm_map_page()
63 pts = vm->bts[pbe].dma; in lima_vm_map_page()
[all …]
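
Note: lima_vm_map_page() above allocates a block of page tables with dma_alloc_wc() only when the first PTE inside that block is written, then indexes it as vm->bts[pbe].cpu[bte]. The pbe/bte split itself is bit slicing of the virtual address; a runnable sketch with assumed shift widths (the real constants come from the lima driver headers):

#include <stdint.h>
#include <stdio.h>

/* Assumed split: 4 KiB pages, 1024 PTEs per table. */
#define PG_SHIFT 12
#define PT_BITS  10

int main(void)
{
	uint32_t va = 0x00403000;
	uint32_t bte = (va >> PG_SHIFT) & ((1u << PT_BITS) - 1); /* PTE index within one table */
	uint32_t pbe = va >> (PG_SHIFT + PT_BITS);               /* which table block */

	printf("va 0x%08x -> pbe %u, bte %u\n", va, pbe, bte);
	return 0;
}
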
/linux/tools/testing/selftests/kvm/lib/
kvm_util.c
182 void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size) in vm_enable_dirty_ring() argument
184 if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL)) in vm_enable_dirty_ring()
185 vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size); in vm_enable_dirty_ring()
187 vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size); in vm_enable_dirty_ring()
188 vm->dirty_ring_size = ring_size; in vm_enable_dirty_ring()
191 static void vm_open(struct kvm_vm *vm) in vm_open() argument
193 vm->kvm_fd = _open_kvm_dev_path_or_exit(O_RDWR); in vm_open()
197 vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, (void *)vm in vm_open()
259 vm_vaddr_populate_bitmap(struct kvm_vm *vm) in vm_vaddr_populate_bitmap() argument
270 struct kvm_vm *vm; in ____vm_create() local
415 struct kvm_vm *vm; in __vm_create() local
473 struct kvm_vm *vm; in __vm_create_with_vcpus() local
492 struct kvm_vm *vm; in __vm_create_shape_with_one_vcpu() local
535 vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id) in vm_arch_vcpu_recreate() argument
541 vm_recreate_with_one_vcpu(struct kvm_vm *vm) in vm_recreate_with_one_vcpu() argument
638 userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end) in userspace_mem_region_find() argument
677 vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu) in vm_vcpu_rm() argument
714 __vm_mem_region_delete(struct kvm_vm *vm, struct userspace_mem_region *region) in __vm_mem_region_delete() argument
846 __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, uint64_t gpa, uint64_t size, void *hva) in __vm_set_user_memory_region() argument
860 vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, uint64_t gpa, uint64_t size, void *hva) in vm_set_user_memory_region() argument
873 __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags, uint64_t gpa, uint64_t size, void *hva, uint32_t guest_memfd, uint64_t guest_memfd_offset) in __vm_set_user_memory_region2() argument
892 vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags, uint64_t gpa, uint64_t size, void *hva, uint32_t guest_memfd, uint64_t guest_memfd_offset) in vm_set_user_memory_region2() argument
905 vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, uint64_t guest_paddr, uint32_t slot, uint64_t npages, uint32_t flags, int guest_memfd, uint64_t guest_memfd_offset) in vm_mem_add() argument
1084 vm_userspace_mem_region_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, uint64_t guest_paddr, uint32_t slot, uint64_t npages, uint32_t flags) in vm_userspace_mem_region_add() argument
1108 memslot2region(struct kvm_vm *vm, uint32_t memslot) in memslot2region() argument
1139 vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags) in vm_mem_region_set_flags() argument
1169 vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa) in vm_mem_region_move() argument
1198 vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot) in vm_mem_region_delete() argument
1203 vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size, bool punch_hole) in vm_guest_mem_fallocate() argument
1247 vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id) in vcpu_exists() argument
1263 __vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) in __vm_vcpu_add() argument
1313 vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min) in vm_vaddr_unused_gap() argument
1379 ____vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, enum kvm_mem_region_type type, bool protected) in ____vm_vaddr_alloc() argument
1409 __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, enum kvm_mem_region_type type) in __vm_vaddr_alloc() argument
1416 vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, enum kvm_mem_region_type type) in vm_vaddr_alloc_shared() argument
1442 vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min) in vm_vaddr_alloc() argument
1461 vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages) in vm_vaddr_alloc_pages() argument
1466 __vm_vaddr_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type) in __vm_vaddr_alloc_page() argument
1485 vm_vaddr_alloc_page(struct kvm_vm *vm) in vm_vaddr_alloc_page() argument
1506 virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, unsigned int npages) in virt_map() argument
1541 addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa) in addr_gpa2hva() argument
1574 addr_hva2gpa(struct kvm_vm *vm, void *hva) in addr_hva2gpa() argument
1617 addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa) in addr_gpa2alias() argument
1634 vm_create_irqchip(struct kvm_vm *vm) in vm_create_irqchip() argument
1741 __kvm_test_create_device(struct kvm_vm *vm, uint64_t type) in __kvm_test_create_device() argument
1751 __kvm_create_device(struct kvm_vm *vm, uint64_t type) in __kvm_create_device() argument
1793 _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level) in _kvm_irq_line() argument
1803 kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level) in kvm_irq_line() argument
1841 _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing) in _kvm_gsi_routing_write() argument
1852 kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing) in kvm_gsi_routing_write() argument
1875 vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) in vm_dump() argument
2010 __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, vm_paddr_t paddr_min, uint32_t memslot, bool protected) in __vm_phy_pages_alloc() argument
2056 vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, uint32_t memslot) in vm_phy_page_alloc() argument
2062 vm_alloc_page_table(struct kvm_vm *vm) in vm_alloc_page_table() argument
2080 addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva) in addr_gva2hva() argument
2085 vm_compute_max_gfn(struct kvm_vm *vm) in vm_compute_max_gfn() argument
2211 __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data, size_t max_elements) in __vm_get_stat() argument
2240 kvm_arch_vm_post_create(struct kvm_vm *vm) in kvm_arch_vm_post_create() argument
2259 vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr) in vm_is_gpa_protected() argument
[all …]
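
Note: vm_enable_dirty_ring() at the top of this block prefers the acquire/release flavor of the dirty-ring capability when the host advertises it, and falls back to the plain one otherwise. The same logic, consolidated, assuming the selftest harness:

#include "kvm_util.h"

static void enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
{
	/* Prefer KVM_CAP_DIRTY_LOG_RING_ACQ_REL when the host has it. */
	if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL))
		vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size);
	else
		vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size);
	vm->dirty_ring_size = ring_size;
}
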
/linux/drivers/gpu/drm/i915/gt/
intel_gtt.c
41 struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz) in alloc_pt_lmem() argument
57 obj = __i915_gem_object_create_lmem_with_ps(vm->i915, sz, sz, in alloc_pt_lmem()
58 vm->lmem_pt_obj_flags); in alloc_pt_lmem()
65 obj->base.resv = i915_vm_resv_get(vm); in alloc_pt_lmem()
66 obj->shares_resv_from = vm; in alloc_pt_lmem()
68 if (vm->fpriv) in alloc_pt_lmem()
69 i915_drm_client_add_object(vm->fpriv->client, obj); in alloc_pt_lmem()
75 struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz) in alloc_pt_dma() argument
79 if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1))) in alloc_pt_dma()
80 i915_gem_shrink_all(vm->i915); in alloc_pt_dma()
[all …]
intel_ggtt.c
57 struct drm_i915_private *i915 = ggtt->vm.i915; in ggtt_init_hw()
59 i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT); in ggtt_init_hw()
61 ggtt->vm.is_ggtt = true; in ggtt_init_hw()
64 ggtt->vm.has_read_only = IS_VALLEYVIEW(i915); in ggtt_init_hw()
67 ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust; in ggtt_init_hw()
73 ggtt->vm.cleanup(&ggtt->vm); in ggtt_init_hw()
114 void i915_ggtt_suspend_vm(struct i915_address_space *vm) in i915_ggtt_suspend_vm() argument
119 drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt); in i915_ggtt_suspend_vm()
122 i915_gem_drain_freed_objects(vm->i915); in i915_ggtt_suspend_vm()
124 mutex_lock(&vm->mutex); in i915_ggtt_suspend_vm()
[all …]
gen8_ppgtt.c
90 struct drm_i915_private *i915 = ppgtt->vm.i915; in gen8_ppgtt_notify_vgt()
91 struct intel_uncore *uncore = ppgtt->vm.gt->uncore; in gen8_ppgtt_notify_vgt()
102 if (i915_vm_is_4lvl(&ppgtt->vm)) { in gen8_ppgtt_notify_vgt()
180 static unsigned int gen8_pd_top_count(const struct i915_address_space *vm) in gen8_pd_top_count() argument
182 unsigned int shift = __gen8_pte_shift(vm->top); in gen8_pd_top_count()
184 return (vm->total + (1ull << shift) - 1) >> shift; in gen8_pd_top_count()
188 gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx) in gen8_pdp_for_page_index() argument
190 struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm); in gen8_pdp_for_page_index()
192 if (vm->top == 2) in gen8_pdp_for_page_index()
195 return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top)); in gen8_pdp_for_page_index()
[all …]
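
Note: gen8_pd_top_count() above is a ceiling division: how many top-level descriptor entries are needed to cover vm->total bytes when each entry spans 2^shift bytes. A runnable check with assumed sizes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed: 48-bit address space, 39-bit span per top-level entry. */
	uint64_t total = 1ULL << 48;
	unsigned int shift = 39;

	/* ceil(total / 2^shift), computed with shifts as in the driver. */
	uint64_t count = (total + (1ULL << shift) - 1) >> shift;

	printf("%llu top-level entries\n", (unsigned long long)count); /* 512 */
	return 0;
}
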
intel_gtt.h
64 #define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
233 void (*bind_vma)(struct i915_address_space *vm,
242 void (*unbind_vma)(struct i915_address_space *vm,
308 (*alloc_pt_dma)(struct i915_address_space *vm, int sz);
310 (*alloc_scratch_dma)(struct i915_address_space *vm, int sz);
318 void (*allocate_va_range)(struct i915_address_space *vm,
321 void (*clear_range)(struct i915_address_space *vm,
323 void (*scratch_range)(struct i915_address_space *vm,
325 void (*insert_page)(struct i915_address_space *vm,
330 void (*insert_entries)(struct i915_address_space *vm,
[all …]
/linux/tools/testing/selftests/kvm/lib/x86_64/
sev.c
17 static void encrypt_region(struct kvm_vm *vm, struct userspace_mem_region *region) in encrypt_region() argument
21 const sparsebit_idx_t lowest_page_in_region = gpa_base >> vm->page_shift; in encrypt_region()
27 sev_register_encrypted_memory(vm, region); in encrypt_region()
30 const uint64_t size = (j - i + 1) * vm->page_size; in encrypt_region()
31 const uint64_t offset = (i - lowest_page_in_region) * vm->page_size; in encrypt_region()
33 sev_launch_update_data(vm, gpa_base + offset, size); in encrypt_region()
37 void sev_vm_init(struct kvm_vm *vm) in sev_vm_init() argument
39 if (vm->type == KVM_X86_DEFAULT_VM) { in sev_vm_init()
40 assert(vm->arch.sev_fd == -1); in sev_vm_init()
41 vm->arch.sev_fd = open_sev_dev_path_or_exit(); in sev_vm_init()
[all …]
/linux/tools/testing/selftests/kvm/lib/s390x/
processor.c
13 void virt_arch_pgd_alloc(struct kvm_vm *vm) in virt_arch_pgd_alloc() argument
17 TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x", in virt_arch_pgd_alloc()
18 vm->page_size); in virt_arch_pgd_alloc()
20 if (vm->pgd_created) in virt_arch_pgd_alloc()
23 paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION, in virt_arch_pgd_alloc()
25 vm->memslots[MEM_REGION_PT]); in virt_arch_pgd_alloc()
26 memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size); in virt_arch_pgd_alloc()
28 vm->pgd = paddr; in virt_arch_pgd_alloc()
29 vm->pgd_created = true; in virt_arch_pgd_alloc()
37 static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri) in virt_alloc_region() argument
[all …]
/linux/drivers/virt/acrn/
vm.c
25 struct acrn_vm *acrn_vm_create(struct acrn_vm *vm, in acrn_vm_create() argument
37 mutex_init(&vm->regions_mapping_lock); in acrn_vm_create()
38 INIT_LIST_HEAD(&vm->ioreq_clients); in acrn_vm_create()
39 spin_lock_init(&vm->ioreq_clients_lock); in acrn_vm_create()
40 vm->vmid = vm_param->vmid; in acrn_vm_create()
41 vm->vcpu_num = vm_param->vcpu_num; in acrn_vm_create()
43 if (acrn_ioreq_init(vm, vm_param->ioreq_buf) < 0) { in acrn_vm_create()
45 vm->vmid = ACRN_INVALID_VMID; in acrn_vm_create()
50 list_add(&vm->list, &acrn_vm_list); in acrn_vm_create()
53 acrn_ioeventfd_init(vm); in acrn_vm_create()
[all …]
irqfd.c
32 struct acrn_vm *vm; member
43 struct acrn_vm *vm = irqfd->vm; in acrn_irqfd_inject() local
45 acrn_msi_inject(vm, irqfd->msi.msi_addr, in acrn_irqfd_inject()
53 lockdep_assert_held(&irqfd->vm->irqfds_lock); in hsm_irqfd_shutdown()
65 struct acrn_vm *vm; in hsm_irqfd_shutdown_work() local
68 vm = irqfd->vm; in hsm_irqfd_shutdown_work()
69 mutex_lock(&vm->irqfds_lock); in hsm_irqfd_shutdown_work()
72 mutex_unlock(&vm->irqfds_lock); in hsm_irqfd_shutdown_work()
81 struct acrn_vm *vm; in hsm_irqfd_wakeup() local
84 vm = irqfd->vm; in hsm_irqfd_wakeup()
[all …]
ioreq.c
39 static int ioreq_complete_request(struct acrn_vm *vm, u16 vcpu, in ioreq_complete_request() argument
64 ret = hcall_notify_req_finish(vm->vmid, vcpu); in ioreq_complete_request()
79 if (vcpu >= client->vm->vcpu_num) in acrn_ioreq_complete_request()
84 acrn_req = (struct acrn_io_request *)client->vm->ioreq_buf; in acrn_ioreq_complete_request()
88 ret = ioreq_complete_request(client->vm, vcpu, acrn_req); in acrn_ioreq_complete_request()
93 int acrn_ioreq_request_default_complete(struct acrn_vm *vm, u16 vcpu) in acrn_ioreq_request_default_complete() argument
97 spin_lock_bh(&vm->ioreq_clients_lock); in acrn_ioreq_request_default_complete()
98 if (vm->default_client) in acrn_ioreq_request_default_complete()
99 ret = acrn_ioreq_complete_request(vm->default_client, in acrn_ioreq_request_default_complete()
101 spin_unlock_bh(&vm->ioreq_clients_lock); in acrn_ioreq_request_default_complete()
[all …]
/linux/drivers/gpu/drm/i915/selftests/
mock_gtt.c
27 static void mock_insert_page(struct i915_address_space *vm, in mock_insert_page() argument
35 static void mock_insert_entries(struct i915_address_space *vm, in mock_insert_entries() argument
41 static void mock_bind_ppgtt(struct i915_address_space *vm, in mock_bind_ppgtt() argument
51 static void mock_unbind_ppgtt(struct i915_address_space *vm, in mock_unbind_ppgtt() argument
56 static void mock_cleanup(struct i915_address_space *vm) in mock_cleanup() argument
60 static void mock_clear_range(struct i915_address_space *vm, in mock_clear_range() argument
73 ppgtt->vm.gt = to_gt(i915); in mock_ppgtt()
74 ppgtt->vm.i915 = i915; in mock_ppgtt()
75 ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE); in mock_ppgtt()
76 ppgtt->vm.dma = i915->drm.dev; in mock_ppgtt()
[all …]
/linux/sound/pci/ctxfi/
ctvmem.c
30 get_vm_block(struct ct_vm *vm, unsigned int size, struct ct_atc *atc) in get_vm_block() argument
36 if (size > vm->size) { in get_vm_block()
42 mutex_lock(&vm->lock); in get_vm_block()
43 list_for_each(pos, &vm->unused) { in get_vm_block()
48 if (pos == &vm->unused) in get_vm_block()
53 list_move(&entry->list, &vm->used); in get_vm_block()
54 vm->size -= size; in get_vm_block()
65 list_add(&block->list, &vm->used); in get_vm_block()
68 vm->size -= size; in get_vm_block()
71 mutex_unlock(&vm->lock); in get_vm_block()
[all …]
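
Note: get_vm_block() above is a first-fit scan of the vm->unused list under vm->lock: an exact-fit block moves wholesale to vm->used, otherwise (in the elided lines) a larger block is split. A self-contained kernel-style sketch of the exact-fit path; the struct layout here is illustrative, not the driver's:

#include <linux/list.h>
#include <linux/mutex.h>

struct block { struct list_head list; unsigned int addr, size; };
struct map   { struct list_head unused, used; struct mutex lock; unsigned int size; };

static struct block *first_fit(struct map *m, unsigned int size)
{
	struct block *entry = NULL;
	struct list_head *pos;

	if (size > m->size)
		return NULL; /* bigger than everything the map has left */

	mutex_lock(&m->lock);
	list_for_each(pos, &m->unused) {
		entry = list_entry(pos, struct block, list);
		if (entry->size >= size)
			break; /* first block that is big enough */
	}
	if (pos == &m->unused) {
		entry = NULL; /* nothing fits */
	} else if (entry->size == size) {
		list_move(&entry->list, &m->used); /* exact fit: reuse whole block */
		m->size -= size;
	} /* else: split-a-larger-block path elided, as in the listing */
	mutex_unlock(&m->lock);
	return entry;
}
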
/linux/drivers/gpu/drm/i915/display/
intel_dpt.c
17 struct i915_address_space vm; member
24 #define i915_is_dpt(vm) ((vm)->is_dpt) argument
27 i915_vm_to_dpt(struct i915_address_space *vm) in i915_vm_to_dpt() argument
29 BUILD_BUG_ON(offsetof(struct i915_dpt, vm)); in i915_vm_to_dpt()
30 drm_WARN_ON(&vm->i915->drm, !i915_is_dpt(vm)); in i915_vm_to_dpt()
31 return container_of(vm, struct i915_dpt, vm); in i915_vm_to_dpt()
34 #define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT)
41 static void dpt_insert_page(struct i915_address_space *vm, in dpt_insert_page() argument
47 struct i915_dpt *dpt = i915_vm_to_dpt(vm); in dpt_insert_page()
51 vm->pte_encode(addr, pat_index, flags)); in dpt_insert_page()
[all …]
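
Note: i915_vm_to_dpt() above downcasts with container_of(); the BUILD_BUG_ON(offsetof(struct i915_dpt, vm)) at line 29 pins vm as the first member so the cast is offset-free. A runnable userspace illustration of the same idiom, with stand-in types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct address_space { unsigned long total; };
struct dpt { struct address_space vm; int extra; }; /* vm kept first */

int main(void)
{
	struct dpt d = { .vm = { 4096 }, .extra = 7 };
	struct address_space *vm = &d.vm;

	/* Recover the containing struct from the embedded member. */
	struct dpt *back = container_of(vm, struct dpt, vm);
	printf("extra = %d\n", back->extra); /* 7 */
	return 0;
}
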
/linux/tools/testing/selftests/kvm/include/
kvm_util.h
53 struct kvm_vm *vm; member
137 #define kvm_for_each_vcpu(vm, i, vcpu) \ argument
138 for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++) \
139 if (!((vcpu) = vm->vcpus[i])) \
144 memslot2region(struct kvm_vm *vm, uint32_t memslot);
146 static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm, in vm_get_mem_region() argument
150 return memslot2region(vm, vm->memslots[type]); in vm_get_mem_region()
293 static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { } in static_assert_is_vm() argument
295 #define __vm_ioctl(vm, cmd, arg) \ argument
297 static_assert_is_vm(vm); \
[all …]
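
Note: kvm_for_each_vcpu() at line 137 above walks vcpu ids 0..last_vcpu_id and skips unpopulated slots via the assignment-inside-if pattern. A usage sketch, assuming the selftest harness:

#include <stdio.h>
#include "kvm_util.h"

static void list_vcpus(struct kvm_vm *vm)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(vm, i, vcpu) {
		/* vcpu is guaranteed non-NULL inside the body. */
		printf("vcpu %d is present\n", i);
	}
}
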
/linux/tools/testing/selftests/kvm/aarch64/
smccc_filter.c
40 static int __set_smccc_filter(struct kvm_vm *vm, uint32_t start, uint32_t nr_functions, in __set_smccc_filter() argument
49 return __kvm_device_attr_set(vm->fd, KVM_ARM_VM_SMCCC_CTRL, in __set_smccc_filter()
53 static void set_smccc_filter(struct kvm_vm *vm, uint32_t start, uint32_t nr_functions, in set_smccc_filter() argument
56 int ret = __set_smccc_filter(vm, start, nr_functions, action); in set_smccc_filter()
64 struct kvm_vm *vm; in setup_vm() local
66 vm = vm_create(1); in setup_vm()
67 vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init); in setup_vm()
75 *vcpu = aarch64_vcpu_add(vm, 0, &init, guest_main); in setup_vm()
76 return vm; in setup_vm()
82 struct kvm_vm *vm = setup_vm(&vcpu); in test_pad_must_be_zero() local
[all …]
/linux/tools/testing/selftests/kvm/lib/riscv/
processor.c
29 static uint64_t page_align(struct kvm_vm *vm, uint64_t v) in page_align() argument
31 return (v + vm->page_size) & ~(vm->page_size - 1); in page_align()
34 static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry) in pte_addr() argument
40 static uint64_t ptrs_per_pte(struct kvm_vm *vm) in ptrs_per_pte() argument
59 static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level) in pte_index() argument
63 TEST_ASSERT(level < vm->pgtable_levels, in pte_index()
69 void virt_arch_pgd_alloc(struct kvm_vm *vm) in virt_arch_pgd_alloc() argument
71 size_t nr_pages = page_align(vm, ptrs_per_pte(vm) * 8) / vm->page_size; in virt_arch_pgd_alloc()
73 if (vm->pgd_created) in virt_arch_pgd_alloc()
76 vm->pgd = vm_phy_pages_alloc(vm, nr_pages, in virt_arch_pgd_alloc()
[all …]
/linux/tools/testing/selftests/kvm/
set_memory_region_test.c
113 struct kvm_vm *vm; in spawn_vm() local
117 vm = vm_create_with_one_vcpu(vcpu, guest_code); in spawn_vm()
119 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP, in spawn_vm()
127 gpa = vm_phy_pages_alloc(vm, 2, MEM_REGION_GPA, MEM_REGION_SLOT); in spawn_vm()
130 virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 2); in spawn_vm()
133 hva = addr_gpa2hva(vm, MEM_REGION_GPA); in spawn_vm()
141 return vm; in spawn_vm()
182 struct kvm_vm *vm; in test_move_memory_region() local
185 vm = spawn_vm(&vcpu, &vcpu_thread, guest_code_move_memory_region); in test_move_memory_region()
188 vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2, KVM_X86_QUIRK_SLOT_ZAP_ALL); in test_move_memory_region()
[all …]
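
Note: spawn_vm() above creates a one-vCPU VM, backs MEM_REGION_GPA with a THP-backed memslot of its own, and identity-maps it so the guest virtual address equals the GPA. A sketch of that setup, assuming the selftest harness; the 2-page count follows the vm_phy_pages_alloc()/virt_map() calls in the excerpt:

#include "kvm_util.h"

static struct kvm_vm *spawn_vm_sketch(struct kvm_vcpu **vcpu,
				      void *guest_code)
{
	struct kvm_vm *vm = vm_create_with_one_vcpu(vcpu, guest_code);

	/* Dedicated THP-backed slot covering MEM_REGION_GPA. */
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP,
				    MEM_REGION_GPA, MEM_REGION_SLOT, 2, 0);

	/* Identity map: GVA == GPA for the test region. */
	virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 2);
	return vm;
}
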
