/linux/tools/testing/selftests/kvm/lib/arm64/processor.c:

    static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
    {
            return (v + vm->page_size) & ~(vm->page_size - 1);
    }

    static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
    {
            unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
            uint64_t mask = (1UL << (vm->va_bits - shift)) - 1;
            ...
    }

    static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
    {
            unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;
            uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

            TEST_ASSERT(vm->pgtable_levels == 4,
                        "Mode %d does not have 4 page table levels", vm->mode);
            ...
    }
    ...
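
The index/mask arithmetic above is generic radix page-table math: with 8-byte descriptors there are (page_shift - 3) index bits per level, and the top-level index covers whatever VA bits remain above the lower levels. A minimal standalone sketch of the pgd_index() computation, assuming 4 KiB pages, 48-bit VAs and 4 levels purely for illustration (these constants are not taken from the selftest):

    #include <assert.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed example parameters: 4 KiB pages, 48-bit VAs, 4 levels. */
    #define PAGE_SHIFT     12u
    #define VA_BITS        48u
    #define PGTABLE_LEVELS 4u

    static uint64_t example_pgd_index(uint64_t gva)
    {
            /* Bits consumed by the lower levels plus the page offset. */
            unsigned int shift = (PGTABLE_LEVELS - 1) * (PAGE_SHIFT - 3) + PAGE_SHIFT;
            /* The remaining VA bits select the top-level entry. */
            uint64_t mask = (1ULL << (VA_BITS - shift)) - 1;

            return (gva >> shift) & mask;
    }

    int main(void)
    {
            /* shift = 3 * 9 + 12 = 39, so each top-level entry spans 512 GiB. */
            assert(example_pgd_index(0) == 0);
            assert(example_pgd_index(1ULL << 39) == 1);
            printf("pgd_index(1 << 39) = %" PRIu64 "\n", example_pgd_index(1ULL << 39));
            return 0;
    }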
|
/linux/tools/testing/selftests/kvm/s390/cmma_test.c:

    static void create_main_memslot(struct kvm_vm *vm)
    {
            ...
            vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, MAIN_PAGE_COUNT, 0);
            ...
                    vm->memslots[i] = 0;
    }

    static void create_test_memslot(struct kvm_vm *vm)
    {
            vm_userspace_mem_region_add(vm,
                                        ...
                                        TEST_DATA_START_GFN << vm->page_shift,
                                        ...
            vm->memslots[MEM_REGION_TEST_DATA] = TEST_DATA_MEMSLOT;
    }

    static void create_memslots(struct kvm_vm *vm)
    {
            ...
            create_main_memslot(vm);
            create_test_memslot(vm);
    }

    Further matches on "vm" in this file:

            finish_vm_setup(struct kvm_vm *vm)
            struct kvm_vm *vm;                               /* local in create_vm_two_memslots() */
            enable_cmma(struct kvm_vm *vm)
            enable_dirty_tracking(struct kvm_vm *vm)
            __enable_migration_mode(struct kvm_vm *vm)
            enable_migration_mode(struct kvm_vm *vm)
            is_migration_mode_on(struct kvm_vm *vm)
            vm_get_cmma_bits(struct kvm_vm *vm, u64 flags, int *errno_out)
            struct kvm_vm *vm = create_vm_two_memslots();    /* local in test_get_cmma_basic() */
            struct kvm_vm *vm = vm_create_barebones();       /* local in test_migration_mode() */
            assert_all_slots_cmma_dirty(struct kvm_vm *vm)
            assert_no_pages_cmma_dirty(struct kvm_vm *vm)
            struct kvm_vm *vm = create_vm_two_memslots();    /* local in test_get_initial_dirty() */
            query_cmma_range(struct kvm_vm *vm, u64 start_gfn, u64 gfn_count, struct kvm_s390_cmma_log *res_out)
            struct kvm_vm *vm = create_vm_two_memslots();    /* local in test_get_skip_holes() */
            struct kvm_vm *vm = vm_create_barebones();       /* local in machine_has_cmma() */
            ...
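
The test-data memslot above is placed by turning a guest frame number into a guest physical address with TEST_DATA_START_GFN << vm->page_shift, so it sits well above the main slot at GPA 0. A tiny sketch of that conversion, with an invented GFN and 4 KiB pages purely for illustration (the real constants live in the selftest):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed example values, not the selftest's constants. */
    #define EXAMPLE_PAGE_SHIFT           12u
    #define EXAMPLE_TEST_DATA_START_GFN  0x1000u   /* hypothetical */

    int main(void)
    {
            uint64_t gpa = (uint64_t)EXAMPLE_TEST_DATA_START_GFN << EXAMPLE_PAGE_SHIFT;

            /* 0x1000 frames * 4 KiB per frame = a 16 MiB guest physical offset. */
            printf("test data memslot would start at GPA 0x%" PRIx64 "\n", gpa);
            return 0;
    }
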
/linux/drivers/gpu/drm/xe/xe_vm.h:

    static inline struct xe_vm *xe_vm_get(struct xe_vm *vm)
    {
            drm_gpuvm_get(&vm->gpuvm);
            return vm;
    }

    static inline void xe_vm_put(struct xe_vm *vm)
    {
            drm_gpuvm_put(&vm->gpuvm);
    }

    int xe_vm_lock(struct xe_vm *vm, bool intr);

    void xe_vm_unlock(struct xe_vm *vm);

    static inline bool xe_vm_is_closed(struct xe_vm *vm)
    {
            ...
            return !vm->size;
    }

    static inline bool xe_vm_is_banned(struct xe_vm *vm)
    ...
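
xe_vm_get()/xe_vm_put() are thin typed wrappers that forward reference counting to the embedded drm_gpuvm, so callers only ever handle struct xe_vm pointers. The sketch below shows the same embed-the-base pattern with a hypothetical, non-atomic base_obj refcount API standing in for the DRM one (a real implementation would use atomic refcounts):

    #include <stdlib.h>

    /* Hypothetical stand-in for the embedded base object and its refcount API. */
    struct base_obj {
            int refcount;               /* not thread-safe; illustration only */
    };

    static void base_obj_get(struct base_obj *b) { b->refcount++; }
    static int  base_obj_put(struct base_obj *b) { return --b->refcount; }

    /* The derived object embeds the base, like struct xe_vm embeds drm_gpuvm. */
    struct demo_vm {
            struct base_obj base;
            unsigned long size;
    };

    /* Typed wrappers: callers never touch the base class directly. */
    static struct demo_vm *demo_vm_get(struct demo_vm *vm)
    {
            base_obj_get(&vm->base);
            return vm;                  /* returning vm lets callers chain the get */
    }

    static void demo_vm_put(struct demo_vm *vm)
    {
            if (base_obj_put(&vm->base) == 0)
                    free(vm);
    }

    int main(void)
    {
            struct demo_vm *vm = calloc(1, sizeof(*vm));

            vm->base.refcount = 1;      /* creation reference */
            demo_vm_get(vm);            /* extra reference for a second user */
            demo_vm_put(vm);
            demo_vm_put(vm);            /* last put frees the object */
            return 0;
    }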
|
/linux/drivers/gpu/drm/xe/xe_svm.c:

    xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
                                       ...)
    {
            struct xe_device *xe = vm->xe;
            ...
            spin_lock(&vm->svm.garbage_collector.lock);
            ...
                          &vm->svm.garbage_collector.range_list);
            spin_unlock(&vm->svm.garbage_collector.lock);
            ...
            queue_work(xe->usm.pf_wq, &vm->svm.garbage_collector.work);
    }

    xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
                                      ...)
    {
            struct xe_device *xe = vm->xe;
            ...
            xe_svm_assert_in_notifier(vm);
            ...
            if (xe_pt_zap_ptes_range(tile, vm, range)) {
    ...
|
/linux/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c:

    struct amdgpu_vm *vm;                     /* struct member */
    ...
    static void amdgpu_vm_assert_locked(struct amdgpu_vm *vm)
    {
            dma_resv_assert_held(vm->root.bo->tbo.base.resv);
    }

    /* in amdgpu_vm_bo_evicted() */
    struct amdgpu_vm *vm = vm_bo->vm;
    ...
    amdgpu_vm_assert_locked(vm);
    spin_lock(&vm_bo->vm->status_lock);
    ...
    list_move(&vm_bo->vm_status, &vm->evicted);
    ...
    list_move_tail(&vm_bo->vm_status, &vm->evicted);
    spin_unlock(&vm_bo->vm->status_lock);

    /* in amdgpu_vm_bo_moved() */
    amdgpu_vm_assert_locked(vm_bo->vm);
    ...
|
/linux/tools/testing/selftests/kvm/lib/loongarch/processor.c:

    static uint64_t virt_pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level)
    {
            ...
            shift = level * (vm->page_shift - 3) + vm->page_shift;
            mask = (1UL << (vm->page_shift - 3)) - 1;
            ...
    }

    static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
    {
            return entry & ~((0x1UL << vm->page_shift) - 1);
    }

    static uint64_t ptrs_per_pte(struct kvm_vm *vm)
    {
            return 1 << (vm->page_shift - 3);
    }

    static void virt_set_pgtable(struct kvm_vm *vm, vm_paddr_t table, vm_paddr_t child)
    {
            ...
            ptep = addr_gpa2hva(vm, table);
            ptrs_per_pte = 1 << (vm->page_shift - 3);
    ...
|
/linux/drivers/gpu/drm/panthor/panthor_mmu.c:

    struct panthor_vm *vm;                    /* struct member */
    ...
    } vm;                                     /* struct member */
    ...
    struct panthor_vm *vm;                    /* struct member */

    /* in alloc_pt() */
    struct panthor_vm *vm = cookie;
    ...
    if (unlikely(!vm->root_page_table)) {
            ...
            drm_WARN_ON(&vm->ptdev->base, vm->op_ctx);
            p = alloc_pages_node(dev_to_node(vm->ptdev->base.dev),
                                 ...
            vm->root_page_table = page;
    ...
    if (drm_WARN_ON(&vm->ptdev->base, size != SZ_4K))
            ...
    if (drm_WARN_ON(&vm->ptdev->base, !vm->op_ctx) ||
    ...
|
/linux/drivers/gpu/drm/lima/lima_vm.c:

    struct lima_vm *vm;                       /* struct member */
    ...
    static void lima_vm_unmap_range(struct lima_vm *vm, u32 start, u32 end)
    {
            ...
            vm->bts[pbe].cpu[bte] = 0;
            ...
    }

    static int lima_vm_map_page(struct lima_vm *vm, dma_addr_t pa, u32 va)
    {
            ...
            if (!vm->bts[pbe].cpu) {
                    ...
                    vm->bts[pbe].cpu = dma_alloc_wc(
                            vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
                            &vm->bts[pbe].dma, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
                    if (!vm->bts[pbe].cpu)
                            ...
                    pts = vm->bts[pbe].dma;
    ...
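
lima_vm_map_page() allocates the block of page tables covering a VA only on first use (the "if (!vm->bts[pbe].cpu)" branch above). A minimal sketch of that lazy-allocation control flow in plain C, with calloc() standing in for dma_alloc_wc() and invented sizes; it is not the driver's code:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define DEMO_PT_ENTRIES 1024u       /* invented: entries per page-table block */
    #define DEMO_NUM_BLOCKS 16u         /* invented: number of lazily allocated blocks */

    struct demo_vm {
            uint32_t *blocks[DEMO_NUM_BLOCKS];   /* NULL until a mapping first touches them */
    };

    /* Map one page: allocate the covering block on demand, then fill one entry. */
    static int demo_map_page(struct demo_vm *vm, uint32_t pa, uint32_t va)
    {
            uint32_t block = (va / DEMO_PT_ENTRIES) % DEMO_NUM_BLOCKS;
            uint32_t entry = va % DEMO_PT_ENTRIES;

            if (!vm->blocks[block]) {
                    vm->blocks[block] = calloc(DEMO_PT_ENTRIES, sizeof(uint32_t));
                    if (!vm->blocks[block])
                            return -1;  /* the driver would return -ENOMEM here */
            }

            vm->blocks[block][entry] = pa | 1u;  /* low bit as a "present" flag */
            return 0;
    }

    int main(void)
    {
            struct demo_vm vm;

            memset(&vm, 0, sizeof(vm));
            return demo_map_page(&vm, 0x1000, 42);
    }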
|
/linux/tools/testing/selftests/kvm/lib/s390/processor.c:

    void virt_arch_pgd_alloc(struct kvm_vm *vm)
    {
            ...
            TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
                        vm->page_size);

            if (vm->pgd_created)
                    ...
            paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
                                       ...
                                       vm->memslots[MEM_REGION_PT]);
            memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);

            vm->pgd = paddr;
            vm->pgd_created = true;
    }
    ...
    static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri)
    ...
|
/linux/tools/testing/selftests/kvm/lib/kvm_util.c:

    void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
    {
            if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL))
                    vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size);
            ...
                    vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size);
            vm->dirty_ring_size = ring_size;
    }

    static void vm_open(struct kvm_vm *vm)
    {
            vm->kvm_fd = _open_kvm_dev_path_or_exit(O_RDWR);
            ...
            vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, (void *)vm->type);
            TEST_ASSERT(vm->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm->fd));
            ...
            vm->stats.fd = vm_get_stats_fd(vm);
    ...
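
vm_enable_dirty_ring() prefers the acquire/release flavour of the dirty-ring capability and, by the shape of the snippet (the line between the two enable calls is elided), presumably falls back to the plain one. The probe-then-fallback pattern looks roughly like the sketch below, with stubbed-out capability checks; it is not the selftest library itself:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the two capabilities and the enable call. */
    enum demo_cap { DEMO_CAP_RING, DEMO_CAP_RING_ACQ_REL };

    static bool demo_check_cap(enum demo_cap cap)
    {
            /* Pretend only the older capability is available on this host. */
            return cap == DEMO_CAP_RING;
    }

    static void demo_enable_cap(enum demo_cap cap, unsigned int ring_size)
    {
            printf("enabling cap %d with ring size %u\n", cap, ring_size);
    }

    /* Prefer the newer capability, fall back to the older one otherwise. */
    static void demo_enable_dirty_ring(unsigned int ring_size)
    {
            if (demo_check_cap(DEMO_CAP_RING_ACQ_REL))
                    demo_enable_cap(DEMO_CAP_RING_ACQ_REL, ring_size);
            else
                    demo_enable_cap(DEMO_CAP_RING, ring_size);
    }

    int main(void)
    {
            demo_enable_dirty_ring(4096);
            return 0;
    }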
|
/linux/drivers/virt/acrn/vm.c:

    struct acrn_vm *acrn_vm_create(struct acrn_vm *vm,
                                   ...)
    {
            ...
            mutex_init(&vm->regions_mapping_lock);
            INIT_LIST_HEAD(&vm->ioreq_clients);
            spin_lock_init(&vm->ioreq_clients_lock);
            vm->vmid = vm_param->vmid;
            vm->vcpu_num = vm_param->vcpu_num;

            if (acrn_ioreq_init(vm, vm_param->ioreq_buf) < 0) {
                    ...
                    vm->vmid = ACRN_INVALID_VMID;
            ...
            list_add(&vm->list, &acrn_vm_list);
            ...
            acrn_ioeventfd_init(vm);
    ...
|
/linux/drivers/virt/acrn/irqfd.c:

    struct acrn_vm *vm;                       /* struct member */

    /* in acrn_irqfd_inject() */
    struct acrn_vm *vm = irqfd->vm;
    ...
    acrn_msi_inject(vm, irqfd->msi.msi_addr,
                    ...);

    /* in hsm_irqfd_shutdown() */
    lockdep_assert_held(&irqfd->vm->irqfds_lock);

    /* in hsm_irqfd_shutdown_work() */
    struct acrn_vm *vm;
    ...
    vm = irqfd->vm;
    mutex_lock(&vm->irqfds_lock);
    ...
    mutex_unlock(&vm->irqfds_lock);

    /* in hsm_irqfd_wakeup() */
    struct acrn_vm *vm;
    ...
    vm = irqfd->vm;
    ...
|
/linux/drivers/virt/acrn/ioreq.c:

    static int ioreq_complete_request(struct acrn_vm *vm, u16 vcpu,
                                      ...)
    {
            ...
            ret = hcall_notify_req_finish(vm->vmid, vcpu);
    ...

    /* in acrn_ioreq_complete_request() */
    if (vcpu >= client->vm->vcpu_num)
            ...
    acrn_req = (struct acrn_io_request *)client->vm->ioreq_buf;
    ...
    ret = ioreq_complete_request(client->vm, vcpu, acrn_req);
    ...

    int acrn_ioreq_request_default_complete(struct acrn_vm *vm, u16 vcpu)
    {
            ...
            spin_lock_bh(&vm->ioreq_clients_lock);
            if (vm->default_client)
                    ret = acrn_ioreq_complete_request(vm->default_client,
                                                      ...
            spin_unlock_bh(&vm->ioreq_clients_lock);
    ...
|
/linux/drivers/virt/acrn/ioeventfd.c:

    static void acrn_ioeventfd_shutdown(struct acrn_vm *vm, struct hsm_ioeventfd *p)
    {
            lockdep_assert_held(&vm->ioeventfds_lock);
    ...

    static bool hsm_ioeventfd_is_conflict(struct acrn_vm *vm,
                                          ...)
    {
            ...
            lockdep_assert_held(&vm->ioeventfds_lock);
            ...
            list_for_each_entry(p, &vm->ioeventfds, list)
    ...

    static int acrn_ioeventfd_assign(struct acrn_vm *vm,
                                     ...)
    {
            ...
            mutex_lock(&vm->ioeventfds_lock);

            if (hsm_ioeventfd_is_conflict(vm, p)) {
                    ...
            ret = acrn_ioreq_range_add(vm->ioeventfd_client, p->type,
                                       ...
            list_add_tail(&p->list, &vm->ioeventfds);
    ...
|
/linux/drivers/gpu/drm/i915/selftests/mock_gtt.c:

    static void mock_insert_page(struct i915_address_space *vm,
                                 ...)
    ...
    static void mock_insert_entries(struct i915_address_space *vm,
                                    ...)
    ...
    static void mock_bind_ppgtt(struct i915_address_space *vm,
                                ...)
    ...
    static void mock_unbind_ppgtt(struct i915_address_space *vm,
                                  ...)
    ...
    static void mock_cleanup(struct i915_address_space *vm)
    ...
    static void mock_clear_range(struct i915_address_space *vm,
                                 ...)
    ...
    /* in mock_ppgtt() */
    ppgtt->vm.gt = to_gt(i915);
    ppgtt->vm.i915 = i915;
    ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
    ppgtt->vm.dma = i915->drm.dev;
    ...
|
/linux/drivers/gpu/drm/i915/gt/gen8_ppgtt.c:

    /* in gen8_ppgtt_notify_vgt() */
    struct drm_i915_private *i915 = ppgtt->vm.i915;
    struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
    ...
    if (i915_vm_is_4lvl(&ppgtt->vm)) {
    ...

    static unsigned int gen8_pd_top_count(const struct i915_address_space *vm)
    {
            unsigned int shift = __gen8_pte_shift(vm->top);

            return (vm->total + (1ull << shift) - 1) >> shift;
    }

    gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
    {
            struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);

            if (vm->top == 2)
                    ...
            return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
    }
    ...
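
gen8_pd_top_count() is a round-up division expressed with shifts: the number of top-level page-directory entries needed to cover vm->total bytes when each entry spans 1 << shift bytes. A standalone worked example with assumed numbers (a 48-bit address space and a 39-bit top-level entry span, chosen for illustration):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Round-up division by a power of two, as in gen8_pd_top_count(). */
    static unsigned int demo_top_count(uint64_t total, unsigned int shift)
    {
            return (total + (1ull << shift) - 1) >> shift;
    }

    int main(void)
    {
            /* Assumed example: a 256 TiB (2^48) space with each top entry
             * spanning 2^39 bytes (512 GiB) needs 512 top-level entries. */
            assert(demo_top_count(1ull << 48, 39) == 512);

            /* A 1 GiB space still needs one entry: the add rounds it up. */
            assert(demo_top_count(1ull << 30, 39) == 1);

            printf("top entries for 256 TiB: %u\n", demo_top_count(1ull << 48, 39));
            return 0;
    }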
|
/linux/drivers/gpu/drm/i915/gt/gen6_ppgtt.c:

    /* in gen6_write_pde() */
    dma_addr_t addr = pt ? px_dma(pt) : px_dma(ppgtt->base.vm.scratch[1]);
    ...
    static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
                                       ...)
    {
            struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
            ...
            const gen6_pte_t scratch_pte = vm->scratch[0]->encode;
    ...

    static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
                                          ...)
    {
            ...
            struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
            ...
            const u32 pte_encode = vm->pte_encode(0, pat_index, flags);
    ...

    /* in gen6_flush_pd() */
    gen6_ggtt_invalidate(ppgtt->base.vm.gt->ggtt);
    ...
    static void gen6_alloc_va_range(struct i915_address_space *vm,
                                    ...)
    {
            ...
            struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
    ...
|
/linux/tools/testing/selftests/kvm/include/kvm_util.h:

    struct kvm_vm *vm;                        /* struct member */
    ...
    #define kvm_for_each_vcpu(vm, i, vcpu)                          \
            for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++)         \
                    if (!((vcpu) = vm->vcpus[i]))                   \
    ...

    memslot2region(struct kvm_vm *vm, uint32_t memslot);

    static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm,
                                                                 ...)
    {
            ...
            return memslot2region(vm, vm->memslots[type]);
    }
    ...
    static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { }

    #define __vm_ioctl(vm, cmd, arg)                                \
    ...
            static_assert_is_vm(vm);                                \
    ...
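
kvm_for_each_vcpu() combines a for loop with an if filter so that the caller's loop body only runs for populated vCPU slots; the tail of the macro is cut off above, but a common way to finish this idiom is "continue; else", which keeps the user's statement as the else branch. A hedged sketch of that idiom over a plain array, not the selftest header itself:

    #include <stdio.h>

    struct demo_vcpu {
            int id;
    };

    /* Iterate a sparse array, skipping NULL slots: the continue/else trick
     * makes the user's statement the else branch of the filter. */
    #define demo_for_each_vcpu(arr, n, i, vcpu)             \
            for ((i) = 0; (i) < (n); (i)++)                 \
                    if (!((vcpu) = (arr)[(i)]))             \
                            continue;                       \
                    else

    int main(void)
    {
            struct demo_vcpu a = { .id = 0 }, c = { .id = 2 };
            struct demo_vcpu *vcpus[3] = { &a, NULL, &c };
            struct demo_vcpu *vcpu;
            int i;

            demo_for_each_vcpu(vcpus, 3, i, vcpu)
                    printf("vcpu %d is present\n", vcpu->id);   /* prints 0 and 2 */

            return 0;
    }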
|
/linux/drivers/gpu/drm/radeon/radeon_vm.c:

    /* in radeon_vm_get_bos() */
    ... struct radeon_vm *vm,
    ...
    list = kvmalloc_array(vm->max_pde_used + 2,
                          ...);
    ...
    list[0].robj = vm->page_directory;
    ...
    for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
            if (!vm->page_tables[i].bo)
                    ...
            list[idx].robj = vm->page_tables[i].bo;
    ...

    /* in radeon_vm_grab_id() */
    ... struct radeon_vm *vm, int ring)
    {
            ...
            struct radeon_vm_id *vm_id = &vm->ids[ring];
    ...

    /* in radeon_vm_flush() */
    ... struct radeon_vm *vm,
    ...
    uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
    ...
|
/linux/tools/testing/selftests/kvm/lib/riscv/processor.c:

    static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
    {
            return (v + vm->page_size) & ~(vm->page_size - 1);
    }

    static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
    ...
    static uint64_t ptrs_per_pte(struct kvm_vm *vm)
    ...
    static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level)
    {
            ...
            TEST_ASSERT(level < vm->pgtable_levels,
                        ...
    }

    void virt_arch_pgd_alloc(struct kvm_vm *vm)
    {
            size_t nr_pages = page_align(vm, ptrs_per_pte(vm) * 8) / vm->page_size;

            if (vm->pgd_created)
                    ...
            vm->pgd = vm_phy_pages_alloc(vm, nr_pages,
                                         ...
    ...
|
/linux/tools/testing/selftests/kvm/set_memory_region_test.c:

    /* in spawn_vm() */
    struct kvm_vm *vm;
    ...
    vm = vm_create_with_one_vcpu(vcpu, guest_code);
    ...
    vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP,
                                ...);
    ...
    gpa = vm_phy_pages_alloc(vm, 2, MEM_REGION_GPA, MEM_REGION_SLOT);
    ...
    virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 2);
    ...
    hva = addr_gpa2hva(vm, MEM_REGION_GPA);
    ...
    return vm;

    /* in test_move_memory_region() */
    struct kvm_vm *vm;
    ...
    vm = spawn_vm(&vcpu, &vcpu_thread, guest_code_move_memory_region);
    ...
    vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2, KVM_X86_QUIRK_SLOT_ZAP_ALL);
    ...
|
/linux/drivers/gpu/drm/msm/msm_gem_vma.c:

    struct drm_gpuvm *vm;                     /* struct member */

    /* in msm_gem_vm_free() */
    struct msm_gem_vm *vm = container_of(gpuvm, struct msm_gem_vm, base);
    ...
    drm_mm_takedown(&vm->mm);
    if (vm->mmu)
            vm->mmu->funcs->destroy(vm->mmu);
    dma_fence_put(vm->last_fence);
    put_pid(vm->pid);
    kfree(vm->log);
    kfree(vm);

    /* in msm_gem_vm_unusable() */
    struct msm_gem_vm *vm = to_msm_vm(gpuvm);
    ...
|
/linux/tools/testing/selftests/kvm/lib/x86/processor.c:

    void virt_arch_pgd_alloc(struct kvm_vm *vm)
    {
            TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
                        "Unknown or unsupported guest mode: 0x%x", vm->mode);
            ...
            if (!vm->pgd_created) {
                    vm->pgd = vm_alloc_page_table(vm);
                    vm->pgd_created = true;
    ...
    }

    static void *virt_get_pte(struct kvm_vm *vm, uint64_t *parent_pte,
                              ...)
    {
            ...
            uint64_t *page_table = addr_gpa2hva(vm, pt_gpa);
            ...
            TEST_ASSERT((*parent_pte & PTE_PRESENT_MASK) || parent_pte == &vm->pgd,
                        ...
    }

    static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
                                           ...
    ...
|
/linux/tools/testing/selftests/kvm/lib/x86/vmx.c:

    vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva)
    {
            vm_vaddr_t vmx_gva = vm_vaddr_alloc_page(vm);
            struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva);
            ...
            vmx->vmxon = (void *)vm_vaddr_alloc_page(vm);
            vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon);
            vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon);
            ...
            vmx->vmcs = (void *)vm_vaddr_alloc_page(vm);
            vmx->vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmcs);
            vmx->vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmcs);
            ...
            vmx->msr = (void *)vm_vaddr_alloc_page(vm);
    ...
|
/linux/drivers/gpu/drm/imx/dcss/dcss-ss.c:

    void dcss_ss_sync_set(struct dcss_ss *ss, struct videomode *vm,
                          ...)
    {
            ...
            lrc_x = vm->hfront_porch + vm->hback_porch + vm->hsync_len +
                    vm->hactive - 1;
            lrc_y = vm->vfront_porch + vm->vback_porch + vm->vsync_len +
                    vm->vactive - 1;
            ...
            hsync_start = vm->hfront_porch + vm->hback_porch + vm->hsync_len +
                          vm->hactive - 1;
            hsync_end = vm->hsync_len - 1;
            ...
            vsync_start = vm->vfront_porch - 1;
            vsync_end = vm->vfront_porch + vm->vsync_len - 1;
    ...
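
dcss_ss_sync_set() turns the porch/sync/active fields of a struct videomode into last-row/column counter values, e.g. lrc_x = hfront_porch + hback_porch + hsync_len + hactive - 1. A worked example with assumed 1920x1080@60 CEA timings, purely for illustration (the driver takes these values from the mode it is given, not from constants):

    #include <stdio.h>

    /* Subset of the videomode fields used by the snippet above. */
    struct demo_videomode {
            unsigned int hactive, hfront_porch, hback_porch, hsync_len;
            unsigned int vactive, vfront_porch, vback_porch, vsync_len;
    };

    int main(void)
    {
            /* Assumed CEA-861 1920x1080@60 timings, for illustration only. */
            struct demo_videomode vm = {
                    .hactive = 1920, .hfront_porch = 88, .hback_porch = 148, .hsync_len = 44,
                    .vactive = 1080, .vfront_porch = 4,  .vback_porch = 36,  .vsync_len = 5,
            };

            /* Last column/row counter values, as computed in dcss_ss_sync_set(). */
            unsigned int lrc_x = vm.hfront_porch + vm.hback_porch + vm.hsync_len +
                                 vm.hactive - 1;
            unsigned int lrc_y = vm.vfront_porch + vm.vback_porch + vm.vsync_len +
                                 vm.vactive - 1;

            printf("lrc_x = %u (total line width 2200 - 1)\n", lrc_x);    /* 2199 */
            printf("lrc_y = %u (total frame height 1125 - 1)\n", lrc_y);  /* 1124 */
            return 0;
    }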
|