Lines Matching defs:vm (the i915 GTT code, drivers/gpu/drm/i915/gt/intel_gtt.c)
41 struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
57 obj = __i915_gem_object_create_lmem_with_ps(vm->i915, sz, sz,
58 vm->lmem_pt_obj_flags);
60 * Ensure all paging structures for this vm share the same dma-resv
65 obj->base.resv = i915_vm_resv_get(vm);
66 obj->shares_resv_from = vm;
68 if (vm->fpriv)
69 i915_drm_client_add_object(vm->fpriv->client, obj);
75 struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
79 if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
80 i915_gem_shrink_all(vm->i915);
82 obj = i915_gem_object_create_internal(vm->i915, sz);
84 * Ensure all paging structures for this vm share the same dma-resv
89 obj->base.resv = i915_vm_resv_get(vm);
90 obj->shares_resv_from = vm;
92 if (vm->fpriv)
93 i915_drm_client_add_object(vm->fpriv->client, obj);
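Both allocators hand back a GEM object whose base.resv has been swapped for the vm's own _resv (taken with i915_vm_resv_get()), so one object lock covers every paging structure of the address space. Below is a minimal sketch of that property, assuming only the prototypes shown above plus the usual i915/kernel helpers (SZ_4K, IS_ERR, GEM_BUG_ON, i915_gem_object_put); the function name is illustrative, not part of the driver.

static void example_shared_resv(struct i915_address_space *vm)
{
	struct drm_i915_gem_object *a, *b;

	/* Illustrative only: not a drop-in hunk for the driver. */
	a = alloc_pt_dma(vm, SZ_4K);
	b = alloc_pt_dma(vm, SZ_4K);

	/* Both objects borrow the same reservation object, &vm->_resv. */
	if (!IS_ERR(a) && !IS_ERR(b))
		GEM_BUG_ON(a->base.resv != b->base.resv);

	if (!IS_ERR(a))
		i915_gem_object_put(a);
	if (!IS_ERR(b))
		i915_gem_object_put(b);
}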
99 int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
104 type = intel_gt_coherent_map_type(vm->gt, obj, true);
112 if (IS_METEORLAKE(vm->i915))
123 int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
128 type = intel_gt_coherent_map_type(vm->gt, obj, true);
136 if (IS_METEORLAKE(vm->i915))
170 * Delay the vm and vm mutex freeing until the
173 i915_vm_resv_get(vma->vm);
182 static void __i915_vm_close(struct i915_address_space *vm)
184 mutex_lock(&vm->mutex);
186 clear_vm_list(&vm->bound_list);
187 clear_vm_list(&vm->unbound_list);
190 GEM_BUG_ON(!list_empty(&vm->bound_list));
191 GEM_BUG_ON(!list_empty(&vm->unbound_list));
193 mutex_unlock(&vm->mutex);
196 /* lock the vm into the current ww, if we lock one, we lock all */
197 int i915_vm_lock_objects(struct i915_address_space *vm,
200 if (vm->scratch[0]->base.resv == &vm->_resv) {
201 return i915_gem_object_lock(vm->scratch[0], ww);
203 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
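Because every paging structure shares the vm's reservation object, i915_vm_lock_objects() only has to lock one object: the vm's scratch page when the vm owns its _resv, otherwise an equivalent object owned by the ppgtt (the listing only shows the scratch branch). A hedged sketch of the surrounding ww transaction, assuming the standard i915_gem_ww_ctx_init()/i915_gem_ww_ctx_backoff()/i915_gem_ww_ctx_fini() helpers; the retry-on-EDEADLK shape is the common i915 pattern, not something this listing shows.

static int example_lock_vm(struct i915_address_space *vm)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);	/* interruptible */
retry:
	err = i915_vm_lock_objects(vm, &ww);
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	/* ... touch the vm's paging structures under the lock ... */
	i915_gem_ww_ctx_fini(&ww);
	return err;
}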
210 void i915_address_space_fini(struct i915_address_space *vm)
212 drm_mm_takedown(&vm->mm);
225 struct i915_address_space *vm =
226 container_of(kref, typeof(*vm), resv_ref);
228 dma_resv_fini(&vm->_resv);
229 mutex_destroy(&vm->mutex);
231 kfree(vm);
236 struct i915_address_space *vm =
239 __i915_vm_close(vm);
242 i915_vma_resource_bind_dep_sync_all(vm);
244 vm->cleanup(vm);
245 i915_address_space_fini(vm);
247 i915_vm_resv_put(vm);
252 struct i915_address_space *vm =
255 GEM_BUG_ON(i915_is_ggtt(vm));
256 trace_i915_ppgtt_release(vm);
258 queue_work(vm->i915->wq, &vm->release_work);
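The teardown path is split across two reference counts. The final i915_vm_put() drops vm->ref, and i915_vm_release() queues release_work; __i915_vm_release() then closes the vm (emptying the bound and unbound lists), syncs outstanding vma-resource binds, runs vm->cleanup() and i915_address_space_fini(), and finally drops vm->resv_ref. Only __i915_vm_resv_release() destroys the mutex, the embedded dma-resv and the vm allocation itself, so paging structures that borrowed vm->_resv can outlive the vm proper. A hedged sketch of the two reference pairs, assuming the i915_vm_get()/i915_vm_put() and i915_vm_resv_get()/i915_vm_resv_put() helpers from intel_gtt.h:

static void example_vm_lifetimes(struct i915_address_space *vm)
{
	/* Illustrative pairing only; real users hold these across much
	 * longer windows (contexts, vmas, paging-structure objects).
	 */
	i915_vm_get(vm);	/* keep the whole address space usable */
	i915_vm_resv_get(vm);	/* pin only the embedded dma-resv, &vm->_resv */

	/* ... */

	i915_vm_put(vm);	/* last put queues __i915_vm_release() */
	i915_vm_resv_put(vm);	/* last put lets __i915_vm_resv_release() free vm */
}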
261 void i915_address_space_init(struct i915_address_space *vm, int subclass)
263 kref_init(&vm->ref);
269 if (!kref_read(&vm->resv_ref))
270 kref_init(&vm->resv_ref);
272 vm->pending_unbind = RB_ROOT_CACHED;
273 INIT_WORK(&vm->release_work, __i915_vm_release);
276 * The vm->mutex must be reclaim safe (for use in the shrinker).
280 mutex_init(&vm->mutex);
281 lockdep_set_subclass(&vm->mutex, subclass);
283 if (!intel_vm_no_concurrent_access_wa(vm->i915)) {
284 i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
288 * which is allowed to allocate memory. This means &vm->mutex
294 mutex_acquire(&vm->mutex.dep_map, 0, 0, _THIS_IP_);
296 mutex_release(&vm->mutex.dep_map, _THIS_IP_);
298 dma_resv_init(&vm->_resv);
300 GEM_BUG_ON(!vm->total);
301 drm_mm_init(&vm->mm, 0, vm->total);
303 memset64(vm->min_alignment, I915_GTT_MIN_ALIGNMENT,
304 ARRAY_SIZE(vm->min_alignment));
306 if (HAS_64K_PAGES(vm->i915)) {
307 vm->min_alignment[INTEL_MEMORY_LOCAL] = I915_GTT_PAGE_SIZE_64K;
308 vm->min_alignment[INTEL_MEMORY_STOLEN_LOCAL] = I915_GTT_PAGE_SIZE_64K;
311 vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
313 INIT_LIST_HEAD(&vm->bound_list);
314 INIT_LIST_HEAD(&vm->unbound_list);
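i915_address_space_init() does the generic part of bringing up an address space: both krefs, the release work, a reclaim-safe vm->mutex with a lockdep subclass, the embedded dma-resv, a drm_mm covering [0, vm->total), the per-memory-type minimum alignments (64K on HAS_64K_PAGES platforms) and the bound/unbound lists. A hedged sketch of the minimum a backend fills in first; VM_CLASS_PPGTT as the lock subclass and the 48-bit total are assumptions for illustration only.

static void example_init_vm(struct i915_address_space *vm,
			    struct intel_gt *gt)
{
	vm->gt = gt;
	vm->i915 = gt->i915;
	vm->total = BIT_ULL(48);	/* must be non-zero: GEM_BUG_ON(!vm->total) */

	i915_address_space_init(vm, VM_CLASS_PPGTT);
	/* vm->mm now spans [0, vm->total) and vm->mutex is shrinker safe */
}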
359 int setup_scratch_page(struct i915_address_space *vm)
371 * scratch (read-only) between all vm, we create one 64k scratch page
375 if (i915_vm_is_4lvl(vm) &&
376 HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K) &&
377 !HAS_64K_PAGES(vm->i915))
383 obj = vm->alloc_scratch_dma(vm, size);
387 if (map_pt_dma(vm, obj))
409 vm->scratch[0] = obj;
410 vm->scratch_order = get_order(size);
423 void free_scratch(struct i915_address_space *vm)
427 if (!vm->scratch[0])
430 for (i = 0; i <= vm->top; i++)
431 i915_gem_object_put(vm->scratch[i]);
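setup_scratch_page() allocates and maps the level-0 scratch object via vm->alloc_scratch_dma(), choosing a 64K page when a 4-level vm can use 64K GTT pages on a platform without HAS_64K_PAGES, and records it in vm->scratch[0] plus vm->scratch_order; free_scratch() puts vm->scratch[0..vm->top]. A hedged sketch of how the two pair up across a backend's init and cleanup paths; the intermediate scratch levels belong to the backend, which is an assumption about code outside this listing.

static int example_scratch_lifecycle(struct i915_address_space *vm)
{
	int err;

	err = setup_scratch_page(vm);	/* fills vm->scratch[0], vm->scratch_order */
	if (err)
		return err;

	/* ... backend fills vm->scratch[1..vm->top], each pointing at scratch[0] ... */

	free_scratch(vm);		/* puts vm->scratch[0..vm->top] */
	return 0;
}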
695 __vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
700 obj = i915_gem_object_create_internal(vm->i915, PAGE_ALIGN(size));
706 vma = i915_vma_instance(obj, vm, NULL);
716 __vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size)
721 vma = __vm_create_scratch_for_read(vm, size);
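__vm_create_scratch_for_read() wraps a small internal object in a vma bound into the given vm so the GPU can write results for the CPU to read back; the _pinned variant also pins the vma. A hedged usage sketch, assuming both return ERR_PTR on failure and that i915_vma_unpin_and_release() (from i915_vma.h) is an acceptable way to drop the pinned vma; the readback step is left as a comment.

static int example_readback_scratch(struct i915_address_space *vm)
{
	struct i915_vma *vma;

	vma = __vm_create_scratch_for_read_pinned(vm, PAGE_SIZE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* ... emit GPU commands writing into the vma, wait, read back vma->obj ... */

	i915_vma_unpin_and_release(&vma, 0);
	return 0;
}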