Lines matching defs:vm (GGTT address-space setup, suspend/resume, and PTE vfuncs; the matched code appears to be drivers/gpu/drm/i915/gt/intel_ggtt.c)
57 struct drm_i915_private *i915 = ggtt->vm.i915;
59 i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
61 ggtt->vm.is_ggtt = true;
64 ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
67 ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;
73 ggtt->vm.cleanup(&ggtt->vm);
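These first matches are the GGTT's address-space initialization: the VM is registered with class VM_CLASS_GGTT, flagged as the global GTT, given read-only PTE support only on Valleyview, and on some platforms a color-adjust callback for cache-domain guard pages; line 73 is the teardown call on the error path. A minimal sketch of how these fields fit together, reconstructed around the matched lines (the HAS_LLC/HAS_PPGTT predicate is my assumption):

    /* Sketch of the init flow implied by lines 57-73; everything beyond
     * the matched lines is reconstruction, not verbatim driver code. */
    static int ggtt_init_hw(struct i915_ggtt *ggtt)
    {
            struct drm_i915_private *i915 = ggtt->vm.i915;

            i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);

            ggtt->vm.is_ggtt = true;

            /* Only VLV supports read-only GGTT mappings */
            ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);

            if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
                    ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;

            /* ... map the GSM; on failure: ggtt->vm.cleanup(&ggtt->vm); */

            return 0;
    }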
109 * @vm: The VM to suspend the mappings for
115 void i915_ggtt_suspend_vm(struct i915_address_space *vm, bool evict_all)
120 drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
123 i915_gem_drain_freed_objects(vm->i915);
125 mutex_lock(&vm->mutex);
131 save_skip_rewrite = vm->skip_pte_rewrite;
132 vm->skip_pte_rewrite = true;
134 list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
150 mutex_unlock(&vm->mutex);
157 vm->skip_pte_rewrite = save_skip_rewrite;
171 vm->clear_range(vm, 0, vm->total);
173 vm->skip_pte_rewrite = save_skip_rewrite;
175 mutex_unlock(&vm->mutex);
177 drm_WARN_ON(&vm->i915->drm, evict_all && !list_empty(&vm->bound_list));
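The suspend path above drains freed objects, takes vm->mutex, and sets skip_pte_rewrite so that any VMA unbinding done during the walk does not write PTEs that are about to be lost; the whole range is then pointed at scratch. A condensed sketch of that flow (the loop body and the retry path around lines 150-157 are paraphrased):

    void i915_ggtt_suspend_vm(struct i915_address_space *vm, bool evict_all)
    {
            struct i915_vma *vma, *vn;
            int save_skip_rewrite;

            drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);

            i915_gem_drain_freed_objects(vm->i915);

            mutex_lock(&vm->mutex);

            /* Unbinding below must not rewrite PTEs we are about to lose */
            save_skip_rewrite = vm->skip_pte_rewrite;
            vm->skip_pte_rewrite = true;

            list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
                    /* ... unbind, or mark for rebind on resume ... */
            }

            /* Point every entry at the scratch page */
            vm->clear_range(vm, 0, vm->total);

            vm->skip_pte_rewrite = save_skip_rewrite;

            mutex_unlock(&vm->mutex);

            drm_WARN_ON(&vm->i915->drm,
                        evict_all && !list_empty(&vm->bound_list));
    }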
184 i915_ggtt_suspend_vm(&ggtt->vm, false);
193 struct intel_uncore *uncore = ggtt->vm.gt->uncore;
218 struct intel_uncore *uncore = ggtt->vm.gt->uncore;
226 if (needs_wc_ggtt_mapping(ggtt->vm.i915))
242 struct drm_i915_private *i915 = ggtt->vm.i915;
302 struct intel_gt *gt = ggtt->vm.gt;
310 struct intel_gt *gt = ggtt->vm.gt;
344 struct intel_gt *gt = ggtt->vm.gt;
345 const gen8_pte_t scratch_pte = ggtt->vm.scratch[0]->encode;
451 static void gen8_ggtt_insert_page(struct i915_address_space *vm,
457 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
461 gen8_set_pte(pte, ggtt->vm.pte_encode(addr, pat_index, flags));
466 static dma_addr_t gen8_ggtt_read_entry(struct i915_address_space *vm,
469 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
473 return ggtt->vm.pte_decode(gen8_get_pte(pte), is_present, is_local);
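gen8_ggtt_insert_page() indexes the mapped GSM by the GGTT offset and writes one encoded 64-bit PTE; gen8_ggtt_read_entry() is the inverse, going through pte_decode. The PTE accessors are, to the best of my recollection, plain writeq/readq wrappers; treat this sketch as an assumption:

    /* Assumed helpers: 64-bit MMIO accessors over the GSM mapping */
    static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
    {
            writeq(pte, addr);
    }

    static gen8_pte_t gen8_get_pte(void __iomem *addr)
    {
            return readq(addr);
    }

    /* Index computation used by the insert/read paths (assumed) */
    gen8_pte_t __iomem *pte =
            (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;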
476 static void gen8_ggtt_insert_page_bind(struct i915_address_space *vm,
480 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
483 pte = ggtt->vm.pte_encode(addr, pat_index, flags);
484 if (should_update_ggtt_with_bind(i915_vm_to_ggtt(vm)) &&
488 gen8_ggtt_insert_page(vm, addr, offset, pat_index, flags);
491 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
496 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
497 const gen8_pte_t pte_encode = ggtt->vm.pte_encode(0, pat_index, flags);
512 gen8_set_pte(gte++, vm->scratch[0]->encode);
521 gen8_set_pte(gte++, vm->scratch[0]->encode);
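gen8_ggtt_insert_entries() writes one PTE per page of the VMA's scatterlist, and the scratch writes at lines 512 and 521 are the guard entries placed before and after the real mapping. A sketch of the loop shape (the exact guard arithmetic is simplified and should be treated as an assumption):

    const gen8_pte_t pte_encode = ggtt->vm.pte_encode(0, pat_index, flags);
    gen8_pte_t __iomem *gte, *end;
    struct sgt_iter iter;
    dma_addr_t addr;

    gte = (gen8_pte_t __iomem *)ggtt->gsm +
          vma_res->start / I915_GTT_PAGE_SIZE;

    /* Leading guard pages point at scratch */
    end = gte + vma_res->guard / I915_GTT_PAGE_SIZE;
    while (gte < end)
            gen8_set_pte(gte++, vm->scratch[0]->encode);

    end += (vma_res->node_size - vma_res->guard) / I915_GTT_PAGE_SIZE;
    for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
            gen8_set_pte(gte++, pte_encode | addr);

    /* Trailing guard: scratch-fill whatever remains of the node */
    while (gte < end)
            gen8_set_pte(gte++, vm->scratch[0]->encode);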
530 static bool __gen8_ggtt_insert_entries_bind(struct i915_address_space *vm,
534 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
535 gen8_pte_t scratch_pte = vm->scratch[0]->encode;
539 pte_encode = ggtt->vm.pte_encode(0, pat_index, flags);
561 static void gen8_ggtt_insert_entries_bind(struct i915_address_space *vm,
565 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
567 if (should_update_ggtt_with_bind(i915_vm_to_ggtt(vm)) &&
568 __gen8_ggtt_insert_entries_bind(vm, vma_res, pat_index, flags))
571 gen8_ggtt_insert_entries(vm, vma_res, pat_index, flags);
574 static void gen8_ggtt_clear_range(struct i915_address_space *vm,
577 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
580 const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
595 static void gen8_ggtt_scratch_range_bind(struct i915_address_space *vm,
598 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
601 const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
613 gen8_ggtt_clear_range(vm, start, length);
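Clearing a range just overwrites each PTE in [start, start + length) with the scratch encoding; gen8_ggtt_scratch_range_bind() (lines 595-613) tries to do the same through the GuC bind interface and falls back to the CPU path at line 613. A sketch of the CPU clear, with the real function's bounds clamping omitted:

    const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
    unsigned int first = start / I915_GTT_PAGE_SIZE;
    unsigned int num = length / I915_GTT_PAGE_SIZE;
    gen8_pte_t __iomem *gtt_base =
            (gen8_pte_t __iomem *)ggtt->gsm + first;
    int i;

    for (i = 0; i < num; i++)
            gen8_set_pte(&gtt_base[i], scratch_pte);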
616 static void gen6_ggtt_insert_page(struct i915_address_space *vm,
622 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
626 iowrite32(vm->pte_encode(addr, pat_index, flags), pte);
631 static dma_addr_t gen6_ggtt_read_entry(struct i915_address_space *vm,
635 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
639 return vm->pte_decode(ioread32(pte), is_present, is_local);
648 static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
653 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
664 iowrite32(vm->scratch[0]->encode, gte++);
667 iowrite32(vm->pte_encode(addr, pat_index, flags), gte++);
672 iowrite32(vm->scratch[0]->encode, gte++);
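The gen6 paths are the 32-bit analogues: entries are written with iowrite32 and the encode hook is called directly through vm-> (compare line 626 with line 461). A sketch of gen6_ggtt_insert_page, with the final GGTT invalidation assumed:

    gen6_pte_t __iomem *pte =
            (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

    iowrite32(vm->pte_encode(addr, pat_index, flags), pte);

    /* Assumed: make the write visible to the GPU */
    ggtt->invalidate(ggtt);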
681 static void nop_clear_range(struct i915_address_space *vm,
686 static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
695 intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
699 struct i915_address_space *vm;
709 gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset,
711 bxt_vtd_ggtt_wa(arg->vm);
716 static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
722 struct insert_page arg = { vm, addr, offset, pat_index };
728 struct i915_address_space *vm;
738 gen8_ggtt_insert_entries(arg->vm, arg->vma_res,
740 bxt_vtd_ggtt_wa(arg->vm);
745 static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
750 struct insert_entries arg = { vm, vma_res, pat_index, flags };
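On Broxton with VT-d active, concurrent GGTT writes can be lost, so the __BKL variants funnel each update through stop_machine() and then flush with a posting read of GFX_FLSH_CNTL_GEN6 (line 695); the structs at lines 699 and 728 just bundle the arguments for the callback. A sketch of the single-page variant (the callback name is my assumption):

    static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
    {
            struct insert_page *arg = _arg;

            gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset,
                                  arg->pat_index, 0);
            bxt_vtd_ggtt_wa(arg->vm);

            return 0;
    }

    static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
                                              dma_addr_t addr, u64 offset,
                                              unsigned int pat_index,
                                              u32 flags)
    {
            struct insert_page arg = { vm, addr, offset, pat_index };

            /* Serialize against every CPU while the PTE is written */
            stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
    }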
755 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
758 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
771 scratch_pte = vm->scratch[0]->encode;
776 void intel_ggtt_bind_vma(struct i915_address_space *vm,
796 vm->insert_entries(vm, vma_res, pat_index, pte_flags);
800 void intel_ggtt_unbind_vma(struct i915_address_space *vm,
803 vm->clear_range(vm, vma_res->start, vma_res->vma_size);
806 dma_addr_t intel_ggtt_read_entry(struct i915_address_space *vm,
809 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
811 return ggtt->vm.read_entry(vm, offset, is_present, is_local);
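intel_ggtt_bind_vma() derives the PTE flags from the backing object and dispatches to the vfunc installed at probe time; unbind simply clears the VMA's range. A sketch of the bind side (the flag names are taken from the driver as I recall them, so treat them as assumptions):

    void intel_ggtt_bind_vma(struct i915_address_space *vm,
                             struct i915_vm_pt_stash *stash,
                             struct i915_vma_resource *vma_res,
                             unsigned int pat_index, u32 flags)
    {
            u32 pte_flags = 0;

            /* Applicable to VLV, which has a read-only capable GGTT */
            if (vma_res->bi.readonly)
                    pte_flags |= PTE_READ_ONLY;
            if (vma_res->bi.lmem)
                    pte_flags |= PTE_LM;

            vm->insert_entries(vm, vma_res, pat_index, pte_flags);
            vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
    }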
830 if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
833 GEM_BUG_ON(ggtt->vm.total <= GUC_TOP_RESERVE_SIZE);
834 offset = ggtt->vm.total - GUC_TOP_RESERVE_SIZE;
836 ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw,
840 drm_dbg(&ggtt->vm.i915->drm,
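Lines 830-840 keep the top GUC_TOP_RESERVE_SIZE of the GGTT out of the allocator when GuC is in use, since the GuC cannot address that region. A reconstruction of the helper around the matched lines (the trailing i915_gem_gtt_reserve() arguments are assumptions):

    static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
    {
            u64 offset;
            int ret;

            if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
                    return 0;

            GEM_BUG_ON(ggtt->vm.total <= GUC_TOP_RESERVE_SIZE);
            offset = ggtt->vm.total - GUC_TOP_RESERVE_SIZE;

            ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw,
                                       GUC_TOP_RESERVE_SIZE, offset,
                                       I915_COLOR_UNEVICTABLE, PIN_NOEVICT);
            if (ret)
                    drm_dbg(&ggtt->vm.i915->drm,
                            "Failed to reserve top of GGTT for GuC\n");

            return ret;
    }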
883 intel_wopcm_guc_size(&ggtt->vm.gt->wopcm));
915 if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture))
916 drm_mm_insert_node_in_range(&ggtt->vm.mm,
927 ggtt->vm.scratch_range(&ggtt->vm, start, size);
928 drm_dbg(&ggtt->vm.i915->drm,
943 drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
944 drm_dbg(&ggtt->vm.i915->drm,
947 ggtt->vm.clear_range(&ggtt->vm, hole_start,
952 ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
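init_ggtt reserves a PTE for error capture (lines 915-916), points it at scratch (line 927), and then sweeps every hole left in the drm_mm so no stale PTE dangles; line 952 keeps the very last page pointing at scratch, working around the GPU prefetching past the end of the GTT. A sketch of the hole sweep (message text paraphrased):

    struct drm_mm_node *entry;
    u64 hole_start, hole_end;

    drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
            drm_dbg(&ggtt->vm.i915->drm,
                    "clearing unused GTT space: [%llx, %llx]\n",
                    hole_start, hole_end);
            ggtt->vm.clear_range(&ggtt->vm, hole_start,
                                 hole_end - hole_start);
    }

    /* Final page stays at scratch: prefetch-past-the-end workaround */
    ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);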
961 static void aliasing_gtt_bind_vma(struct i915_address_space *vm,
975 ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm,
979 vm->insert_entries(vm, vma_res, pat_index, pte_flags);
984 static void aliasing_gtt_unbind_vma(struct i915_address_space *vm,
988 vm->clear_range(vm, vma_res->start, vma_res->vma_size);
991 ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma_res);
1000 ppgtt = i915_ppgtt_create(ggtt->vm.gt, 0);
1004 if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
1009 err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, ggtt->vm.total);
1013 i915_gem_object_lock(ppgtt->vm.scratch[0], NULL);
1014 err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
1015 i915_gem_object_unlock(ppgtt->vm.scratch[0]);
1025 ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, ggtt->vm.total);
1028 ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;
1030 GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != intel_ggtt_bind_vma);
1031 ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
1033 GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != intel_ggtt_unbind_vma);
1034 ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
1036 i915_vm_free_pt_stash(&ppgtt->vm, &stash);
1040 i915_vm_free_pt_stash(&ppgtt->vm, &stash);
1042 i915_vm_put(&ppgtt->vm);
1054 i915_vm_put(&ppgtt->vm);
1056 ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
1057 ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
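The aliasing-PPGTT path shadows every GGTT binding in a full-range ppgtt: setup creates a ppgtt spanning ggtt->vm.total, preallocates and maps the page-table stash under the scratch object's lock (lines 1009-1015), allocates the entire VA range up front (line 1025), and swaps the GGTT's vma_ops to the aliasing variants; teardown (lines 1054-1057) restores the plain intel_ggtt_* hooks. The bind wrapper itself is small; a sketch (the I915_VMA_*_BIND checks are assumptions based on the usual i915 bind flags):

    static void aliasing_gtt_bind_vma(struct i915_address_space *vm,
                                      struct i915_vm_pt_stash *stash,
                                      struct i915_vma_resource *vma_res,
                                      unsigned int pat_index, u32 flags)
    {
            u32 pte_flags = 0;

            if (vma_res->bi.readonly)
                    pte_flags |= PTE_READ_ONLY;

            /* Mirror the binding into the shadow ppgtt ... */
            if (flags & I915_VMA_LOCAL_BIND)
                    ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm,
                                   stash, vma_res, pat_index, flags);

            /* ... and into the global GTT itself */
            if (flags & I915_VMA_GLOBAL_BIND)
                    vm->insert_entries(vm, vma_res, pat_index, pte_flags);
    }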
1081 flush_workqueue(ggtt->vm.i915->wq);
1082 i915_gem_drain_freed_objects(ggtt->vm.i915);
1084 mutex_lock(&ggtt->vm.mutex);
1086 ggtt->vm.skip_pte_rewrite = true;
1088 list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
1107 ggtt->vm.cleanup(&ggtt->vm);
1109 mutex_unlock(&ggtt->vm.mutex);
1110 i915_address_space_fini(&ggtt->vm);
1141 GEM_WARN_ON(kref_read(&ggtt->vm.resv_ref) != 1);
1142 dma_resv_fini(&ggtt->vm._resv);
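Teardown runs in two stages: the cleanup stage (lines 1081-1110) flushes the driver workqueue, drains freed objects, unbinds whatever is still on the bound list with skip_pte_rewrite set, calls the platform cleanup hook, and finalizes the address space; the driver-release stage (lines 1141-1142) then asserts the reservation object holds a single reference before dma_resv_fini(). A condensed sketch of the first stage (the unbind step is paraphrased):

    static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
    {
            struct i915_vma *vma, *vn;

            flush_workqueue(ggtt->vm.i915->wq);
            i915_gem_drain_freed_objects(ggtt->vm.i915);

            mutex_lock(&ggtt->vm.mutex);

            /* No point rewriting PTEs in a GTT we are tearing down */
            ggtt->vm.skip_pte_rewrite = true;

            list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
                    /* ... unbind and release each remaining VMA ... */
            }

            ggtt->vm.cleanup(&ggtt->vm);

            mutex_unlock(&ggtt->vm.mutex);
            i915_address_space_fini(&ggtt->vm);
    }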
1196 struct drm_i915_private *i915 = ggtt->vm.i915;
1197 struct intel_uncore *uncore = ggtt->vm.gt->uncore;
1222 kref_init(&ggtt->vm.resv_ref);
1223 ret = setup_scratch_page(&ggtt->vm);
1232 if (i915_gem_object_is_lmem(ggtt->vm.scratch[0]))
1235 ggtt->vm.scratch[0]->encode =
1236 ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
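ggtt_probe_common finishes by creating the scratch page and caching its encoded PTE so the clear and guard paths can reuse it; if the scratch object lives in local memory the PTE needs the LM bit (lines 1232-1236). A sketch of that tail (the PAT-index argument is an assumption):

    kref_init(&ggtt->vm.resv_ref);

    ret = setup_scratch_page(&ggtt->vm);
    if (ret) {
            drm_err(&i915->drm, "Scratch setup failed\n");
            return ret;
    }

    pte_flags = 0;
    if (i915_gem_object_is_lmem(ggtt->vm.scratch[0]))
            pte_flags |= PTE_LM;    /* scratch page is in local memory */

    ggtt->vm.scratch[0]->encode =
            ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
                                i915_gem_get_pat_index(i915,
                                                       I915_CACHE_NONE),
                                pte_flags);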
1244 static void gen6_gmch_remove(struct i915_address_space *vm)
1246 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
1249 free_scratch(vm);
1260 struct drm_i915_private *i915 = ggtt->vm.i915;
1279 ggtt->vm.alloc_pt_dma = alloc_pt_dma;
1280 ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
1281 ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY;
1283 ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
1284 ggtt->vm.cleanup = gen6_gmch_remove;
1285 ggtt->vm.insert_page = gen8_ggtt_insert_page;
1286 ggtt->vm.clear_range = nop_clear_range;
1287 ggtt->vm.scratch_range = gen8_ggtt_clear_range;
1289 ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
1290 ggtt->vm.read_entry = gen8_ggtt_read_entry;
1297 ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
1298 ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
1306 ggtt->vm.raw_insert_page = gen8_ggtt_insert_page;
1307 ggtt->vm.raw_insert_entries = gen8_ggtt_insert_entries;
1309 ggtt->vm.bind_async_flags =
1314 ggtt->vm.scratch_range = gen8_ggtt_scratch_range_bind;
1315 ggtt->vm.insert_page = gen8_ggtt_insert_page_bind;
1316 ggtt->vm.insert_entries = gen8_ggtt_insert_entries_bind;
1321 ggtt->vm.raw_insert_page = gen8_ggtt_insert_page;
1324 if (intel_uc_wants_guc_submission(&ggtt->vm.gt->uc))
1329 ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
1330 ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
1333 ggtt->vm.pte_encode = mtl_ggtt_pte_encode;
1335 ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
1337 ggtt->vm.pte_decode = gen8_ggtt_pte_decode;
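The gen8 probe wires the vfunc table: clear_range is a no-op by default (scratch_range handles the cases that must really scrub), the Broxton VT-d overrides replace the insert paths when needed (lines 1297-1298), the *_bind variants route updates through GuC when GuC submission owns the GGTT (lines 1314-1316), and the encode/decode pair is chosen per platform at lines 1333-1337. A condensed sketch of that tail (the Meteor Lake predicate is my assumption):

    ggtt->vm.vma_ops.bind_vma   = intel_ggtt_bind_vma;
    ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;

    if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
            ggtt->vm.pte_encode = mtl_ggtt_pte_encode;
    else
            ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
    ggtt->vm.pte_decode = gen8_ggtt_pte_decode;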
1448 struct drm_i915_private *i915 = ggtt->vm.i915;
1473 ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
1475 ggtt->vm.alloc_pt_dma = alloc_pt_dma;
1476 ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
1478 ggtt->vm.clear_range = nop_clear_range;
1480 ggtt->vm.clear_range = gen6_ggtt_clear_range;
1481 ggtt->vm.scratch_range = gen6_ggtt_clear_range;
1482 ggtt->vm.insert_page = gen6_ggtt_insert_page;
1483 ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
1484 ggtt->vm.read_entry = gen6_ggtt_read_entry;
1485 ggtt->vm.cleanup = gen6_gmch_remove;
1490 ggtt->vm.pte_encode = iris_pte_encode;
1492 ggtt->vm.pte_encode = hsw_pte_encode;
1494 ggtt->vm.pte_encode = byt_pte_encode;
1496 ggtt->vm.pte_encode = ivb_pte_encode;
1498 ggtt->vm.pte_encode = snb_pte_encode;
1500 ggtt->vm.pte_decode = gen6_pte_decode;
1502 ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
1503 ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
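The gen6 probe mirrors the same wiring for the older GTTs, ending with a five-way choice of PTE encoder (lines 1490-1498). The selection ladder, with the predicates reconstructed from memory (treat them as assumptions):

    if (HAS_EDRAM(i915))
            ggtt->vm.pte_encode = iris_pte_encode;
    else if (IS_HASWELL(i915))
            ggtt->vm.pte_encode = hsw_pte_encode;
    else if (IS_VALLEYVIEW(i915))
            ggtt->vm.pte_encode = byt_pte_encode;
    else if (GRAPHICS_VER(i915) >= 7)
            ggtt->vm.pte_encode = ivb_pte_encode;
    else
            ggtt->vm.pte_encode = snb_pte_encode;

    ggtt->vm.pte_decode = gen6_pte_decode;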
1513 ggtt->vm.gt = gt;
1514 ggtt->vm.i915 = i915;
1515 ggtt->vm.dma = i915->drm.dev;
1516 dma_resv_init(&ggtt->vm._resv);
1526 dma_resv_fini(&ggtt->vm._resv);
1530 if ((ggtt->vm.total - 1) >> 32) {
1534 ggtt->vm.total >> 20);
1535 ggtt->vm.total = 1ULL << 32;
1537 min_t(u64, ggtt->mappable_end, ggtt->vm.total);
1540 if (ggtt->mappable_end > ggtt->vm.total) {
1544 &ggtt->mappable_end, ggtt->vm.total);
1545 ggtt->mappable_end = ggtt->vm.total;
1549 drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20);
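After probing, the totals are sanity-checked: a GGTT larger than 4 GiB is truncated, since the global GTT is addressed with 32-bit offsets, and the mappable aperture is clamped to the total. A sketch of those two checks with the error strings paraphrased:

    if ((ggtt->vm.total - 1) >> 32) {
            drm_err(&i915->drm,
                    "GGTT exceeds 32-bit range: %lluM, truncating\n",
                    ggtt->vm.total >> 20);
            ggtt->vm.total = 1ULL << 32;
            ggtt->mappable_end =
                    min_t(u64, ggtt->mappable_end, ggtt->vm.total);
    }

    if (ggtt->mappable_end > ggtt->vm.total) {
            drm_err(&i915->drm,
                    "mappable aperture (%pa) larger than total (%llu)\n",
                    &ggtt->mappable_end, ggtt->vm.total);
            ggtt->mappable_end = ggtt->vm.total;
    }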
1606 * @vm: The VM to restore the mappings for
1615 bool i915_ggtt_resume_vm(struct i915_address_space *vm, bool all_evicted)
1620 drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
1623 drm_WARN_ON(&vm->i915->drm, !list_empty(&vm->bound_list));
1628 vm->clear_range(vm, 0, vm->total);
1631 list_for_each_entry(vma, &vm->bound_list, vm_link) {
1643 vma->ops->bind_vma(vm, NULL, vma->resource,
1645 i915_gem_get_pat_index(vm->i915,
1666 flush = i915_ggtt_resume_vm(&ggtt->vm, false);
1669 ggtt->vm.scratch_range(&ggtt->vm, ggtt->error_capture.start,
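Resume mirrors suspend: with all_evicted the bound list must already be empty (line 1623); otherwise the range is first cleared to scratch and every still-bound VMA is rebound through its vma_ops with its saved PAT index, falling back to an uncached PAT for objectless VMAs (lines 1643-1645). After the VM-level resume, the caller re-points the error-capture page at scratch (line 1669). A sketch of the rebind loop (flag bookkeeping paraphrased):

    vm->clear_range(vm, 0, vm->total);

    list_for_each_entry(vma, &vm->bound_list, vm_link) {
            struct drm_i915_gem_object *obj = vma->obj;
            unsigned int was_bound =
                    atomic_read(&vma->flags) & I915_VMA_BIND_MASK;

            /* Rebind in place; the GGTT needs no page-table stash */
            vma->ops->bind_vma(vm, NULL, vma->resource,
                               obj ? obj->pat_index :
                                     i915_gem_get_pat_index(vm->i915,
                                                            I915_CACHE_NONE),
                               was_bound);
    }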