xref: /linux/drivers/gpu/drm/i915/display/intel_dpt.c (revision 3fd6c59042dbba50391e30862beac979491145fe)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "gem/i915_gem_domain.h"
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "gt/gen8_ppgtt.h"

#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"

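/*
 * A DPT (Display Page Table) is a page table through which the display
 * engine accesses a framebuffer. One is created per framebuffer that
 * needs it; the struct wraps the address space in which the fb gets
 * bound, the GEM object backing the page table itself, the VMA pinning
 * that object into the GGTT, and the iomap used to write its PTEs from
 * the CPU.
 */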
struct i915_dpt {
	struct i915_address_space vm;

	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	void __iomem *iomem;
};

#define i915_is_dpt(vm) ((vm)->is_dpt)

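/*
 * Convert the embedded address space back to its containing i915_dpt.
 * The BUILD_BUG_ON() guarantees that @vm is the first member, making
 * the container_of() a zero-offset cast.
 */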
static inline struct i915_dpt *
i915_vm_to_dpt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_dpt, vm));
	drm_WARN_ON(&vm->i915->drm, !i915_is_dpt(vm));
	return container_of(vm, struct i915_dpt, vm);
}

#define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT)

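/* Write one 64-bit gen8 PTE through the CPU iomap of the page table. */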
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
	writeq(pte, addr);
}

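/*
 * Program the PTE for a single page: @offset is a byte offset into the
 * DPT address space and is converted to a PTE index, while @addr is
 * encoded together with the PAT index and flags.
 */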
static void dpt_insert_page(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    unsigned int pat_index,
			    u32 flags)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
	gen8_pte_t __iomem *base = dpt->iomem;

	gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE,
		     vm->pte_encode(addr, pat_index, flags));
}

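/*
 * Program a PTE for each page backing @vma_res, starting at the index
 * corresponding to vma_res->start. The PAT index and flags are encoded
 * once and OR'ed with each page's DMA address.
 */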
static void dpt_insert_entries(struct i915_address_space *vm,
			       struct i915_vma_resource *vma_res,
			       unsigned int pat_index,
			       u32 flags)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
	gen8_pte_t __iomem *base = dpt->iomem;
	const gen8_pte_t pte_encode = vm->pte_encode(0, pat_index, flags);
	struct sgt_iter sgt_iter;
	dma_addr_t addr;
	int i;

	/*
	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
	 * not to allow the user to override access to a read only page.
	 */

	i = vma_res->start / I915_GTT_PAGE_SIZE;
	for_each_sgt_daddr(addr, sgt_iter, vma_res->bi.pages)
		gen8_set_pte(&base[i++], pte_encode | addr);
}

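/*
 * Clearing a DPT range is a no-op, presumably because stale PTEs are
 * harmless here and are simply overwritten the next time the range is
 * bound.
 */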
static void dpt_clear_range(struct i915_address_space *vm,
			    u64 start, u64 length)
{
}

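/*
 * Bind a VMA into the DPT by programming its PTEs, translating the
 * read-only and local-memory state of the backing object into the
 * corresponding PTE flags. Already-bound VMAs are left untouched.
 */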
static void dpt_bind_vma(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma_resource *vma_res,
			 unsigned int pat_index,
			 u32 flags)
{
	u32 pte_flags;

	if (vma_res->bound_flags)
		return;

	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
	pte_flags = 0;
	if (vm->has_read_only && vma_res->bi.readonly)
		pte_flags |= PTE_READ_ONLY;
	if (vma_res->bi.lmem)
		pte_flags |= PTE_LM;

	vm->insert_entries(vm, vma_res, pat_index, pte_flags);

	vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;

	/*
	 * Without aliasing PPGTT there's no difference between
	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
	 * upgrade to both bound if we bind either to avoid double-binding.
	 */
	vma_res->bound_flags = I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
}

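/* Unbind a VMA by clearing its PTE range (currently a no-op, see above). */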
static void dpt_unbind_vma(struct i915_address_space *vm,
			   struct i915_vma_resource *vma_res)
{
	vm->clear_range(vm, vma_res->start, vma_res->vma_size);
}

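/* Release the page-table object when the address space is destroyed. */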
static void dpt_cleanup(struct i915_address_space *vm)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);

	i915_gem_object_put(dpt->obj);
}

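/**
 * intel_dpt_pin_to_ggtt - pin the DPT page table into the GGTT
 * @vm: the DPT address space
 * @alignment: alignment of the page table's GGTT placement
 *
 * Pin the GEM object backing the page table into the GGTT (requiring a
 * mappable placement if the object lives in stolen memory) and set up
 * an iomap so the PTEs can be written from the CPU.
 *
 * Returns a reference to the pinned VMA, or an ERR_PTR() on failure.
 */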
struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
				       unsigned int alignment)
{
	struct drm_i915_private *i915 = vm->i915;
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	void __iomem *iomem;
	struct i915_gem_ww_ctx ww;
	u64 pin_flags = 0;
	int err;

	if (i915_gem_object_is_stolen(dpt->obj))
		pin_flags |= PIN_MAPPABLE;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	atomic_inc(&i915->gpu_error.pending_fb_pin);

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(dpt->obj, &ww);
		if (err)
			continue;

		vma = i915_gem_object_ggtt_pin_ww(dpt->obj, &ww, NULL, 0,
						  alignment, pin_flags);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			continue;
		}

		iomem = i915_vma_pin_iomap(vma);
		i915_vma_unpin(vma);

		if (IS_ERR(iomem)) {
			err = PTR_ERR(iomem);
			continue;
		}

		dpt->vma = vma;
		dpt->iomem = iomem;

		i915_vma_get(vma);
	}

	dpt->obj->mm.dirty = true;

	atomic_dec(&i915->gpu_error.pending_fb_pin);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return err ? ERR_PTR(err) : vma;
}

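/**
 * intel_dpt_unpin_from_ggtt - release the DPT page table's GGTT pinning
 * @vm: the DPT address space
 *
 * Unmap the page table's iomap and drop the VMA reference taken by
 * intel_dpt_pin_to_ggtt().
 */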
void intel_dpt_unpin_from_ggtt(struct i915_address_space *vm)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);

	i915_vma_unpin_iomap(dpt->vma);
	i915_vma_put(dpt->vma);
}

/**
 * intel_dpt_resume - restore the memory mapping for all DPT FBs during system resume
 * @i915: device instance
 *
 * Restore the memory mapping during system resume for all framebuffers which
 * are mapped to HW via a GGTT->DPT page table. The content of these page
 * tables is not stored in the hibernation image during S4 and S3RST->S4
 * transitions, so here we reprogram the PTE entries in those tables.
 *
 * This function must be called after the mappings in GGTT have been restored
 * by calling i915_ggtt_resume().
 */
void intel_dpt_resume(struct drm_i915_private *i915)
{
	struct drm_framebuffer *drm_fb;

	if (!HAS_DISPLAY(i915))
		return;

	mutex_lock(&i915->drm.mode_config.fb_lock);
	drm_for_each_fb(drm_fb, &i915->drm) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		if (fb->dpt_vm)
			i915_ggtt_resume_vm(fb->dpt_vm);
	}
	mutex_unlock(&i915->drm.mode_config.fb_lock);
}

/**
 * intel_dpt_suspend - suspend the memory mapping for all DPT FBs during system suspend
 * @i915: device instance
 *
 * Suspend the memory mapping during system suspend for all framebuffers which
 * are mapped to HW via a GGTT->DPT page table.
 *
 * This function must be called before the mappings in GGTT are suspended
 * by calling i915_ggtt_suspend().
 */
void intel_dpt_suspend(struct drm_i915_private *i915)
{
	struct drm_framebuffer *drm_fb;

	if (!HAS_DISPLAY(i915))
		return;

	mutex_lock(&i915->drm.mode_config.fb_lock);

	drm_for_each_fb(drm_fb, &i915->drm) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		if (fb->dpt_vm)
			i915_ggtt_suspend_vm(fb->dpt_vm);
	}

	mutex_unlock(&i915->drm.mode_config.fb_lock);
}

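/*
 * A rough sketch of a DPT's lifecycle, as driven by the framebuffer
 * code (illustrative only, the actual call sites live outside this
 * file):
 *
 *	vm = intel_dpt_create(fb);
 *	vma = intel_dpt_pin_to_ggtt(vm, alignment);
 *	... bind the fb into vm and program the plane, using
 *	    intel_dpt_offset() for the page table base address ...
 *	intel_dpt_unpin_from_ggtt(vm);
 *	intel_dpt_destroy(vm);
 */
/**
 * intel_dpt_create - create a DPT for a framebuffer
 * @fb: the framebuffer needing the DPT
 *
 * Allocate the backing store for the page table, trying contiguous
 * local memory first, then stolen memory (if the GGTT has a mappable
 * aperture), then, on systems without local memory, shmem. The DPT
 * address space is sized to cover the (possibly remapped) GTT view of
 * @fb.
 *
 * Returns the new address space, or an ERR_PTR() on failure.
 */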
struct i915_address_space *
intel_dpt_create(struct intel_framebuffer *fb)
{
	struct drm_gem_object *obj = intel_fb_bo(&fb->base);
	struct drm_i915_private *i915 = to_i915(obj->dev);
	struct drm_i915_gem_object *dpt_obj;
	struct i915_address_space *vm;
	struct i915_dpt *dpt;
	size_t size;
	int ret;

	if (intel_fb_needs_pot_stride_remap(fb))
		size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped);
	else
		size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE);

	size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE);

	dpt_obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(dpt_obj) && i915_ggtt_has_aperture(to_gt(i915)->ggtt))
		dpt_obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(dpt_obj) && !HAS_LMEM(i915)) {
		drm_dbg_kms(&i915->drm, "Allocating dpt from smem\n");
		dpt_obj = i915_gem_object_create_shmem(i915, size);
	}
	if (IS_ERR(dpt_obj))
		return ERR_CAST(dpt_obj);

	ret = i915_gem_object_lock_interruptible(dpt_obj, NULL);
	if (!ret) {
		ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE);
		i915_gem_object_unlock(dpt_obj);
	}
	if (ret) {
		i915_gem_object_put(dpt_obj);
		return ERR_PTR(ret);
	}

	dpt = kzalloc(sizeof(*dpt), GFP_KERNEL);
	if (!dpt) {
		i915_gem_object_put(dpt_obj);
		return ERR_PTR(-ENOMEM);
	}

	vm = &dpt->vm;

	vm->gt = to_gt(i915);
	vm->i915 = i915;
	vm->dma = i915->drm.dev;
	vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
	vm->is_dpt = true;

	i915_address_space_init(vm, VM_CLASS_DPT);

	vm->insert_page = dpt_insert_page;
	vm->clear_range = dpt_clear_range;
	vm->insert_entries = dpt_insert_entries;
	vm->cleanup = dpt_cleanup;

	vm->vma_ops.bind_vma    = dpt_bind_vma;
	vm->vma_ops.unbind_vma  = dpt_unbind_vma;

	vm->pte_encode = vm->gt->ggtt->vm.pte_encode;

	dpt->obj = dpt_obj;
	dpt->obj->is_dpt = true;

	return &dpt->vm;
}

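/**
 * intel_dpt_destroy - destroy a DPT
 * @vm: the DPT address space
 *
 * Drop the reference on the DPT address space; once the last reference
 * is gone, the page-table object is released via dpt_cleanup().
 */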
void intel_dpt_destroy(struct i915_address_space *vm)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);

	dpt->obj->is_dpt = false;
	i915_vm_put(&dpt->vm);
}

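/**
 * intel_dpt_offset - GGTT start address of a pinned DPT page table
 * @dpt_vma: the page table's VMA in the GGTT
 *
 * Returns the GGTT offset at which the DPT page table is pinned.
 */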
u64 intel_dpt_offset(struct i915_vma *dpt_vma)
{
	return dpt_vma->node.start;
}