xref: /linux/drivers/gpu/drm/i915/gt/gen6_ppgtt.c (revision 48dea9a700c8728cc31a1dd44588b97578de86ee)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/log2.h>

#include "gen6_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_gt.h"

/* Write the page directory entry at index @pde to point at the page table @pt */
static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
				  const unsigned int pde,
				  const struct i915_page_table *pt)
{
	/* Caller needs to make sure the write completes if necessary */
	iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
		  ppgtt->pd_addr + pde);
}

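/*
 * Enable PPGTT on gen7: set the ECOCHK cacheability bits used for PPGTT
 * walks, then flip the PPGTT enable bit in each engine's GFX_MODE
 * register (per-ring on gen7+).
 */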
void gen7_ppgtt_enable(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 ecochk;

	intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B);

	ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
	if (IS_HASWELL(i915)) {
		ecochk |= ECOCHK_PPGTT_WB_HSW;
	} else {
		ecochk |= ECOCHK_PPGTT_LLC_IVB;
		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
	}
	intel_uncore_write(uncore, GAM_ECOCHK, ecochk);

	for_each_engine(engine, gt, id) {
		/* GFX_MODE is per-ring on gen7+ */
		ENGINE_WRITE(engine,
			     RING_MODE_GEN7,
			     _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	}
}

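/*
 * Enable PPGTT on gen6 (Sandybridge): set the global caching and
 * fault-continuation bits, then enable PPGTT via the single global
 * GFX_MODE register (unlike the per-engine registers on gen7+).
 */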
void gen6_ppgtt_enable(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	intel_uncore_rmw(uncore,
			 GAC_ECO_BITS,
			 0,
			 ECOBITS_SNB_BIT | ECOBITS_PPGTT_CACHE64B);

	intel_uncore_rmw(uncore,
			 GAB_CTL,
			 0,
			 GAB_CTL_CONT_AFTER_PAGEFAULT);

	intel_uncore_rmw(uncore,
			 GAM_ECOCHK,
			 0,
			 ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);

	if (HAS_PPGTT(uncore->i915)) /* may be disabled for VT-d */
		intel_uncore_write(uncore,
				   GFX_MODE,
				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}

/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
				   u64 start, u64 length)
{
	struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
	const unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
	const gen6_pte_t scratch_pte = vm->scratch[0].encode;
	unsigned int pde = first_entry / GEN6_PTES;
	unsigned int pte = first_entry % GEN6_PTES;
	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;

	while (num_entries) {
		struct i915_page_table * const pt =
			i915_pt_entry(ppgtt->base.pd, pde++);
		const unsigned int count = min(num_entries, GEN6_PTES - pte);
		gen6_pte_t *vaddr;

		GEM_BUG_ON(px_base(pt) == px_base(&vm->scratch[1]));

		num_entries -= count;

		GEM_BUG_ON(count > atomic_read(&pt->used));
		if (!atomic_sub_return(count, &pt->used))
			ppgtt->scan_for_unused_pt = true;

		/*
		 * Note that the hw doesn't support removing PDE on the fly
		 * (they are cached inside the context with no means to
		 * invalidate the cache), so we can only reset the PTE
		 * entries back to scratch.
		 */

		vaddr = kmap_atomic_px(pt);
		memset32(vaddr + pte, scratch_pte, count);
		kunmap_atomic(vaddr);

		pte = 0;
	}
}

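/*
 * Write a PTE for every page backing @vma, walking its scatterlist and
 * filling successive page tables. The PDEs are assumed to already point
 * at valid page tables (see gen6_alloc_va_range); nothing is flushed
 * here.
 */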
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct i915_vma *vma,
				      enum i915_cache_level cache_level,
				      u32 flags)
{
	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_page_directory * const pd = ppgtt->pd;
	unsigned int first_entry = vma->node.start / I915_GTT_PAGE_SIZE;
	unsigned int act_pt = first_entry / GEN6_PTES;
	unsigned int act_pte = first_entry % GEN6_PTES;
	const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
	struct sgt_dma iter = sgt_dma(vma);
	gen6_pte_t *vaddr;

	GEM_BUG_ON(pd->entry[act_pt] == &vm->scratch[1]);

	vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
	do {
		GEM_BUG_ON(iter.sg->length < I915_GTT_PAGE_SIZE);
		vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);

		iter.dma += I915_GTT_PAGE_SIZE;
		if (iter.dma == iter.max) {
			iter.sg = __sg_next(iter.sg);
			if (!iter.sg)
				break;

			iter.dma = sg_dma_address(iter.sg);
			iter.max = iter.dma + iter.sg->length;
		}

		if (++act_pte == GEN6_PTES) {
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic_px(i915_pt_entry(pd, ++act_pt));
			act_pte = 0;
		}
	} while (1);
	kunmap_atomic(vaddr);

	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}

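/*
 * Rewrite the PDEs covering [start, end) into the GGTT-resident page
 * directory, then post the writes with a readback of the last entry and
 * invalidate the GGTT TLBs so the GPU sees the new page tables.
 */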
static void gen6_flush_pd(struct gen6_ppgtt *ppgtt, u64 start, u64 end)
{
	struct i915_page_directory * const pd = ppgtt->base.pd;
	struct i915_page_table *pt;
	unsigned int pde;

	start = round_down(start, SZ_64K);
	end = round_up(end, SZ_64K) - start;

	mutex_lock(&ppgtt->flush);

	gen6_for_each_pde(pt, pd, start, end, pde)
		gen6_write_pde(ppgtt, pde, pt);

	mb();
	ioread32(ppgtt->pd_addr + pde - 1);
	gen6_ggtt_invalidate(ppgtt->base.vm.gt->ggtt);
	mb();

	mutex_unlock(&ppgtt->flush);
}

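/*
 * Ensure page tables exist for [start, start + length): allocate and
 * scratch-fill any missing ones, bump their use counts, and flush the
 * new PDEs if the page directory is already bound in the GGTT.
 */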
static int gen6_alloc_va_range(struct i915_address_space *vm,
			       u64 start, u64 length)
{
	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
	struct i915_page_directory * const pd = ppgtt->base.pd;
	struct i915_page_table *pt, *alloc = NULL;
	bool flush = false;
	u64 from = start;
	unsigned int pde;
	int ret = 0;

	spin_lock(&pd->lock);
	gen6_for_each_pde(pt, pd, start, length, pde) {
		const unsigned int count = gen6_pte_count(start, length);

		if (px_base(pt) == px_base(&vm->scratch[1])) {
			spin_unlock(&pd->lock);

			pt = fetch_and_zero(&alloc);
			if (!pt)
				pt = alloc_pt(vm);
			if (IS_ERR(pt)) {
				ret = PTR_ERR(pt);
				goto unwind_out;
			}

			fill32_px(pt, vm->scratch[0].encode);

			spin_lock(&pd->lock);
			if (pd->entry[pde] == &vm->scratch[1]) {
				pd->entry[pde] = pt;
			} else {
				alloc = pt;
				pt = pd->entry[pde];
			}

			flush = true;
		}

		atomic_add(count, &pt->used);
	}
	spin_unlock(&pd->lock);

	if (flush && i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) {
		intel_wakeref_t wakeref;

		with_intel_runtime_pm(&vm->i915->runtime_pm, wakeref)
			gen6_flush_pd(ppgtt, from, start);
	}

	goto out;

unwind_out:
	gen6_ppgtt_clear_range(vm, from, start - from);
out:
	if (alloc)
		free_px(vm, alloc);
	return ret;
}

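/*
 * Set up the two-level scratch: scratch[0] is a single data page that
 * every unused PTE points at, scratch[1] is a page table filled with
 * scratch[0] PTEs that every unused PDE points at.
 */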
static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
{
	struct i915_address_space * const vm = &ppgtt->base.vm;
	struct i915_page_directory * const pd = ppgtt->base.pd;
	int ret;

	ret = setup_scratch_page(vm, __GFP_HIGHMEM);
	if (ret)
		return ret;

	vm->scratch[0].encode =
		vm->pte_encode(px_dma(&vm->scratch[0]),
			       I915_CACHE_NONE, PTE_READ_ONLY);

	if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[1])))) {
		cleanup_scratch_page(vm);
		return -ENOMEM;
	}

	fill32_px(&vm->scratch[1], vm->scratch[0].encode);
	memset_p(pd->entry, &vm->scratch[1], I915_PDES);

	return 0;
}

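/* Free every page table that was actually allocated, skipping the
 * entries that still point at the shared scratch page table.
 */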
static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt)
{
	struct i915_page_directory * const pd = ppgtt->base.pd;
	struct i915_page_dma * const scratch =
		px_base(&ppgtt->base.vm.scratch[1]);
	struct i915_page_table *pt;
	u32 pde;

	gen6_for_all_pdes(pt, pd, pde)
		if (px_base(pt) != scratch)
			free_px(&ppgtt->base.vm, pt);
}

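/* Tear down the address space: drop the PD vma, then free the page
 * tables, the scratch pages and the page directory itself.
 */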
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));

	__i915_vma_put(ppgtt->vma);

	gen6_ppgtt_free_pd(ppgtt);
	free_scratch(vm);

	mutex_destroy(&ppgtt->flush);
	mutex_destroy(&ppgtt->pin_mutex);
	kfree(ppgtt->base.pd);
}

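/*
 * The PD vma has no backing object, so there are no real pages to set
 * up; an error pointer is stashed in vma->pages purely to mark the vma
 * as "has pages" for the vma state machine.
 */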
static int pd_vma_set_pages(struct i915_vma *vma)
{
	vma->pages = ERR_PTR(-ENODEV);
	return 0;
}

static void pd_vma_clear_pages(struct i915_vma *vma)
{
	GEM_BUG_ON(!vma->pages);

	vma->pages = NULL;
}

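/*
 * Binding the PD vma gives the page directory its home in the GGTT:
 * record the offset, compute the iomem address of the first PDE, and
 * write out the PDEs for all current page tables.
 */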
static int pd_vma_bind(struct i915_address_space *vm,
		       struct i915_vma *vma,
		       enum i915_cache_level cache_level,
		       u32 unused)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	struct gen6_ppgtt *ppgtt = vma->private;
	u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;

	px_base(ppgtt->base.pd)->ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
	ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;

	gen6_flush_pd(ppgtt, 0, ppgtt->base.vm.total);
	return 0;
}

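/*
 * On unbind, opportunistically reclaim page tables that clear_range
 * emptied but could not remove while the PD was live (the hw caches
 * PDEs inside the context, see gen6_ppgtt_clear_range).
 */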
static void pd_vma_unbind(struct i915_address_space *vm, struct i915_vma *vma)
{
	struct gen6_ppgtt *ppgtt = vma->private;
	struct i915_page_directory * const pd = ppgtt->base.pd;
	struct i915_page_dma * const scratch =
		px_base(&ppgtt->base.vm.scratch[1]);
	struct i915_page_table *pt;
	unsigned int pde;

	if (!ppgtt->scan_for_unused_pt)
		return;

	/* Free all page tables that are no longer in use */
	gen6_for_all_pdes(pt, ppgtt->base.pd, pde) {
		if (px_base(pt) == scratch || atomic_read(&pt->used))
			continue;

		free_px(&ppgtt->base.vm, pt);
		pd->entry[pde] = scratch;
	}

	ppgtt->scan_for_unused_pt = false;
}

static const struct i915_vma_ops pd_vma_ops = {
	.set_pages = pd_vma_set_pages,
	.clear_pages = pd_vma_clear_pages,
	.bind_vma = pd_vma_bind,
	.unbind_vma = pd_vma_unbind,
};

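/*
 * Wrap the page directory in a bare GGTT vma of its own, with custom
 * ops and no backing object, so the PD can be pinned into the GGTT like
 * any other vma.
 */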
static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
{
	struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt;
	struct i915_vma *vma;

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(size > ggtt->vm.total);

	vma = i915_vma_alloc();
	if (!vma)
		return ERR_PTR(-ENOMEM);

	i915_active_init(&vma->active, NULL, NULL);

	kref_init(&vma->ref);
	mutex_init(&vma->pages_mutex);
	vma->vm = i915_vm_get(&ggtt->vm);
	vma->ops = &pd_vma_ops;
	vma->private = ppgtt;

	vma->size = size;
	vma->fence_size = size;
	atomic_set(&vma->flags, I915_VMA_GGTT);
	vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */

	INIT_LIST_HEAD(&vma->obj_link);
	INIT_LIST_HEAD(&vma->closed_link);

	return vma;
}

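/*
 * Pin the page directory into the GGTT for as long as the ppgtt is in
 * use. Only the first pin does real work; later callers just take an
 * extra reference on pin_count.
 */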
int gen6_ppgtt_pin(struct i915_ppgtt *base)
{
	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
	int err;

	GEM_BUG_ON(!atomic_read(&ppgtt->base.vm.open));

	/*
	 * Work around the limited maximum vma->pin_count and the
	 * aliasing_ppgtt, which will be pinned into every active context.
	 * (When vma->pin_count becomes atomic, I expect we will naturally
	 * need a larger, unpacked, type and kill this redundancy.)
	 */
	if (atomic_add_unless(&ppgtt->pin_count, 1, 0))
		return 0;

	if (mutex_lock_interruptible(&ppgtt->pin_mutex))
		return -EINTR;

	/*
	 * The PPGTT PDEs reside in the GGTT and consist of 512 entries. The
	 * allocator works in address space sizes, so it's multiplied by page
	 * size. We allocate at the top of the GTT to avoid fragmentation.
	 */
	err = 0;
	if (!atomic_read(&ppgtt->pin_count))
		err = i915_ggtt_pin(ppgtt->vma, GEN6_PD_ALIGN, PIN_HIGH);
	if (!err)
		atomic_inc(&ppgtt->pin_count);
	mutex_unlock(&ppgtt->pin_mutex);

	return err;
}

void gen6_ppgtt_unpin(struct i915_ppgtt *base)
{
	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);

	GEM_BUG_ON(!atomic_read(&ppgtt->pin_count));
	if (atomic_dec_and_test(&ppgtt->pin_count))
		i915_vma_unpin(ppgtt->vma);
}

void gen6_ppgtt_unpin_all(struct i915_ppgtt *base)
{
	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);

	if (!atomic_read(&ppgtt->pin_count))
		return;

	i915_vma_unpin(ppgtt->vma);
	atomic_set(&ppgtt->pin_count, 0);
}

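/*
 * Construct a gen6/gen7 ppgtt: a single page directory living in the
 * GGTT, with the vm ops wired up and scratch pages prepared. The PD vma
 * itself is only pinned on first use, see gen6_ppgtt_pin().
 */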
struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt)
{
	struct i915_ggtt * const ggtt = gt->ggtt;
	struct gen6_ppgtt *ppgtt;
	int err;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	mutex_init(&ppgtt->flush);
	mutex_init(&ppgtt->pin_mutex);

	ppgtt_init(&ppgtt->base, gt);
	ppgtt->base.vm.top = 1;

	ppgtt->base.vm.bind_async_flags = I915_VMA_LOCAL_BIND;
	ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
	ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
	ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;

	ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;

	ppgtt->base.pd = __alloc_pd(sizeof(*ppgtt->base.pd));
	if (!ppgtt->base.pd) {
		err = -ENOMEM;
		goto err_free;
	}

	err = gen6_ppgtt_init_scratch(ppgtt);
	if (err)
		goto err_pd;

	ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
	if (IS_ERR(ppgtt->vma)) {
		err = PTR_ERR(ppgtt->vma);
		goto err_scratch;
	}

	return &ppgtt->base;

err_scratch:
	free_scratch(&ppgtt->base.vm);
err_pd:
	kfree(ppgtt->base.pd);
err_free:
	mutex_destroy(&ppgtt->pin_mutex);
	kfree(ppgtt);
	return ERR_PTR(err);
}