xref: /linux/drivers/gpu/drm/i915/gt/gen8_ppgtt.c (revision a4eb44a6435d6d8f9e642407a4a06f65eb90ca04)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/log2.h>

#include "gem/i915_gem_lmem.h"

#include "gen8_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_pvinfo.h"
#include "i915_vgpu.h"
#include "intel_gt.h"
#include "intel_gtt.h"

static u64 gen8_pde_encode(const dma_addr_t addr,
			   const enum i915_cache_level level)
{
	u64 pde = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW;

	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE;
	else
		pde |= PPAT_UNCACHED;

	return pde;
}

static u64 gen8_pte_encode(dma_addr_t addr,
			   enum i915_cache_level level,
			   u32 flags)
{
	gen8_pte_t pte = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW;

	if (unlikely(flags & PTE_READ_ONLY))
		pte &= ~GEN8_PAGE_RW;

	if (flags & PTE_LM)
		pte |= GEN12_PPGTT_PTE_LM;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC;
		break;
	default:
		pte |= PPAT_CACHED;
		break;
	}

	return pte;
}
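
/*
 * For illustration: a 4K-aligned system-memory page at dma address
 * 0x12340000 mapped with I915_CACHE_LLC and no extra flags encodes as
 *
 *	pte = 0x12340000 | GEN8_PAGE_PRESENT | GEN8_PAGE_RW | PPAT_CACHED
 *
 * The dma address is page aligned, so its low 12 bits are free to carry
 * the present/RW/PPAT control bits.
 */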

static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
{
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
	enum vgt_g2v_type msg;
	int i;

	if (create)
		atomic_inc(px_used(ppgtt->pd)); /* never remove */
	else
		atomic_dec(px_used(ppgtt->pd));

	mutex_lock(&i915->vgpu.lock);

	if (i915_vm_is_4lvl(&ppgtt->vm)) {
		const u64 daddr = px_dma(ppgtt->pd);

		intel_uncore_write(uncore,
				   vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
		intel_uncore_write(uncore,
				   vgtif_reg(pdp[0].hi), upper_32_bits(daddr));

		msg = create ?
			VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
			VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY;
	} else {
		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

			intel_uncore_write(uncore,
					   vgtif_reg(pdp[i].lo),
					   lower_32_bits(daddr));
			intel_uncore_write(uncore,
					   vgtif_reg(pdp[i].hi),
					   upper_32_bits(daddr));
		}

		msg = create ?
			VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
			VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY;
	}

	/* g2v_notify atomically (via hv trap) consumes the message packet. */
	intel_uncore_write(uncore, vgtif_reg(g2v_notify), msg);

	mutex_unlock(&i915->vgpu.lock);
}

/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))

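/*
 * With 4K pages this gives 512 (GEN8_PDES) 8-byte entries per level and
 * 9 index bits per level, so a GTT address decomposes as (illustrative):
 *
 *	__gen8_pte_index(addr, 0) -> bits [20:12] (PTE within a PT)
 *	__gen8_pte_index(addr, 1) -> bits [29:21] (PDE within a PD)
 *	__gen8_pte_index(addr, 2) -> bits [38:30] (PDPE)
 *	__gen8_pte_index(addr, 3) -> bits [47:39] (PML4E, 4lvl only)
 *
 * The gen8_pd_index() variants take an index that has already been shifted
 * down by GEN8_PTE_SHIFT (i.e. a page-frame number), which is how the
 * clear/alloc walkers below operate.
 */
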
#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)

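/*
 * Return the number of level-@lvl entries spanned by [@start, @end) and
 * write the first entry index to *@idx. @end is biased upwards by one
 * entry span minus one so that a partially covered final entry is still
 * counted, and the result is clamped at the directory boundary so callers
 * walk at most one directory per call.
 */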
static unsigned int
gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
{
	const int shift = gen8_pd_shift(lvl);
	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

	GEM_BUG_ON(start >= end);
	end += ~mask >> gen8_pd_shift(1);

	*idx = i915_pde_index(start, shift);
	if ((start ^ end) & mask)
		return GEN8_PDES - *idx;
	else
		return i915_pde_index(end, shift) - *idx;
}

static bool gen8_pd_contains(u64 start, u64 end, int lvl)
{
	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

	GEM_BUG_ON(start >= end);
	return (start ^ end) & mask && (start & ~mask) == 0;
}

static unsigned int gen8_pt_count(u64 start, u64 end)
{
	GEM_BUG_ON(start >= end);
	if ((start ^ end) >> gen8_pd_shift(1))
		return GEN8_PDES - (start & (GEN8_PDES - 1));
	else
		return end - start;
}

static unsigned int gen8_pd_top_count(const struct i915_address_space *vm)
{
	unsigned int shift = __gen8_pte_shift(vm->top);

	return (vm->total + (1ull << shift) - 1) >> shift;
}
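
/*
 * e.g. a full 48b 4-level vm (vm->top == 3, vm->total == 1ULL << 48) has
 * 1ULL << 48 >> 39 == 512 top-level entries, while a legacy 32b 3-level vm
 * (vm->top == 2, vm->total == 1ULL << 32) has 1ULL << 32 >> 30 == 4,
 * matching GEN8_3LVL_PDPES.
 */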

static struct i915_page_directory *
gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
{
	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);

	if (vm->top == 2)
		return ppgtt->pd;
	else
		return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
}

static struct i915_page_directory *
gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
{
	return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
}

static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
				 struct i915_page_directory *pd,
				 int count, int lvl)
{
	if (lvl) {
		void **pde = pd->entry;

		do {
			if (!*pde)
				continue;

			__gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
		} while (pde++, --count);
	}

	free_px(vm, &pd->pt, lvl);
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (intel_vgpu_active(vm->i915))
		gen8_ppgtt_notify_vgt(ppgtt, false);

	__gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
	free_scratch(vm);
}

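/*
 * Remove the range [start, end) of page-frame indices from the tree rooted
 * at @pd. A child whose whole span falls inside the range and whose
 * use-count shows it fully populated is torn down wholesale; otherwise the
 * walk recurses and, at the leaf level, rewrites the affected PTEs with the
 * scratch encoding. Returns the updated start index.
 */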
static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
			      struct i915_page_directory * const pd,
			      u64 start, const u64 end, int lvl)
{
	const struct drm_i915_gem_object * const scratch = vm->scratch[lvl];
	unsigned int idx, len;

	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

	len = gen8_pd_range(start, end, lvl--, &idx);
	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
	    __func__, vm, lvl + 1, start, end,
	    idx, len, atomic_read(px_used(pd)));
	GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));

	do {
		struct i915_page_table *pt = pd->entry[idx];

		if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
		    gen8_pd_contains(start, end, lvl)) {
			DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
			    __func__, vm, lvl + 1, idx, start, end);
			clear_pd_entry(pd, idx, scratch);
			__gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
			start += (u64)I915_PDES << gen8_pd_shift(lvl);
			continue;
		}

		if (lvl) {
			start = __gen8_ppgtt_clear(vm, as_pd(pt),
						   start, end, lvl);
		} else {
			unsigned int count;
			u64 *vaddr;

			count = gen8_pt_count(start, end);
			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
			    __func__, vm, lvl, start, end,
			    gen8_pd_index(start, 0), count,
			    atomic_read(&pt->used));
			GEM_BUG_ON(!count || count >= atomic_read(&pt->used));

			vaddr = px_vaddr(pt);
			memset64(vaddr + gen8_pd_index(start, 0),
				 vm->scratch[0]->encode,
				 count);

			atomic_sub(count, &pt->used);
			start += count;
		}

		if (release_pd_entry(pd, idx, pt, scratch))
			free_px(vm, pt, lvl);
	} while (idx++, --len);

	return start;
}

static void gen8_ppgtt_clear(struct i915_address_space *vm,
			     u64 start, u64 length)
{
	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(range_overflows(start, length, vm->total));

	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;
	GEM_BUG_ON(length == 0);

	__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
			   start, start + length, vm->top);
}

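/*
 * Populate the range [*start, end) of page-frame indices below @pd. New
 * page directories/tables are taken from the pre-allocated @stash (so this
 * path cannot fail) and filled with the scratch encoding before being
 * published under pd->lock; if another thread installed an entry first,
 * the stashed one is simply left on the stash for later use.
 */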
static void __gen8_ppgtt_alloc(struct i915_address_space * const vm,
			       struct i915_vm_pt_stash *stash,
			       struct i915_page_directory * const pd,
			       u64 * const start, const u64 end, int lvl)
{
	unsigned int idx, len;

	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

	len = gen8_pd_range(*start, end, lvl--, &idx);
	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
	    __func__, vm, lvl + 1, *start, end,
	    idx, len, atomic_read(px_used(pd)));
	GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));

	spin_lock(&pd->lock);
	GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
	do {
		struct i915_page_table *pt = pd->entry[idx];

		if (!pt) {
			spin_unlock(&pd->lock);

			DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
			    __func__, vm, lvl + 1, idx);

			pt = stash->pt[!!lvl];
			__i915_gem_object_pin_pages(pt->base);

			fill_px(pt, vm->scratch[lvl]->encode);

			spin_lock(&pd->lock);
			if (likely(!pd->entry[idx])) {
				stash->pt[!!lvl] = pt->stash;
				atomic_set(&pt->used, 0);
				set_pd_entry(pd, idx, pt);
			} else {
				pt = pd->entry[idx];
			}
		}

		if (lvl) {
			atomic_inc(&pt->used);
			spin_unlock(&pd->lock);

			__gen8_ppgtt_alloc(vm, stash,
					   as_pd(pt), start, end, lvl);

			spin_lock(&pd->lock);
			atomic_dec(&pt->used);
			GEM_BUG_ON(!atomic_read(&pt->used));
		} else {
			unsigned int count = gen8_pt_count(*start, end);

			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
			    __func__, vm, lvl, *start, end,
			    gen8_pd_index(*start, 0), count,
			    atomic_read(&pt->used));

			atomic_add(count, &pt->used);
			/* All other pdes may be simultaneously removed */
			GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES);
			*start += count;
		}
	} while (idx++, --len);
	spin_unlock(&pd->lock);
}

static void gen8_ppgtt_alloc(struct i915_address_space *vm,
			     struct i915_vm_pt_stash *stash,
			     u64 start, u64 length)
{
	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(range_overflows(start, length, vm->total));

	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;
	GEM_BUG_ON(length == 0);

	__gen8_ppgtt_alloc(vm, stash, i915_vm_to_ppgtt(vm)->pd,
			   &start, start + length, vm->top);
}

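/*
 * Walk every leaf page table backing [*start, end), invoking @fn on each
 * one. The entries are expected to have been allocated beforehand; each
 * table is held with a temporary use-count across the callback.
 */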
static void __gen8_ppgtt_foreach(struct i915_address_space *vm,
				 struct i915_page_directory *pd,
				 u64 *start, u64 end, int lvl,
				 void (*fn)(struct i915_address_space *vm,
					    struct i915_page_table *pt,
					    void *data),
				 void *data)
{
	unsigned int idx, len;

	len = gen8_pd_range(*start, end, lvl--, &idx);

	spin_lock(&pd->lock);
	do {
		struct i915_page_table *pt = pd->entry[idx];

		atomic_inc(&pt->used);
		spin_unlock(&pd->lock);

		if (lvl) {
			__gen8_ppgtt_foreach(vm, as_pd(pt), start, end, lvl,
					     fn, data);
		} else {
			fn(vm, pt, data);
			*start += gen8_pt_count(*start, end);
		}

		spin_lock(&pd->lock);
		atomic_dec(&pt->used);
	} while (idx++, --len);
	spin_unlock(&pd->lock);
}

static void gen8_ppgtt_foreach(struct i915_address_space *vm,
			       u64 start, u64 length,
			       void (*fn)(struct i915_address_space *vm,
					  struct i915_page_table *pt,
					  void *data),
			       void *data)
{
	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;

	__gen8_ppgtt_foreach(vm, i915_vm_to_ppgtt(vm)->pd,
			     &start, start + length, vm->top,
			     fn, data);
}

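/*
 * Write 4K PTEs for the scatterlist pages starting at page index @idx.
 * Returns 0 once the scatterlist is exhausted, or the next index when the
 * walk steps beyond the supplied pdp, letting the caller look up the next
 * pdp and continue.
 */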
static __always_inline u64
gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
		      struct i915_page_directory *pdp,
		      struct sgt_dma *iter,
		      u64 idx,
		      enum i915_cache_level cache_level,
		      u32 flags)
{
	struct i915_page_directory *pd;
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
	gen8_pte_t *vaddr;

	pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
	vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
	do {
		GEM_BUG_ON(sg_dma_len(iter->sg) < I915_GTT_PAGE_SIZE);
		vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;

		iter->dma += I915_GTT_PAGE_SIZE;
		if (iter->dma >= iter->max) {
			iter->sg = __sg_next(iter->sg);
			if (!iter->sg || sg_dma_len(iter->sg) == 0) {
				idx = 0;
				break;
			}

			iter->dma = sg_dma_address(iter->sg);
			iter->max = iter->dma + sg_dma_len(iter->sg);
		}

		if (gen8_pd_index(++idx, 0) == 0) {
			if (gen8_pd_index(idx, 1) == 0) {
				/* Limited by sg length for 3lvl */
				if (gen8_pd_index(idx, 2) == 0)
					break;

				pd = pdp->entry[gen8_pd_index(idx, 2)];
			}

			clflush_cache_range(vaddr, PAGE_SIZE);
			vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
		}
	} while (1);
	clflush_cache_range(vaddr, PAGE_SIZE);

	return idx;
}

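/*
 * Insert a scatterlist using the largest GTT page size available for each
 * chunk: a 2M PDE when the dma address, remaining length and GTT offset
 * all allow it, otherwise 4K PTEs; a page table wholly describable by 64K
 * entries is flagged as such afterwards via GEN8_PDE_IPS_64K on its PDE.
 */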
static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
				   struct sgt_dma *iter,
				   enum i915_cache_level cache_level,
				   u32 flags)
{
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
	unsigned int rem = sg_dma_len(iter->sg);
	u64 start = vma->node.start;

	GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));

	do {
		struct i915_page_directory * const pdp =
			gen8_pdp_for_page_address(vma->vm, start);
		struct i915_page_directory * const pd =
			i915_pd_entry(pdp, __gen8_pte_index(start, 2));
		gen8_pte_t encode = pte_encode;
		unsigned int maybe_64K = -1;
		unsigned int page_size;
		gen8_pte_t *vaddr;
		u16 index;

		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
		    rem >= I915_GTT_PAGE_SIZE_2M &&
		    !__gen8_pte_index(start, 0)) {
			index = __gen8_pte_index(start, 1);
			encode |= GEN8_PDE_PS_2M;
			page_size = I915_GTT_PAGE_SIZE_2M;

			vaddr = px_vaddr(pd);
		} else {
			struct i915_page_table *pt =
				i915_pt_entry(pd, __gen8_pte_index(start, 1));

			index = __gen8_pte_index(start, 0);
			page_size = I915_GTT_PAGE_SIZE;

			if (!index &&
			    vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
			    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
			    (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
			     rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
				maybe_64K = __gen8_pte_index(start, 1);

			vaddr = px_vaddr(pt);
		}

		do {
			GEM_BUG_ON(sg_dma_len(iter->sg) < page_size);
			vaddr[index++] = encode | iter->dma;

			start += page_size;
			iter->dma += page_size;
			rem -= page_size;
			if (iter->dma >= iter->max) {
				iter->sg = __sg_next(iter->sg);
				if (!iter->sg)
					break;

				rem = sg_dma_len(iter->sg);
				if (!rem)
					break;

				iter->dma = sg_dma_address(iter->sg);
				iter->max = iter->dma + rem;

				if (maybe_64K != -1 && index < I915_PDES &&
				    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
				      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
				       rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
					maybe_64K = -1;

				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
					break;
			}
		} while (rem >= page_size && index < I915_PDES);

		clflush_cache_range(vaddr, PAGE_SIZE);

		/*
		 * Is it safe to mark the 2M block as 64K? -- Either we have
		 * filled the whole page-table with 64K entries, or we have
		 * filled part of it, reached the end of the sg table and
		 * have enough padding.
		 */
		if (maybe_64K != -1 &&
		    (index == I915_PDES ||
		     (i915_vm_has_scratch_64K(vma->vm) &&
		      !iter->sg && IS_ALIGNED(vma->node.start +
					      vma->node.size,
					      I915_GTT_PAGE_SIZE_2M)))) {
			vaddr = px_vaddr(pd);
			vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
			clflush_cache_range(vaddr, PAGE_SIZE);
			page_size = I915_GTT_PAGE_SIZE_64K;

			/*
			 * We write all 4K page entries, even when using 64K
			 * pages. In order to verify that the HW isn't cheating
			 * by using the 4K PTE instead of the 64K PTE, we want
			 * to remove all the surplus entries. If the HW skipped
			 * the 64K PTE, it will read/write into the scratch page
			 * instead - which we detect as missing results during
			 * selftests.
			 */
			if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
				u16 i;

				encode = vma->vm->scratch[0]->encode;
				vaddr = px_vaddr(i915_pt_entry(pd, maybe_64K));

				for (i = 1; i < index; i += 16)
					memset64(vaddr + i, encode, 15);

				clflush_cache_range(vaddr, PAGE_SIZE);
			}
		}

		vma->page_sizes.gtt |= page_size;
	} while (iter->sg && sg_dma_len(iter->sg));
}

static void gen8_ppgtt_insert(struct i915_address_space *vm,
			      struct i915_vma *vma,
			      enum i915_cache_level cache_level,
			      u32 flags)
{
	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
	struct sgt_dma iter = sgt_dma(vma);

	if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
		gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
	} else {
		u64 idx = vma->node.start >> GEN8_PTE_SHIFT;

		do {
			struct i915_page_directory * const pdp =
				gen8_pdp_for_page_index(vm, idx);

			idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
						    cache_level, flags);
		} while (idx);

		vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
	}
}

static void gen8_ppgtt_insert_entry(struct i915_address_space *vm,
				    dma_addr_t addr,
				    u64 offset,
				    enum i915_cache_level level,
				    u32 flags)
{
	u64 idx = offset >> GEN8_PTE_SHIFT;
	struct i915_page_directory * const pdp =
		gen8_pdp_for_page_index(vm, idx);
	struct i915_page_directory *pd =
		i915_pd_entry(pdp, gen8_pd_index(idx, 2));
	gen8_pte_t *vaddr;

	vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
	vaddr[gen8_pd_index(idx, 0)] = gen8_pte_encode(addr, level, flags);
	clflush_cache_range(&vaddr[gen8_pd_index(idx, 0)], sizeof(*vaddr));
}

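/*
 * The scratch hierarchy: scratch[0] is the scratch page itself, and each
 * scratch[i] for i > 0 is a directory whose entries all point at
 * scratch[i - 1], so an unpopulated address resolves to the scratch page
 * at every level of the walk.
 */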
static int gen8_init_scratch(struct i915_address_space *vm)
{
	u32 pte_flags;
	int ret;
	int i;

	/*
	 * If everybody agrees not to write into the scratch page,
	 * we can reuse it for every vm, keeping contexts and processes
	 * separate.
	 */
	if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) {
		struct i915_address_space *clone = vm->gt->vm;

		GEM_BUG_ON(!clone->has_read_only);

		vm->scratch_order = clone->scratch_order;
		for (i = 0; i <= vm->top; i++)
			vm->scratch[i] = i915_gem_object_get(clone->scratch[i]);

		return 0;
	}

	ret = setup_scratch_page(vm);
	if (ret)
		return ret;

	pte_flags = vm->has_read_only;
	if (i915_gem_object_is_lmem(vm->scratch[0]))
		pte_flags |= PTE_LM;

	vm->scratch[0]->encode =
		gen8_pte_encode(px_dma(vm->scratch[0]),
				I915_CACHE_NONE, pte_flags);

	for (i = 1; i <= vm->top; i++) {
		struct drm_i915_gem_object *obj;

		obj = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
		if (IS_ERR(obj))
			goto free_scratch;

		ret = map_pt_dma(vm, obj);
		if (ret) {
			i915_gem_object_put(obj);
			goto free_scratch;
		}

		fill_px(obj, vm->scratch[i - 1]->encode);
		obj->encode = gen8_pde_encode(px_dma(obj), I915_CACHE_NONE);

		vm->scratch[i] = obj;
	}

	return 0;

free_scratch:
	while (i--)
		i915_gem_object_put(vm->scratch[i]);
	return -ENOMEM;
}

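/*
 * With the legacy 3-level layout the hardware expects all four PDP slots to
 * point at valid page directories, so allocate them all up front and keep
 * them pinned for the lifetime of the ppgtt.
 */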
static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->vm;
	struct i915_page_directory *pd = ppgtt->pd;
	unsigned int idx;

	GEM_BUG_ON(vm->top != 2);
	GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);

	for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
		struct i915_page_directory *pde;
		int err;

		pde = alloc_pd(vm);
		if (IS_ERR(pde))
			return PTR_ERR(pde);

		err = map_pt_dma(vm, pde->pt.base);
		if (err) {
			free_pd(vm, pde);
			return err;
		}

		fill_px(pde, vm->scratch[1]->encode);
		set_pd_entry(pd, idx, pde);
		atomic_inc(px_used(pde)); /* keep pinned */
	}
	wmb();

	return 0;
}

static struct i915_page_directory *
gen8_alloc_top_pd(struct i915_address_space *vm)
{
	const unsigned int count = gen8_pd_top_count(vm);
	struct i915_page_directory *pd;
	int err;

	GEM_BUG_ON(count > I915_PDES);

	pd = __alloc_pd(count);
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
	if (IS_ERR(pd->pt.base)) {
		err = PTR_ERR(pd->pt.base);
		pd->pt.base = NULL;
		goto err_pd;
	}

	err = map_pt_dma(vm, pd->pt.base);
	if (err)
		goto err_pd;

	fill_page_dma(px_base(pd), vm->scratch[vm->top]->encode, count);
	atomic_inc(px_used(pd)); /* mark as pinned */
	return pd;

err_pd:
	free_pd(vm, pd);
	return ERR_PTR(err);
}

/*
 * GEN8 legacy ppgtt programming is accomplished through a maximum of 4 PDP
 * registers, with a net effect resembling a 2-level page table in normal x86
 * terms. Each PDP represents 1GB of memory: 4 * 512 * 512 * 4096 = 4GB of
 * legacy 32b address space.
 */
struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
				     unsigned long lmem_pt_obj_flags)
{
	struct i915_ppgtt *ppgtt;
	int err;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ppgtt_init(ppgtt, gt, lmem_pt_obj_flags);
	ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;
	ppgtt->vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen8_pte_t));

	/*
	 * From bdw, there is hw support for read-only pages in the PPGTT.
	 *
	 * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
	 * for now.
	 *
	 * Gen12 has inherited the same read-only fault issue from gen11.
	 */
	ppgtt->vm.has_read_only = !IS_GRAPHICS_VER(gt->i915, 11, 12);

	if (HAS_LMEM(gt->i915)) {
		ppgtt->vm.alloc_pt_dma = alloc_pt_lmem;

		/*
		 * On some platforms the hw has dropped support for 4K GTT pages
		 * when dealing with LMEM, and due to the design of 64K GTT
		 * pages in the hw, we can only mark the *entire* page-table as
		 * operating in 64K GTT mode, since the enable bit is still on
		 * the pde, and not the pte. And since we still need to allow
		 * 4K GTT pages for SMEM objects, we can't have a "normal" 4K
		 * page-table with scratch pointing to LMEM, since that's
		 * undefined from the hw pov. The simplest solution is to just
		 * move the 64K scratch page to SMEM on such platforms and call
		 * it a day, since that should work for all configurations.
		 */
		if (HAS_64K_PAGES(gt->i915))
			ppgtt->vm.alloc_scratch_dma = alloc_pt_dma;
		else
			ppgtt->vm.alloc_scratch_dma = alloc_pt_lmem;
	} else {
		ppgtt->vm.alloc_pt_dma = alloc_pt_dma;
		ppgtt->vm.alloc_scratch_dma = alloc_pt_dma;
	}

	err = gen8_init_scratch(&ppgtt->vm);
	if (err)
		goto err_free;

	ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
	if (IS_ERR(ppgtt->pd)) {
		err = PTR_ERR(ppgtt->pd);
		goto err_free_scratch;
	}

	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
		err = gen8_preallocate_top_level_pdp(ppgtt);
		if (err)
			goto err_free_pd;
	}

	ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
	ppgtt->vm.insert_entries = gen8_ppgtt_insert;
	ppgtt->vm.insert_page = gen8_ppgtt_insert_entry;
	ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
	ppgtt->vm.clear_range = gen8_ppgtt_clear;
	ppgtt->vm.foreach = gen8_ppgtt_foreach;

	ppgtt->vm.pte_encode = gen8_pte_encode;

	if (intel_vgpu_active(gt->i915))
		gen8_ppgtt_notify_vgt(ppgtt, true);

	ppgtt->vm.cleanup = gen8_ppgtt_cleanup;

	return ppgtt;

err_free_pd:
	__gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
			     gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
err_free_scratch:
	free_scratch(&ppgtt->vm);
err_free:
	kfree(ppgtt);
	return ERR_PTR(err);
}