/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2020 Intel Corporation
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (i.e. gen6 on the top, gen8 on the bottom).
 */

#ifndef __INTEL_GTT_H__
#define __INTEL_GTT_H__

#include <linux/io-mapping.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include <drm/drm_mm.h>

#include "gt/intel_reset.h"
#include "i915_selftest.h"
#include "i915_vma_resource.h"
#include "i915_vma_types.h"
#include "i915_params.h"
#include "intel_memory_region.h"

#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
#define GTT_TRACE(...) trace_printk(__VA_ARGS__)
#else
#define GTT_TRACE(...)
#endif

#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */

#define I915_GTT_PAGE_SIZE_4K	BIT_ULL(12)
#define I915_GTT_PAGE_SIZE_64K	BIT_ULL(16)
#define I915_GTT_PAGE_SIZE_2M	BIT_ULL(21)

#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M

#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE

#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE

#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;

#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)

#define I915_PTES(pte_len)		((unsigned int)(PAGE_SIZE / (pte_len)))
#define I915_PTE_MASK(pte_len)		(I915_PTES(pte_len) - 1)
#define I915_PDES			512
#define I915_PDE_MASK			(I915_PDES - 1)

/* gen6-hsw uses PTE bits 11:4 for physical address bits 39:32 */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
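/*
 * Example (illustrative): for addr == 0x3_0000_1000, address bits 39:32
 * are 0x03, so ((addr >> 28) & 0xff0) == 0x030 and the encoded entry
 * carries those high bits in 11:4 alongside the page address in 31:12.
 */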
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define GEN6_PTE_VALID			REG_BIT(0)

#define GEN6_PTES			I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE		        (I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN			(PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT			22
#define GEN6_PDE_VALID			REG_BIT(0)
#define NUM_PTE(pde_shift)     (1 << (pde_shift - PAGE_SHIFT))

#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES	REG_BIT(2)
#define BYT_PTE_WRITEABLE		REG_BIT(1)

#define MTL_PPGTT_PTE_PAT3	BIT_ULL(62)
#define GEN12_PPGTT_PTE_LM	BIT_ULL(11)
#define GEN12_PPGTT_PTE_PAT2	BIT_ULL(7)
#define GEN12_PPGTT_PTE_PAT1	BIT_ULL(4)
#define GEN12_PPGTT_PTE_PAT0	BIT_ULL(3)

#define GEN12_GGTT_PTE_LM		BIT_ULL(1)
#define MTL_GGTT_PTE_PAT0		BIT_ULL(52)
#define MTL_GGTT_PTE_PAT1		BIT_ULL(53)
#define GEN12_GGTT_PTE_ADDR_MASK	GENMASK_ULL(45, 12)
#define MTL_GGTT_PTE_PAT_MASK		GENMASK_ULL(53, 52)

#define GEN12_PDE_64K BIT(6)
#define GEN12_PTE_PS64 BIT(8)

/*
 * Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
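/*
 * Worked example: HSW_CACHEABILITY_CONTROL(0x8) puts the low three bits
 * (0x0) into PTE bits 3:1 and bit 3 of the value into PTE bit 11,
 * yielding 0x800 (i.e. HSW_WB_ELLC_LLC_AGE3 below).
 */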
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED		(0)
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)

/*
 * GEN8 32b style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 |  11:0
 * PDPE  |  PDE  |  PTE  | offset
 * The difference compared to a normal x86 3 level page table is that the
 * PDPEs are programmed via register.
 *
 * GEN8 48b style address is defined as a 4 level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 |  11:0
 * PML4E | PDPE  |  PDE  |  PTE  | offset
 */
#define GEN8_3LVL_PDPES			4

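/*
 * Sketch of 48b index extraction implied by the layout above (each level
 * indexes 512 entries, matching I915_PDES):
 *
 *	pml4e = (addr >> 39) & 0x1ff;
 *	pdpe  = (addr >> 30) & 0x1ff;
 *	pde   = (addr >> 21) & 0x1ff;
 *	pte   = (addr >> 12) & 0x1ff;
 */
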
#define PPAT_UNCACHED			(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE			0 /* WB LLC */
#define PPAT_CACHED			_PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC		_PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP			REG_BIT(6)
#define GEN8_PPAT_AGE(x)		((x)<<4)
#define GEN8_PPAT_LLCeLLC		(3<<2)
#define GEN8_PPAT_LLCELLC		(2<<2)
#define GEN8_PPAT_LLC			(1<<2)
#define GEN8_PPAT_WB			(3<<0)
#define GEN8_PPAT_WT			(2<<0)
#define GEN8_PPAT_WC			(1<<0)
#define GEN8_PPAT_UC			(0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
#define GEN8_PPAT(i, x)			((u64)(x) << ((i) * 8))

#define GEN8_PAGE_PRESENT		BIT_ULL(0)
#define GEN8_PAGE_RW			BIT_ULL(1)

#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M   BIT(7)

#define MTL_PPAT_L4_CACHE_POLICY_MASK	REG_GENMASK(3, 2)
#define MTL_PAT_INDEX_COH_MODE_MASK	REG_GENMASK(1, 0)
#define MTL_PPAT_L4_3_UC	REG_FIELD_PREP(MTL_PPAT_L4_CACHE_POLICY_MASK, 3)
#define MTL_PPAT_L4_1_WT	REG_FIELD_PREP(MTL_PPAT_L4_CACHE_POLICY_MASK, 1)
#define MTL_PPAT_L4_0_WB	REG_FIELD_PREP(MTL_PPAT_L4_CACHE_POLICY_MASK, 0)
#define MTL_3_COH_2W	REG_FIELD_PREP(MTL_PAT_INDEX_COH_MODE_MASK, 3)
#define MTL_2_COH_1W	REG_FIELD_PREP(MTL_PAT_INDEX_COH_MODE_MASK, 2)

struct drm_i915_gem_object;
struct i915_fence_reg;
struct i915_vma;
struct intel_gt;

#define for_each_sgt_daddr(__dp, __iter, __sgt) \
	__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)

#define for_each_sgt_daddr_next(__dp, __iter) \
	__for_each_daddr_next(__dp, __iter, I915_GTT_PAGE_SIZE)
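
/*
 * Usage sketch: walk each page-sized DMA address of an sg table and hand
 * it to a PTE writer (write_pte() here is a hypothetical helper; the
 * iterator is the sgt_iter cursor from i915_scatterlist.h):
 *
 *	struct sgt_iter iter;
 *	dma_addr_t daddr;
 *
 *	for_each_sgt_daddr(daddr, iter, sgt)
 *		write_pte(vm, daddr);
 */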

struct i915_page_table {
	struct drm_i915_gem_object *base;
	union {
		atomic_t used;
		struct i915_page_table *stash;
	};
	bool is_compact;
};

struct i915_page_directory {
	struct i915_page_table pt;
	spinlock_t lock;
	void **entry;
};

#define __px_choose_expr(x, type, expr, other) \
	__builtin_choose_expr( \
	__builtin_types_compatible_p(typeof(x), type) || \
	__builtin_types_compatible_p(typeof(x), const type), \
	({ type __x = (type)(x); expr; }), \
	other)

#define px_base(px) \
	__px_choose_expr(px, struct drm_i915_gem_object *, __x, \
	__px_choose_expr(px, struct i915_page_table *, __x->base, \
	__px_choose_expr(px, struct i915_page_directory *, __x->pt.base, \
	(void)0)))
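/*
 * px_base() dispatches on the argument's static type at compile time:
 * px_base(pd) on a struct i915_page_directory * expands to pd->pt.base,
 * px_base(pt) on a struct i915_page_table * expands to pt->base, and a
 * GEM object pointer passes straight through. Any other pointer type
 * degenerates to (void)0 and fails to compile at the point of use.
 */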

struct page *__px_page(struct drm_i915_gem_object *p);
dma_addr_t __px_dma(struct drm_i915_gem_object *p);
#define px_dma(px) (__px_dma(px_base(px)))

void *__px_vaddr(struct drm_i915_gem_object *p);
#define px_vaddr(px) (__px_vaddr(px_base(px)))

#define px_pt(px) \
	__px_choose_expr(px, struct i915_page_table *, __x, \
	__px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
	(void)0))
#define px_used(px) (&px_pt(px)->used)

struct i915_vm_pt_stash {
	/* preallocated chains of page tables/directories */
	struct i915_page_table *pt[2];
	/*
	 * Optionally override the alignment/size of the physical page that
	 * contains each PT. If not set, this defaults to the usual
	 * I915_GTT_PAGE_SIZE_4K. It does not influence the other paging
	 * structures. MUST be a power-of-two. ONLY applicable on discrete
	 * platforms.
	 */
	int pt_sz;
};

struct i915_vma_ops {
	/* Map an object into an address space with the given cache flags. */
	void (*bind_vma)(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma_resource *vma_res,
			 unsigned int pat_index,
			 u32 flags);
	/*
	 * Unmap an object from an address space. This usually consists of
	 * pointing the valid PTEs at a reserved scratch page.
	 */
	void (*unbind_vma)(struct i915_address_space *vm,
			   struct i915_vma_resource *vma_res);
};

struct i915_address_space {
	struct kref ref;
	struct work_struct release_work;

	struct drm_mm mm;
	struct {
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;
	} rsvd;
	struct intel_gt *gt;
	struct drm_i915_private *i915;
	struct drm_i915_file_private *fpriv;
	struct device *dma;
	u64 total;		/* size addr space maps (e.g. 2GB for ggtt) */
	u64 reserved;		/* size addr space reserved */
	u64 min_alignment[INTEL_MEMORY_STOLEN_LOCAL + 1];

	unsigned int bind_async_flags;

	struct mutex mutex; /* protects vma and our lists */

	struct kref resv_ref; /* kref to keep the reservation lock alive. */
	struct dma_resv _resv; /* reservation lock for all pd objects, and buffer pool */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1
#define VM_CLASS_DPT 2

	struct drm_i915_gem_object *scratch[4];
	/**
	 * List of VMAs currently bound.
	 */
	struct list_head bound_list;

	/**
	 * List of VMAs not yet bound or evicted.
	 */
	struct list_head unbound_list;

	/* Global GTT */
	bool is_ggtt:1;

	/* Display page table */
	bool is_dpt:1;

	/* Some systems support read-only mappings for GGTT and/or PPGTT */
	bool has_read_only:1;

	/* Skip pte rewrite on unbind for suspend. Protected by @mutex */
	bool skip_pte_rewrite:1;

	u8 top;
	u8 pd_shift;
	u8 scratch_order;

	/* Flags used when creating page-table objects for this vm */
	unsigned long lmem_pt_obj_flags;

	/* Interval tree for pending unbind vma resources */
	struct rb_root_cached pending_unbind;

	struct drm_i915_gem_object *
		(*alloc_pt_dma)(struct i915_address_space *vm, int sz);
	struct drm_i915_gem_object *
		(*alloc_scratch_dma)(struct i915_address_space *vm, int sz);

	u64 (*pte_encode)(dma_addr_t addr,
			  unsigned int pat_index,
			  u32 flags); /* Create a valid PTE */
#define PTE_READ_ONLY	BIT(0)
#define PTE_LM		BIT(1)

	void (*allocate_va_range)(struct i915_address_space *vm,
				  struct i915_vm_pt_stash *stash,
				  u64 start, u64 length);
	void (*clear_range)(struct i915_address_space *vm,
			    u64 start, u64 length);
	void (*scratch_range)(struct i915_address_space *vm,
			      u64 start, u64 length);
	void (*insert_page)(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    unsigned int pat_index,
			    u32 flags);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct i915_vma_resource *vma_res,
			       unsigned int pat_index,
			       u32 flags);
	void (*raw_insert_page)(struct i915_address_space *vm,
				dma_addr_t addr,
				u64 offset,
				unsigned int pat_index,
				u32 flags);
	void (*raw_insert_entries)(struct i915_address_space *vm,
				   struct i915_vma_resource *vma_res,
				   unsigned int pat_index,
				   u32 flags);
	void (*cleanup)(struct i915_address_space *vm);

	void (*foreach)(struct i915_address_space *vm,
			u64 start, u64 length,
			void (*fn)(struct i915_address_space *vm,
				   struct i915_page_table *pt,
				   void *data),
			void *data);

	struct i915_vma_ops vma_ops;

	I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
	I915_SELFTEST_DECLARE(bool scrub_64K);
};

/*
 * The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations, GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR
 * in the spec.
 */
struct i915_ggtt {
	struct i915_address_space vm;

	struct io_mapping iomap;	/* Mapping to our CPU mappable region */
	struct resource gmadr;          /* GMADR resource */
	resource_size_t mappable_end;	/* End offset that we can CPU map */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;
	void (*invalidate)(struct i915_ggtt *ggtt);

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_ppgtt *alias;

	bool do_idle_maps;

	int mtrr;

	/** Bit 6 swizzling required for X tiling */
	u32 bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	u32 bit_6_swizzle_y;

	u32 pin_bias;

	unsigned int num_fences;
	struct i915_fence_reg *fence_regs;
	struct list_head fence_list;

	/**
	 * List of all objects in gtt_space, currently mmapped by userspace.
	 * All objects within this list must also be on bound_list.
	 */
	struct list_head userfault_list;

	struct mutex error_mutex;
	struct drm_mm_node error_capture;
	struct drm_mm_node uc_fw;

	/** List of GTs mapping this GGTT */
	struct list_head gt_list;
};

struct i915_ppgtt {
	struct i915_address_space vm;

	struct i915_page_directory *pd;
};

#define i915_is_ggtt(vm) ((vm)->is_ggtt)
#define i915_is_dpt(vm) ((vm)->is_dpt)
#define i915_is_ggtt_or_dpt(vm) (i915_is_ggtt(vm) || i915_is_dpt(vm))

bool intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915);

int __must_check
i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww);

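/*
 * An address space wider than 32 bits (i.e. larger than 4 GiB) needs the
 * 4-level layout: (total - 1) >> 32 is non-zero exactly in that case.
 */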
static inline bool
i915_vm_is_4lvl(const struct i915_address_space *vm)
{
	return (vm->total - 1) >> 32;
}

static inline bool
i915_vm_has_scratch_64K(struct i915_address_space *vm)
{
	return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
}

static inline u64 i915_vm_min_alignment(struct i915_address_space *vm,
					enum intel_memory_type type)
{
	/* avoid INTEL_MEMORY_MOCK overflow */
	if ((int)type >= ARRAY_SIZE(vm->min_alignment))
		type = INTEL_MEMORY_SYSTEM;

	return vm->min_alignment[type];
}

static inline u64 i915_vm_obj_min_alignment(struct i915_address_space *vm,
					    struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
	enum intel_memory_type type = mr ? mr->type : INTEL_MEMORY_SYSTEM;

	return i915_vm_min_alignment(vm, type);
}

static inline bool
i915_vm_has_cache_coloring(struct i915_address_space *vm)
{
	return i915_is_ggtt(vm) && vm->mm.color_adjust;
}

static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
	GEM_BUG_ON(!i915_is_ggtt(vm));
	return container_of(vm, struct i915_ggtt, vm);
}

static inline struct i915_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
	GEM_BUG_ON(i915_is_ggtt_or_dpt(vm));
	return container_of(vm, struct i915_ppgtt, vm);
}

static inline struct i915_address_space *
i915_vm_get(struct i915_address_space *vm)
{
	kref_get(&vm->ref);
	return vm;
}

static inline struct i915_address_space *
i915_vm_tryget(struct i915_address_space *vm)
{
	return kref_get_unless_zero(&vm->ref) ? vm : NULL;
}

static inline void assert_vm_alive(struct i915_address_space *vm)
{
	GEM_BUG_ON(!kref_read(&vm->ref));
}

/**
 * i915_vm_resv_get - Obtain a reference on the vm's reservation lock
 * @vm: The vm whose reservation lock we want to share.
 *
 * Return: A pointer to the vm's reservation lock.
 */
static inline struct dma_resv *i915_vm_resv_get(struct i915_address_space *vm)
{
	kref_get(&vm->resv_ref);
	return &vm->_resv;
}

void i915_vm_release(struct kref *kref);

void i915_vm_resv_release(struct kref *kref);

static inline void i915_vm_put(struct i915_address_space *vm)
{
	kref_put(&vm->ref, i915_vm_release);
}

/**
 * i915_vm_resv_put - Release a reference on the vm's reservation lock
 * @vm: The vm whose reservation lock reference we want to release
 */
static inline void i915_vm_resv_put(struct i915_address_space *vm)
{
	kref_put(&vm->resv_ref, i915_vm_resv_release);
}
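
/*
 * Reference pairing (sketch): a reference taken with i915_vm_get() or
 * i915_vm_tryget() is dropped with i915_vm_put(), and a reservation-lock
 * reference from i915_vm_resv_get() is balanced by i915_vm_resv_put().
 */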

void i915_address_space_init(struct i915_address_space *vm, int subclass);
void i915_address_space_fini(struct i915_address_space *vm);

static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
{
	const u32 mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}

/*
 * Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
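/*
 * Worked example (gen6: pde_shift == GEN6_PDE_SHIFT == 22, i.e. 1024 PTEs
 * per table): i915_pte_count(0x3ff000, 0x3000, 22) would cross the 4M
 * table boundary, so it returns 1, the number of PTEs left in the first
 * table; a follow-up call handles the remainder.
 */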
static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
{
	const u64 mask = ~((1ULL << pde_shift) - 1);
	u64 end;

	GEM_BUG_ON(length == 0);
	GEM_BUG_ON(offset_in_page(addr | length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}

static inline u32 i915_pde_index(u64 addr, u32 shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}

static inline struct i915_page_table *
i915_pt_entry(const struct i915_page_directory * const pd,
	      const unsigned short n)
{
	return pd->entry[n];
}

static inline struct i915_page_directory *
i915_pd_entry(const struct i915_page_directory * const pdp,
	      const unsigned short n)
{
	return pdp->entry[n];
}

static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
{
	struct i915_page_table *pt = ppgtt->pd->entry[n];

	return __px_dma(pt ? px_base(pt) : ppgtt->vm.scratch[ppgtt->vm.top]);
}

void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
		unsigned long lmem_pt_obj_flags);
void intel_ggtt_bind_vma(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma_resource *vma_res,
			 unsigned int pat_index,
			 u32 flags);
void intel_ggtt_unbind_vma(struct i915_address_space *vm,
			   struct i915_vma_resource *vma_res);

int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
int i915_ggtt_enable_hw(struct drm_i915_private *i915);
int i915_init_ggtt(struct drm_i915_private *i915);
void i915_ggtt_driver_release(struct drm_i915_private *i915);
void i915_ggtt_driver_late_release(struct drm_i915_private *i915);
struct i915_ggtt *i915_ggtt_create(struct drm_i915_private *i915);

static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
{
	return ggtt->mappable_end > 0;
}

int i915_ppgtt_init_hw(struct intel_gt *gt);

struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
				     unsigned long lmem_pt_obj_flags);

void i915_ggtt_suspend_vm(struct i915_address_space *vm);
bool i915_ggtt_resume_vm(struct i915_address_space *vm);
void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);

void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count);

#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
#define fill32_px(px, v) do {						\
	u64 v__ = lower_32_bits(v);					\
	fill_px((px), v__ << 32 | v__);					\
} while (0)
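/*
 * fill32_px() replicates a 32-bit value into both halves of every 64-bit
 * entry: fill32_px(pt, 0xdeadbeef) writes 0xdeadbeefdeadbeef across the
 * whole page.
 */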

int setup_scratch_page(struct i915_address_space *vm);
void free_scratch(struct i915_address_space *vm);

struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz);
struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz);
struct i915_page_table *alloc_pt(struct i915_address_space *vm, int sz);
struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
struct i915_page_directory *__alloc_pd(int npde);

int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj);

void free_px(struct i915_address_space *vm,
	     struct i915_page_table *pt, int lvl);
#define free_pt(vm, px) free_px(vm, px, 0)
#define free_pd(vm, px) free_px(vm, px_pt(px), 1)

void
__set_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       struct i915_page_table *pt,
	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level));

#define set_pd_entry(pd, idx, to) \
	__set_pd_entry((pd), (idx), px_pt(to), gen8_pde_encode)

void
clear_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       const struct drm_i915_gem_object * const scratch);

bool
release_pd_entry(struct i915_page_directory * const pd,
		 const unsigned short idx,
		 struct i915_page_table * const pt,
		 const struct drm_i915_gem_object * const scratch);
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);

void ppgtt_bind_vma(struct i915_address_space *vm,
		    struct i915_vm_pt_stash *stash,
		    struct i915_vma_resource *vma_res,
		    unsigned int pat_index,
		    u32 flags);
void ppgtt_unbind_vma(struct i915_address_space *vm,
		      struct i915_vma_resource *vma_res);

void gtt_write_workarounds(struct intel_gt *gt);

void setup_private_pat(struct intel_gt *gt);

int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash,
			   u64 size);
int i915_vm_map_pt_stash(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash);
void i915_vm_free_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash);
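
/*
 * Plausible stash lifecycle (sketch based on the prototypes above and the
 * vm ops): reserve page tables for a range, pin their backing store, let
 * the vm wire them up, then return whatever was not consumed:
 *
 *	err = i915_vm_alloc_pt_stash(vm, &stash, size);
 *	if (!err)
 *		err = i915_vm_map_pt_stash(vm, &stash);
 *	if (!err)
 *		vm->allocate_va_range(vm, &stash, start, size);
 *	i915_vm_free_pt_stash(vm, &stash);
 */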

struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size);

struct i915_vma *
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size);

static inline struct sgt_dma {
	struct scatterlist *sg;
	dma_addr_t dma, max;
} sgt_dma(struct i915_vma_resource *vma_res) {
	struct scatterlist *sg = vma_res->bi.pages->sgl;
	dma_addr_t addr = sg_dma_address(sg);

	return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
}
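
/*
 * sgt_dma() primes a cursor over the backing store's DMA segments: it
 * starts at the first scatterlist entry, with [dma, max) bounding that
 * segment; callers advance sg-by-sg as they consume addresses.
 */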

bool i915_ggtt_require_binder(struct drm_i915_private *i915);

#endif