/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include "intel_memory_region.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ww.h"
#include "i915_vma_types.h"

enum intel_region_id;

#define obj_to_i915(obj__) to_i915((obj__)->base.dev)

static inline bool i915_gem_object_size_2big(u64 size)
{
	struct drm_i915_gem_object *obj;

	if (overflows_type(size, obj->base.size))
		return true;

	return false;
}

unsigned int i915_gem_get_pat_index(struct drm_i915_private *i915,
				    enum i915_cache_level level);
bool i915_gem_object_has_cache_level(const struct drm_i915_gem_object *obj,
				     enum i915_cache_level lvl);
void i915_gem_init__objects(struct drm_i915_private *i915);

void i915_objects_module_exit(void);
int i915_objects_module_init(void);

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key,
			  unsigned alloc_flags);

void __i915_gem_object_fini(struct drm_i915_gem_object *obj);

struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
				       const void *data, resource_size_t size);
struct drm_i915_gem_object *
__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
			      struct intel_memory_region **placements,
			      unsigned int n_placements);

extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;

void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages,
				     bool needs_clflush);

int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
				const struct drm_i915_gem_pwrite *args);
int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
			       const struct drm_i915_gem_pread *args);

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages);
void i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
				    struct sg_table *pages);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully that the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
{
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;

	return obj;
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	obj = i915_gem_object_get_rcu(obj);
	rcu_read_unlock();

	return obj;
}
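
/*
 * Illustrative sketch (not part of the original header): translating a
 * userspace handle into an object reference in an ioctl handler and
 * dropping that reference when done. "args" is a hypothetical ioctl
 * argument struct.
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_lookup(file, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *
 *	... operate on obj ...
 *
 *	i915_gem_object_put(obj);
 */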

__deprecated
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_get(&obj->base);
	return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_put(&obj->base);
}

#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)

/*
 * If more than one potential simultaneous locker, assert held.
 */
static inline void assert_object_held_shared(const struct drm_i915_gem_object *obj)
{
	/*
	 * Note mm list lookup is protected by
	 * kref_get_unless_zero().
	 */
	if (IS_ENABLED(CONFIG_LOCKDEP) &&
	    kref_read(&obj->base.refcount) > 0)
		assert_object_held(obj);
}

static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
					 struct i915_gem_ww_ctx *ww,
					 bool intr)
{
	int ret;

	if (intr)
		ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL);
	else
		ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);

	if (!ret && ww) {
		i915_gem_object_get(obj);
		list_add_tail(&obj->obj_link, &ww->obj_list);
	}
	if (ret == -EALREADY)
		ret = 0;

	if (ret == -EDEADLK) {
		i915_gem_object_get(obj);
		ww->contended = obj;
	}

	return ret;
}

static inline int i915_gem_object_lock(struct drm_i915_gem_object *obj,
				       struct i915_gem_ww_ctx *ww)
{
	return __i915_gem_object_lock(obj, ww, ww && ww->intr);
}

static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj,
						     struct i915_gem_ww_ctx *ww)
{
	WARN_ON(ww && !ww->intr);
	return __i915_gem_object_lock(obj, ww, true);
}

static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj,
					   struct i915_gem_ww_ctx *ww)
{
	if (!ww)
		return dma_resv_trylock(obj->base.resv);
	else
		return ww_mutex_trylock(&obj->base.resv->lock, &ww->ctx);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	if (obj->ops->adjust_lru)
		obj->ops->adjust_lru(obj);

	dma_resv_unlock(obj->base.resv);
}
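
/*
 * Illustrative sketch (not part of the original header): the typical
 * ww-mutex backoff loop around i915_gem_object_lock(). The helpers
 * i915_gem_ww_ctx_init(), i915_gem_ww_ctx_backoff() and
 * i915_gem_ww_ctx_fini() come from i915_gem_ww.h; do_work() is a
 * hypothetical placeholder. Objects locked through the ww context are
 * unlocked again by i915_gem_ww_ctx_fini().
 *
 *	struct i915_gem_ww_ctx ww;
 *	int err;
 *
 *	i915_gem_ww_ctx_init(&ww, true);
 * retry:
 *	err = i915_gem_object_lock(obj, &ww);
 *	if (!err)
 *		err = do_work(obj);
 *	if (err == -EDEADLK) {
 *		err = i915_gem_ww_ctx_backoff(&ww);
 *		if (!err)
 *			goto retry;
 *	}
 *	i915_gem_ww_ctx_fini(&ww);
 */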

static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
}

static inline bool
i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_VOLATILE;
}

static inline void
i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_ALLOC_VOLATILE;
}

static inline bool
i915_gem_object_has_tiling_quirk(struct drm_i915_gem_object *obj)
{
	return test_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_set_tiling_quirk(struct drm_i915_gem_object *obj)
{
	set_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_clear_tiling_quirk(struct drm_i915_gem_object *obj)
{
	clear_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline bool
i915_gem_object_is_protected(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_PROTECTED;
}

static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
			 unsigned long flags)
{
	return obj->ops->flags & flags;
}

bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj);

bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}

static inline bool
i915_gem_object_has_self_managed_shrink_list(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST);
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}

static inline bool
i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
}

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->frontbuffer) || obj->is_dpt;
}

static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

/**
 * __i915_gem_object_page_iter_get_sg - helper to find the scatterlist entry
 * holding page @n of an object, and the page's position within that entry,
 * using the given i915_gem_object_page_iter
 * @obj: i915 GEM buffer object
 * @iter: i915 GEM buffer object page iterator
 * @n: page offset
 * @offset: returns the offset of page @n within the scatterlist entry found
 *
 * Context: Takes and releases the mutex lock of the i915_gem_object_page_iter.
 *          Takes and releases the RCU lock to search the radix_tree of
 *          i915_gem_object_page_iter.
 *
 * Returns:
 * The scatterlist entry containing page @n; *@offset is set to the page's
 * position within that entry.
 *
 * Recommended to use the wrapper macro: i915_gem_object_page_iter_get_sg()
 */
struct scatterlist *
__i915_gem_object_page_iter_get_sg(struct drm_i915_gem_object *obj,
				   struct i915_gem_object_page_iter *iter,
				   pgoff_t n,
				   unsigned int *offset);

/**
 * i915_gem_object_page_iter_get_sg - wrapper macro for
 * __i915_gem_object_page_iter_get_sg()
 * @obj: i915 GEM buffer object
 * @it: i915 GEM buffer object page iterator
 * @n: page offset
 * @offset: returns the offset of page @n within the scatterlist entry found
 *
 * Context: Takes and releases the mutex lock of the i915_gem_object_page_iter.
 *          Takes and releases the RCU lock to search the radix_tree of
 *          i915_gem_object_page_iter.
 *
 * Returns:
 * The scatterlist entry containing page @n; *@offset is set to the page's
 * position within that entry.
 *
 * To avoid truncating the page offset, the macro checks at compile time that
 * @n is representable as pgoff_t before calling
 * __i915_gem_object_page_iter_get_sg().
 */
#define i915_gem_object_page_iter_get_sg(obj, it, n, offset) ({	\
	static_assert(castable_to_type(n, pgoff_t));		\
	__i915_gem_object_page_iter_get_sg(obj, it, n, offset);	\
})

/**
 * __i915_gem_object_get_sg - helper to find the scatterlist entry holding
 * page @n of a drm_i915_gem_object, and the page's position within that
 * entry, via the object's internal shmem scatterlist lookup function
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: returns the offset of page @n within the scatterlist entry found
 *
 * It uses drm_i915_gem_object's internal shmem scatterlist lookup function as
 * the i915_gem_object_page_iter and calls __i915_gem_object_page_iter_get_sg().
 *
 * Returns:
 * The scatterlist entry containing page @n; *@offset is set to the page's
 * position within that entry.
 *
 * Recommended to use the wrapper macro: i915_gem_object_get_sg()
 * See also __i915_gem_object_page_iter_get_sg()
 */
static inline struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj, pgoff_t n,
			 unsigned int *offset)
{
	return __i915_gem_object_page_iter_get_sg(obj, &obj->mm.get_page, n, offset);
}

/**
 * i915_gem_object_get_sg - wrapper macro for __i915_gem_object_get_sg()
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: returns the offset of page @n within the scatterlist entry found
 *
 * Returns:
 * The scatterlist entry containing page @n; *@offset is set to the page's
 * position within that entry.
 *
 * To avoid truncating the page offset, the macro checks at compile time that
 * @n is representable as pgoff_t before calling __i915_gem_object_get_sg().
 * See also __i915_gem_object_page_iter_get_sg()
 */
#define i915_gem_object_get_sg(obj, n, offset) ({	\
	static_assert(castable_to_type(n, pgoff_t));	\
	__i915_gem_object_get_sg(obj, n, offset);	\
})

/**
 * __i915_gem_object_get_sg_dma - helper to find the DMA mapped scatterlist
 * entry holding page @n of a drm_i915_gem_object, and the page's position
 * within that entry, via the object's internal DMA mapped scatterlist lookup
 * function
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: returns the offset of page @n within the scatterlist entry found
 *
 * It uses drm_i915_gem_object's internal DMA mapped scatterlist lookup function
 * as the i915_gem_object_page_iter and calls __i915_gem_object_page_iter_get_sg().
 *
 * Returns:
 * The DMA mapped scatterlist entry containing page @n; *@offset is set to the
 * page's position within that entry.
 *
 * Recommended to use the wrapper macro: i915_gem_object_get_sg_dma()
 * See also __i915_gem_object_page_iter_get_sg()
 */
static inline struct scatterlist *
__i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj, pgoff_t n,
			     unsigned int *offset)
{
	return __i915_gem_object_page_iter_get_sg(obj, &obj->mm.get_dma_page, n, offset);
}

/**
 * i915_gem_object_get_sg_dma - wrapper macro for __i915_gem_object_get_sg_dma()
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: returns the offset of page @n within the scatterlist entry found
 *
 * Returns:
 * The DMA mapped scatterlist entry containing page @n; *@offset is set to the
 * page's position within that entry.
 *
 * To avoid truncating the page offset, the macro checks at compile time that
 * @n is representable as pgoff_t before calling __i915_gem_object_get_sg_dma().
 * See also __i915_gem_object_page_iter_get_sg()
 */
#define i915_gem_object_get_sg_dma(obj, n, offset) ({	\
	static_assert(castable_to_type(n, pgoff_t));	\
	__i915_gem_object_get_sg_dma(obj, n, offset);	\
})

/**
 * __i915_gem_object_get_page - helper to find the target page with a page offset
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * It uses drm_i915_gem_object's internal shmem scatterlist lookup function as
 * the i915_gem_object_page_iter and calls __i915_gem_object_page_iter_get_sg()
 * internally.
 *
 * Returns:
 * The target page pointer.
 *
 * Recommended to use the wrapper macro: i915_gem_object_get_page()
 * See also __i915_gem_object_page_iter_get_sg()
 */
struct page *
__i915_gem_object_get_page(struct drm_i915_gem_object *obj, pgoff_t n);

/**
 * i915_gem_object_get_page - wrapper macro for __i915_gem_object_get_page()
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * The target page pointer.
 *
 * To avoid truncating the page offset, the macro checks at compile time that
 * @n is representable as pgoff_t before calling __i915_gem_object_get_page().
 * See also __i915_gem_object_page_iter_get_sg()
 */
#define i915_gem_object_get_page(obj, n) ({		\
	static_assert(castable_to_type(n, pgoff_t));	\
	__i915_gem_object_get_page(obj, n);		\
})
/**
 * __i915_gem_object_get_dirty_page - helper to find the target page with a page
 * offset
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * It works like i915_gem_object_get_page(), but it marks the returned page dirty.
 *
 * Returns:
 * The target page pointer.
 *
 * Recommended to use the wrapper macro: i915_gem_object_get_dirty_page()
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_page()
 */
struct page *
__i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, pgoff_t n);

/**
 * i915_gem_object_get_dirty_page - wrapper macro for __i915_gem_object_get_dirty_page()
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * The target page pointer.
 *
 * To avoid truncating the page offset, the macro checks at compile time that
 * @n is representable as pgoff_t before calling
 * __i915_gem_object_get_dirty_page().
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_page()
 */
#define i915_gem_object_get_dirty_page(obj, n) ({	\
	static_assert(castable_to_type(n, pgoff_t));	\
	__i915_gem_object_get_dirty_page(obj, n);	\
})

/**
 * __i915_gem_object_get_dma_address_len - helper to get the DMA bus address
 * of page @n of an i915 GEM buffer object, together with the length of the
 * DMA mapped segment from that page onwards
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @len: returns the length of the DMA mapped segment starting at page @n
 *
 * Returns:
 * The DMA bus address of page @n within the object's DMA mapped scatterlist.
 *
 * Recommended to use the wrapper macro: i915_gem_object_get_dma_address_len()
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_sg_dma()
 */
dma_addr_t
__i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj, pgoff_t n,
				      unsigned int *len);

/**
 * i915_gem_object_get_dma_address_len - wrapper macro for
 * __i915_gem_object_get_dma_address_len()
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @len: returns the length of the DMA mapped segment starting at page @n
 *
 * Returns:
 * The DMA bus address of page @n within the object's DMA mapped scatterlist.
 *
 * To avoid truncating the page offset, the macro checks at compile time that
 * @n is representable as pgoff_t before calling
 * __i915_gem_object_get_dma_address_len().
 * See also __i915_gem_object_page_iter_get_sg() and
 * __i915_gem_object_get_dma_address_len()
 */
#define i915_gem_object_get_dma_address_len(obj, n, len) ({	\
	static_assert(castable_to_type(n, pgoff_t));		\
	__i915_gem_object_get_dma_address_len(obj, n, len);	\
})

/**
 * __i915_gem_object_get_dma_address - helper to get the DMA bus address of
 * page @n of an i915 GEM buffer object
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * The DMA bus address of page @n within the object's DMA mapped scatterlist.
 *
 * Recommended to use the wrapper macro: i915_gem_object_get_dma_address()
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_sg_dma()
 */
dma_addr_t
__i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, pgoff_t n);

/**
 * i915_gem_object_get_dma_address - wrapper macro for
 * __i915_gem_object_get_dma_address()
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * The DMA bus address of page @n within the object's DMA mapped scatterlist.
 *
 * To avoid truncating the page offset, the macro checks at compile time that
 * @n is representable as pgoff_t before calling
 * __i915_gem_object_get_dma_address().
 * See also __i915_gem_object_page_iter_get_sg() and
 * __i915_gem_object_get_dma_address()
 */
#define i915_gem_object_get_dma_address(obj, n) ({	\
	static_assert(castable_to_type(n, pgoff_t));	\
	__i915_gem_object_get_dma_address(obj, n);	\
})
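
/*
 * Illustrative sketch (not part of the original header): collecting the DMA
 * address of every page of an object into a hypothetical "desc" array,
 * assuming the pages are pinned (and hence DMA mapped) by the caller.
 *
 *	pgoff_t i;
 *
 *	for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++)
 *		desc[i] = i915_gem_object_get_dma_address(obj, i);
 */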

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	assert_object_held(obj);

	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	return __i915_gem_object_get_pages(obj);
}

int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj);
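
/*
 * Illustrative sketch (not part of the original header): pinning the backing
 * store under the object lock, then keeping it resident after the lock is
 * dropped until i915_gem_object_unpin_pages() is called.
 *
 *	err = i915_gem_object_lock_interruptible(obj, NULL);
 *	if (err)
 *		return err;
 *	err = i915_gem_object_pin_pages(obj);
 *	i915_gem_object_unlock(obj);
 *	if (err)
 *		return err;
 *
 *	... access the object's pages ...
 *
 *	i915_gem_object_unpin_pages(obj);
 */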

static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
int i915_gem_object_truncate(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
					   enum i915_map_type type);

void *__must_check i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
						    enum i915_map_type type);

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_flush_map(obj, 0, obj->base.size);
}

/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * on the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
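
/*
 * Illustrative sketch (not part of the original header): writing into an
 * object through a kernel mapping. "data" and "len" are hypothetical;
 * I915_MAP_WB is one of the enum i915_map_type values from
 * i915_gem_object_types.h.
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memcpy(vaddr, data, len);
 *	i915_gem_object_flush_map(obj);
 *	i915_gem_object_unpin_map(obj);
 */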

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);

int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
				 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
				  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE	BIT(0)
#define CLFLUSH_AFTER	BIT(1)
#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)

static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
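
/*
 * Illustrative sketch (not part of the original header): a CPU write that
 * honours the clflush hints, assuming the object lock is already held and
 * "src" is a hypothetical source buffer. drm_clflush_virt_range() comes
 * from drm/drm_cache.h.
 *
 *	unsigned int needs_clflush;
 *	void *vaddr;
 *	int err;
 *
 *	err = i915_gem_object_prepare_write(obj, &needs_clflush);
 *	if (err)
 *		return err;
 *
 *	vaddr = kmap_local_page(i915_gem_object_get_page(obj, 0));
 *	if (needs_clflush & CLFLUSH_BEFORE)
 *		drm_clflush_virt_range(vaddr, PAGE_SIZE);
 *	memcpy(vaddr, src, PAGE_SIZE);
 *	if (needs_clflush & CLFLUSH_AFTER)
 *		drm_clflush_virt_range(vaddr, PAGE_SIZE);
 *	kunmap_local(vaddr);
 *
 *	i915_gem_object_finish_access(obj);
 */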

int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
				     struct dma_fence **fence);
int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
				      bool intr);
bool i915_gem_object_has_unknown_state(struct drm_i915_gem_object *obj);

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
void i915_gem_object_set_pat_index(struct drm_i915_gem_object *obj,
				   unsigned int pat_index);
bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);
bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj);

int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     struct i915_gem_ww_ctx *ww,
				     u32 alignment, unsigned int guard,
				     const struct i915_gtt_view *view,
				     unsigned int flags);

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void __i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);

static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	if (i915_gem_cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

void i915_gem_fence_wait_priority(struct dma_fence *fence,
				  const struct i915_sched_attr *attr);

int i915_gem_object_wait(struct drm_i915_gem_object *obj,
			 unsigned int flags,
			 long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
				  unsigned int flags,
				  const struct i915_sched_attr *attr);

int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size);

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);

void __i915_gem_free_object_rcu(struct rcu_head *head);

void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj);

void __i915_gem_free_object(struct drm_i915_gem_object *obj);

bool i915_gem_object_evictable(struct drm_i915_gem_object *obj);

bool i915_gem_object_migratable(struct drm_i915_gem_object *obj);

int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    enum intel_region_id id);
int __i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			      struct i915_gem_ww_ctx *ww,
			      enum intel_region_id id,
			      unsigned int flags);

bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
				 enum intel_region_id id);

int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj,
				   unsigned int flags);
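
/*
 * Illustrative sketch (not part of the original header): migrating an object
 * to system memory and waiting for the migration to complete, using the
 * for_i915_gem_ww() loop from i915_gem_ww.h and the INTEL_REGION_SMEM id
 * from intel_memory_region.h.
 *
 *	struct i915_gem_ww_ctx ww;
 *	int err;
 *
 *	for_i915_gem_ww(&ww, err, true) {
 *		err = i915_gem_object_lock(obj, &ww);
 *		if (err)
 *			continue;
 *		err = i915_gem_object_migrate(obj, &ww, INTEL_REGION_SMEM);
 *		if (err)
 *			continue;
 *		err = i915_gem_object_wait_migration(obj, 0);
 *	}
 */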

bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
					enum intel_memory_type type);

bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj);

int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
			 size_t size, struct intel_memory_region *mr,
			 struct address_space *mapping,
			 unsigned int max_segment);
void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
			 bool dirty, bool backup);
void __shmem_writeback(size_t size, struct address_space *mapping);

#ifdef CONFIG_MMU_NOTIFIER
static inline bool
i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
{
	return obj->userptr.notifier.mm;
}

int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj);
#else
static inline bool i915_gem_object_is_userptr(struct drm_i915_gem_object *obj) { return false; }

static inline int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }

#endif

#endif