xref: /linux/drivers/gpu/drm/i915/gem/i915_gem_object.h (revision 1c07425e902cd3137961c3d45b4271bf8a9b8eb9)
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include "display/intel_frontbuffer.h"
#include "intel_memory_region.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ww.h"
#include "i915_vma_types.h"

enum intel_region_id;

/*
 * XXX: There is a prevalence of the assumption that we fit the
 * object's page count inside a 32bit _signed_ variable. Let's document
 * this and catch if we ever need to fix it. In the meantime, if you do
 * spot such a local variable, please consider fixing!
 *
 * Aside from our own locals (for which we have no excuse!):
 * - sg_table embeds unsigned int for num_pages
 * - get_user_pages*() mixed ints with longs
 */
#define GEM_CHECK_SIZE_OVERFLOW(sz) \
	GEM_WARN_ON((sz) >> PAGE_SHIFT > INT_MAX)

static inline bool i915_gem_object_size_2big(u64 size)
{
	struct drm_i915_gem_object *obj;

	if (GEM_CHECK_SIZE_OVERFLOW(size))
		return true;

	if (overflows_type(size, obj->base.size))
		return true;

	return false;
}
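
/*
 * Usage sketch (illustrative, not part of this header): object creation
 * paths are expected to reject oversized requests up front, before
 * allocating anything:
 *
 *	if (i915_gem_object_size_2big(size))
 *		return ERR_PTR(-E2BIG);
 */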

void i915_gem_init__objects(struct drm_i915_private *i915);

void i915_objects_module_exit(void);
int i915_objects_module_init(void);

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key,
			  unsigned alloc_flags);

void __i915_gem_object_fini(struct drm_i915_gem_object *obj);

struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
				       const void *data, resource_size_t size);
struct drm_i915_gem_object *
__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
			      struct intel_memory_region **placements,
			      unsigned int n_placements);

extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;

void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages,
				     bool needs_clflush);

int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
				const struct drm_i915_gem_pwrite *args);
int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
			       const struct drm_i915_gem_pread *args);

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages);
void i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
				    struct sg_table *pages);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
{
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;

	return obj;
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	obj = i915_gem_object_get_rcu(obj);
	rcu_read_unlock();

	return obj;
}
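
/*
 * Usage sketch (illustrative only): a successful lookup returns a new
 * reference that the caller must drop when done, e.g. in an ioctl:
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_lookup(file, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *
 *	... use obj ...
 *
 *	i915_gem_object_put(obj);
 */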

__deprecated
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_get(&obj->base);
	return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_put(&obj->base);
}

#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)

/*
 * If more than one potential simultaneous locker exists (i.e. the
 * object is still referenced), assert that the object lock is held.
 */
static inline void assert_object_held_shared(const struct drm_i915_gem_object *obj)
{
	/*
	 * Note mm list lookup is protected by
	 * kref_get_unless_zero().
	 */
	if (IS_ENABLED(CONFIG_LOCKDEP) &&
	    kref_read(&obj->base.refcount) > 0)
		assert_object_held(obj);
}

static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
					 struct i915_gem_ww_ctx *ww,
					 bool intr)
{
	int ret;

	if (intr)
		ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL);
	else
		ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);

	if (!ret && ww) {
		i915_gem_object_get(obj);
		list_add_tail(&obj->obj_link, &ww->obj_list);
	}

	/* -EALREADY means the object is already locked in this ww context. */
	if (ret == -EALREADY)
		ret = 0;

	/* Stash the contended object so the caller can backoff and retry. */
	if (ret == -EDEADLK) {
		i915_gem_object_get(obj);
		ww->contended = obj;
	}

	return ret;
}

static inline int i915_gem_object_lock(struct drm_i915_gem_object *obj,
				       struct i915_gem_ww_ctx *ww)
{
	return __i915_gem_object_lock(obj, ww, ww && ww->intr);
}

static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj,
						     struct i915_gem_ww_ctx *ww)
{
	WARN_ON(ww && !ww->intr);
	return __i915_gem_object_lock(obj, ww, true);
}

static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj,
					   struct i915_gem_ww_ctx *ww)
{
	if (!ww)
		return dma_resv_trylock(obj->base.resv);
	else
		return ww_mutex_trylock(&obj->base.resv->lock, &ww->ctx);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	if (obj->ops->adjust_lru)
		obj->ops->adjust_lru(obj);

	dma_resv_unlock(obj->base.resv);
}
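
/*
 * Usage sketch (illustrative only): when a ww context is supplied, the
 * lock helpers are meant to be used in a backoff-and-retry loop so that
 * -EDEADLK is resolved by dropping all held locks and starting over;
 * objects locked through the context are released by
 * i915_gem_ww_ctx_fini():
 *
 *	struct i915_gem_ww_ctx ww;
 *	int err;
 *
 *	i915_gem_ww_ctx_init(&ww, true);
 * retry:
 *	err = i915_gem_object_lock(obj, &ww);
 *	if (!err) {
 *		... operate on obj under the lock ...
 *	}
 *	if (err == -EDEADLK) {
 *		err = i915_gem_ww_ctx_backoff(&ww);
 *		if (!err)
 *			goto retry;
 *	}
 *	i915_gem_ww_ctx_fini(&ww);
 */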

static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
}

static inline bool
i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_VOLATILE;
}

static inline void
i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_ALLOC_VOLATILE;
}

static inline bool
i915_gem_object_has_tiling_quirk(struct drm_i915_gem_object *obj)
{
	return test_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_set_tiling_quirk(struct drm_i915_gem_object *obj)
{
	set_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_clear_tiling_quirk(struct drm_i915_gem_object *obj)
{
	clear_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline bool
i915_gem_object_is_protected(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_PROTECTED;
}

static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
			 unsigned long flags)
{
	return obj->ops->flags & flags;
}

bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj);

bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}

static inline bool
i915_gem_object_has_self_managed_shrink_list(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST);
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}

static inline bool
i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
}

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->frontbuffer);
}

static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);
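
/*
 * Worked example (illustrative only): X-tiling uses an 8-row tile
 * height, so a 512-byte stride gives a tile row of 512 * 8 = 4096
 * bytes; with Y-tiling (32 rows) the same stride gives
 * 512 * 32 = 16384 bytes.
 */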

struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
			 struct i915_gem_object_page_iter *iter,
			 unsigned int n,
			 unsigned int *offset, bool dma);

static inline struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset)
{
	return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset, false);
}

static inline struct scatterlist *
i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj,
			   unsigned int n,
			   unsigned int *offset)
{
	return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset, true);
}

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
			 unsigned int n);

struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n);

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len);

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n);

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	assert_object_held(obj);

	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	return __i915_gem_object_get_pages(obj);
}

int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj);
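
/*
 * Usage sketch (illustrative only): pinning keeps the backing store
 * resident; i915_gem_object_pin_pages() requires the object lock, while
 * the _unlocked() variant takes it internally:
 *
 *	assert_object_held(obj);
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *
 *	... e.g. addr = i915_gem_object_get_dma_address(obj, 0); ...
 *
 *	i915_gem_object_unpin_pages(obj);
 */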

static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
int i915_gem_object_truncate(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
					   enum i915_map_type type);

void *__must_check i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
						    enum i915_map_type type);

enum i915_map_type i915_coherent_map_type(struct drm_i915_private *i915,
					  struct drm_i915_gem_object *obj,
					  bool always_coherent);

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_flush_map(obj, 0, obj->base.size);
}

/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * upon the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);
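
/*
 * Usage sketch (illustrative only): map, write, flush, then release the
 * pin taken by the mapping:
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memset(vaddr, 0, obj->base.size);
 *	i915_gem_object_flush_map(obj);
 *	i915_gem_object_unpin_map(obj);
 */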

int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
				 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
				  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE	BIT(0)
#define CLFLUSH_AFTER	BIT(1)
#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)

static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
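
/*
 * Usage sketch (illustrative only, with the object lock held): CPU
 * access through the page cache brackets the copy with the flushes
 * reported by prepare_write:
 *
 *	err = i915_gem_object_prepare_write(obj, &needs_clflush);
 *	if (err)
 *		return err;
 *
 *	if (needs_clflush & CLFLUSH_BEFORE)
 *		drm_clflush_virt_range(vaddr, len);
 *	memcpy(vaddr, user_data, len);
 *	if (needs_clflush & CLFLUSH_AFTER)
 *		drm_clflush_virt_range(vaddr, len);
 *
 *	i915_gem_object_finish_access(obj);
 */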

int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
				     struct dma_fence **fence);
int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
				      bool intr);
bool i915_gem_object_has_unknown_state(struct drm_i915_gem_object *obj);

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);
bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj);

int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     struct i915_gem_ww_ctx *ww,
				     u32 alignment,
				     const struct i915_gtt_view *view,
				     unsigned int flags);
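
/*
 * Usage sketch (illustrative only): domain changes are made under the
 * object lock, e.g. to make an object coherent for CPU writes:
 *
 *	err = i915_gem_object_lock_interruptible(obj, NULL);
 *	if (err)
 *		return err;
 *
 *	err = i915_gem_object_set_to_cpu_domain(obj, true);
 *	i915_gem_object_unlock(obj);
 */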

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void __i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);

static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	if (i915_gem_cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

void i915_gem_fence_wait_priority(struct dma_fence *fence,
				  const struct i915_sched_attr *attr);

int i915_gem_object_wait(struct drm_i915_gem_object *obj,
			 unsigned int flags,
			 long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
				  unsigned int flags,
				  const struct i915_sched_attr *attr);

void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
					 enum fb_op_origin origin);
void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
					      enum fb_op_origin origin);

static inline void
i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
				  enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_flush_frontbuffer(obj, origin);
}

static inline void
i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
				       enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_invalidate_frontbuffer(obj, origin);
}

int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size);

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);

void __i915_gem_free_object_rcu(struct rcu_head *head);

void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj);

void __i915_gem_free_object(struct drm_i915_gem_object *obj);

bool i915_gem_object_evictable(struct drm_i915_gem_object *obj);

bool i915_gem_object_migratable(struct drm_i915_gem_object *obj);

int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    enum intel_region_id id);
int __i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			      struct i915_gem_ww_ctx *ww,
			      enum intel_region_id id,
			      unsigned int flags);

bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
				 enum intel_region_id id);

int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj,
				   unsigned int flags);
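
/*
 * Usage sketch (illustrative only): migration is requested under a ww
 * context and completes asynchronously, so callers typically wait for
 * it, e.g. to move an object into system memory:
 *
 *	err = i915_gem_object_migrate(obj, &ww, INTEL_REGION_SMEM);
 *	if (!err)
 *		err = i915_gem_object_wait_migration(obj, 0);
 */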

bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
					enum intel_memory_type type);

bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj);

int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
			 size_t size, struct intel_memory_region *mr,
			 struct address_space *mapping,
			 unsigned int max_segment);
void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
			 bool dirty, bool backup);
void __shmem_writeback(size_t size, struct address_space *mapping);

#ifdef CONFIG_MMU_NOTIFIER
static inline bool
i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
{
	return obj->userptr.notifier.mm;
}

int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj);
#else
static inline bool i915_gem_object_is_userptr(struct drm_i915_gem_object *obj) { return false; }

static inline int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }

#endif

#endif