/* SPDX-License-Identifier: GPL-2.0 OR MIT */

#ifndef __DRM_GPUVM_H__
#define __DRM_GPUVM_H__

/*
 * Copyright (c) 2022 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-resv.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/types.h>

#include <drm/drm_device.h>
#include <drm/drm_gem.h>
#include <drm/drm_exec.h>

struct drm_gpuvm;
struct drm_gpuvm_bo;
struct drm_gpuvm_ops;

/**
 * enum drm_gpuva_flags - flags for struct drm_gpuva
 */
enum drm_gpuva_flags {
	/**
	 * @DRM_GPUVA_INVALIDATED:
	 *
	 * Flag indicating that the &drm_gpuva's backing GEM is invalidated.
	 */
	DRM_GPUVA_INVALIDATED = (1 << 0),

	/**
	 * @DRM_GPUVA_SPARSE:
	 *
	 * Flag indicating that the &drm_gpuva is a sparse mapping.
	 */
	DRM_GPUVA_SPARSE = (1 << 1),

	/**
	 * @DRM_GPUVA_USERBITS: user defined bits
	 */
	DRM_GPUVA_USERBITS = (1 << 2),
};

/**
 * struct drm_gpuva - structure to track a GPU VA mapping
 *
 * This structure represents a GPU VA mapping and is associated with a
 * &drm_gpuvm.
 *
 * Typically, this structure is embedded in bigger driver structures.
 */
struct drm_gpuva {
	/**
	 * @vm: the &drm_gpuvm this object is associated with
	 */
	struct drm_gpuvm *vm;

	/**
	 * @vm_bo: the &drm_gpuvm_bo abstraction for the mapped
	 * &drm_gem_object
	 */
	struct drm_gpuvm_bo *vm_bo;

	/**
	 * @flags: the &drm_gpuva_flags for this mapping
	 */
	enum drm_gpuva_flags flags;

	/**
	 * @va: structure containing the address and range of the &drm_gpuva
	 */
	struct {
		/**
		 * @addr: the start address
		 */
		u64 addr;

		/**
		 * @range: the range
		 */
		u64 range;
	} va;

	/**
	 * @gem: structure containing the &drm_gem_object and its offset
	 */
	struct {
		/**
		 * @offset: the offset within the &drm_gem_object
		 */
		u64 offset;

		/**
		 * @obj: the mapped &drm_gem_object
		 */
		struct drm_gem_object *obj;

		/**
		 * @entry: the &list_head to attach this object to a &drm_gpuvm_bo
		 */
		struct list_head entry;
	} gem;

	/**
	 * @rb: structure containing data to store &drm_gpuvas in a rb-tree
	 */
	struct {
		/**
		 * @node: the rb-tree node
		 */
		struct rb_node node;

		/**
		 * @entry: The &list_head to additionally connect &drm_gpuvas
		 * in the same order they appear in the interval tree. This is
		 * useful to keep iterating &drm_gpuvas from a start node found
		 * through the rb-tree while doing modifications on the rb-tree
		 * itself.
		 */
		struct list_head entry;

		/**
		 * @__subtree_last: needed by the interval tree, holding the
		 * last-in-subtree value
		 */
		u64 __subtree_last;
	} rb;
};

int drm_gpuva_insert(struct drm_gpuvm *gpuvm, struct drm_gpuva *va);
void drm_gpuva_remove(struct drm_gpuva *va);

void drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo);
void drm_gpuva_unlink(struct drm_gpuva *va);

struct drm_gpuva *drm_gpuva_find(struct drm_gpuvm *gpuvm,
				 u64 addr, u64 range);
struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuvm *gpuvm,
				       u64 addr, u64 range);
struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start);
struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end);

/**
 * drm_gpuva_init() - initialize a &drm_gpuva
 * @va: the &drm_gpuva to initialize
 * @addr: the start address of the new mapping
 * @range: the range of the new mapping
 * @obj: the &drm_gem_object to map
 * @offset: the offset within the &drm_gem_object
 */
static inline void drm_gpuva_init(struct drm_gpuva *va, u64 addr, u64 range,
				  struct drm_gem_object *obj, u64 offset)
{
	va->va.addr = addr;
	va->va.range = range;
	va->gem.obj = obj;
	va->gem.offset = offset;
}

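/*
 * Example: a minimal sketch of creating a mapping, assuming a hypothetical
 * driver structure (struct driver_va) embedding a &drm_gpuva. Locking (the
 * GEM's gpuva lock for drm_gpuva_link()) and page table programming are
 * left to the driver.
 *
 *	struct driver_va {
 *		struct drm_gpuva va;
 *	};
 *
 *	int driver_map(struct drm_gpuvm *gpuvm, struct drm_gpuvm_bo *vm_bo,
 *		       u64 addr, u64 range, u64 offset)
 *	{
 *		struct driver_va *dva;
 *		int ret;
 *
 *		dva = kzalloc(sizeof(*dva), GFP_KERNEL);
 *		if (!dva)
 *			return -ENOMEM;
 *
 *		drm_gpuva_init(&dva->va, addr, range, vm_bo->obj, offset);
 *
 *		ret = drm_gpuva_insert(gpuvm, &dva->va);
 *		if (ret) {
 *			kfree(dva);
 *			return ret;
 *		}
 *
 *		drm_gpuva_link(&dva->va, vm_bo);
 *		return 0;
 *	}
 */
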
/**
 * drm_gpuva_invalidate() - sets whether the backing GEM of this &drm_gpuva is
 * invalidated
 * @va: the &drm_gpuva to set the invalidate flag for
 * @invalidate: indicates whether the &drm_gpuva is invalidated
 */
static inline void drm_gpuva_invalidate(struct drm_gpuva *va, bool invalidate)
{
	if (invalidate)
		va->flags |= DRM_GPUVA_INVALIDATED;
	else
		va->flags &= ~DRM_GPUVA_INVALIDATED;
}

/**
 * drm_gpuva_invalidated() - indicates whether the backing GEM of this
 * &drm_gpuva is invalidated
 * @va: the &drm_gpuva to check
 *
 * Returns: true if the backing GEM is invalidated, false otherwise.
 */
static inline bool drm_gpuva_invalidated(struct drm_gpuva *va)
{
	return va->flags & DRM_GPUVA_INVALIDATED;
}

/**
 * enum drm_gpuvm_flags - flags for struct drm_gpuvm
 */
enum drm_gpuvm_flags {
	/**
	 * @DRM_GPUVM_RESV_PROTECTED: GPUVM is protected externally by the
	 * GPUVM's &dma_resv lock
	 */
	DRM_GPUVM_RESV_PROTECTED = BIT(0),

	/**
	 * @DRM_GPUVM_USERBITS: user defined bits
	 */
	DRM_GPUVM_USERBITS = BIT(1),
};

/**
 * struct drm_gpuvm - DRM GPU VA Manager
 *
 * The DRM GPU VA Manager keeps track of a GPU's virtual address space using
 * an interval tree (rb-tree) of &drm_gpuva structures. Typically, this
 * structure is embedded in bigger driver structures.
 *
 * Drivers can pass addresses and ranges in an arbitrary unit, e.g. bytes or
 * pages.
 *
 * There should be one manager instance per GPU virtual address space.
 */
struct drm_gpuvm {
	/**
	 * @name: the name of the DRM GPU VA space
	 */
	const char *name;

	/**
	 * @flags: the &drm_gpuvm_flags of this GPUVM
	 */
	enum drm_gpuvm_flags flags;

	/**
	 * @drm: the &drm_device this VM lives in
	 */
	struct drm_device *drm;

	/**
	 * @mm_start: start of the VA space
	 */
	u64 mm_start;

	/**
	 * @mm_range: length of the VA space
	 */
	u64 mm_range;

	/**
	 * @rb: structures to track &drm_gpuva entries
	 */
	struct {
		/**
		 * @tree: the rb-tree to track GPU VA mappings
		 */
		struct rb_root_cached tree;

		/**
		 * @list: the &list_head to track GPU VA mappings
		 */
		struct list_head list;
	} rb;

	/**
	 * @kref: reference count of this object
	 */
	struct kref kref;

	/**
	 * @kernel_alloc_node:
	 *
	 * &drm_gpuva representing the address space cutout reserved for
	 * the kernel
	 */
	struct drm_gpuva kernel_alloc_node;

	/**
	 * @ops: &drm_gpuvm_ops providing the split/merge steps to drivers
	 */
	const struct drm_gpuvm_ops *ops;

	/**
	 * @r_obj: Resv GEM object representing the GPUVM's common &dma_resv.
	 */
	struct drm_gem_object *r_obj;

	/**
	 * @extobj: structure holding the extobj list
	 */
	struct {
		/**
		 * @list: &list_head storing &drm_gpuvm_bos serving as
		 * external objects
		 */
		struct list_head list;

		/**
		 * @local_list: pointer to the local list temporarily storing
		 * entries from the external object list
		 */
		struct list_head *local_list;

		/**
		 * @lock: spinlock to protect the extobj list
		 */
		spinlock_t lock;
	} extobj;

	/**
	 * @evict: structure holding the evict list and evict list lock
	 */
	struct {
		/**
		 * @list: &list_head storing &drm_gpuvm_bos currently being
		 * evicted
		 */
		struct list_head list;

		/**
		 * @local_list: pointer to the local list temporarily storing
		 * entries from the evicted object list
		 */
		struct list_head *local_list;

		/**
		 * @lock: spinlock to protect the evict list
		 */
		spinlock_t lock;
	} evict;
};

void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
		    enum drm_gpuvm_flags flags,
		    struct drm_device *drm,
		    struct drm_gem_object *r_obj,
		    u64 start_offset, u64 range,
		    u64 reserve_offset, u64 reserve_range,
		    const struct drm_gpuvm_ops *ops);

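/*
 * Example: a minimal sketch of VM creation, assuming a hypothetical driver
 * structure (struct my_vm) embedding the &drm_gpuvm and a 48 bit VA space
 * with a single 4 KiB kernel reserved region at its start. The resv object
 * is allocated through drm_gpuvm_resv_object_alloc() declared below; note
 * that drm_gpuvm_init() takes its own reference on it.
 *
 *	static void my_vm_free(struct drm_gpuvm *gpuvm)
 *	{
 *		kfree(container_of(gpuvm, struct my_vm, base));
 *	}
 *
 *	static const struct drm_gpuvm_ops my_vm_ops = {
 *		.vm_free = my_vm_free,
 *	};
 *
 *	r_obj = drm_gpuvm_resv_object_alloc(drm);
 *	if (!r_obj)
 *		return -ENOMEM;
 *
 *	drm_gpuvm_init(&vm->base, "my-vm", 0, drm, r_obj,
 *		       0, 1ull << 48,
 *		       0, SZ_4K,
 *		       &my_vm_ops);
 *	drm_gem_object_put(r_obj);
 */
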
/**
 * drm_gpuvm_get() - acquire a struct drm_gpuvm reference
 * @gpuvm: the &drm_gpuvm to acquire the reference of
 *
 * This function acquires an additional reference to @gpuvm. It is illegal to
 * call this without already holding a reference. No locks required.
 *
 * Returns: the &struct drm_gpuvm pointer
 */
static inline struct drm_gpuvm *
drm_gpuvm_get(struct drm_gpuvm *gpuvm)
{
	kref_get(&gpuvm->kref);

	return gpuvm;
}

void drm_gpuvm_put(struct drm_gpuvm *gpuvm);

bool drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
bool drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range);

struct drm_gem_object *
drm_gpuvm_resv_object_alloc(struct drm_device *drm);

/**
 * drm_gpuvm_resv_protected() - indicates whether &DRM_GPUVM_RESV_PROTECTED is
 * set
 * @gpuvm: the &drm_gpuvm
 *
 * Returns: true if &DRM_GPUVM_RESV_PROTECTED is set, false otherwise.
 */
static inline bool
drm_gpuvm_resv_protected(struct drm_gpuvm *gpuvm)
{
	return gpuvm->flags & DRM_GPUVM_RESV_PROTECTED;
}

/**
 * drm_gpuvm_resv() - returns the &drm_gpuvm's &dma_resv
 * @gpuvm__: the &drm_gpuvm
 *
 * Returns: a pointer to the &drm_gpuvm's shared &dma_resv
 */
#define drm_gpuvm_resv(gpuvm__) ((gpuvm__)->r_obj->resv)

/**
 * drm_gpuvm_resv_obj() - returns the &drm_gem_object holding the &drm_gpuvm's
 * &dma_resv
 * @gpuvm__: the &drm_gpuvm
 *
 * Returns: a pointer to the &drm_gem_object holding the &drm_gpuvm's shared
 * &dma_resv
 */
#define drm_gpuvm_resv_obj(gpuvm__) ((gpuvm__)->r_obj)

#define drm_gpuvm_resv_held(gpuvm__) \
	dma_resv_held(drm_gpuvm_resv(gpuvm__))

#define drm_gpuvm_resv_assert_held(gpuvm__) \
	dma_resv_assert_held(drm_gpuvm_resv(gpuvm__))

/**
 * drm_gpuvm_is_extobj() - indicates whether the given &drm_gem_object is an
 * external object
 * @gpuvm: the &drm_gpuvm to check
 * @obj: the &drm_gem_object to check
 *
 * Returns: true if the &drm_gem_object's &dma_resv differs from the
 * &drm_gpuvm's &dma_resv, false otherwise
 */
static inline bool
drm_gpuvm_is_extobj(struct drm_gpuvm *gpuvm,
		    struct drm_gem_object *obj)
{
	return obj && obj->resv != drm_gpuvm_resv(gpuvm);
}

static inline struct drm_gpuva *
__drm_gpuva_next(struct drm_gpuva *va)
{
	if (va && !list_is_last(&va->rb.entry, &va->vm->rb.list))
		return list_next_entry(va, rb.entry);

	return NULL;
}

/**
 * drm_gpuvm_for_each_va_range() - iterate over a range of &drm_gpuvas
 * @va__: &drm_gpuva structure to assign to in each iteration step
 * @gpuvm__: &drm_gpuvm to walk over
 * @start__: starting offset, the first gpuva will overlap this
 * @end__: ending offset, the last gpuva will start before this (but may
 * overlap)
 *
 * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie
 * between @start__ and @end__. It is implemented similarly to list_for_each(),
 * but uses the &drm_gpuvm's internal interval tree to accelerate
 * the search for the starting &drm_gpuva, and hence isn't safe against removal
 * of elements. It assumes that @end__ is within (or is the upper limit of) the
 * &drm_gpuvm. This iterator does not skip over the &drm_gpuvm's
 * @kernel_alloc_node.
 */
#define drm_gpuvm_for_each_va_range(va__, gpuvm__, start__, end__) \
	for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)); \
	     va__ && (va__->va.addr < (end__)); \
	     va__ = __drm_gpuva_next(va__))

/**
 * drm_gpuvm_for_each_va_range_safe() - safely iterate over a range of
 * &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @next__: another &drm_gpuva to use as temporary storage
 * @gpuvm__: &drm_gpuvm to walk over
 * @start__: starting offset, the first gpuva will overlap this
 * @end__: ending offset, the last gpuva will start before this (but may
 * overlap)
 *
 * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie
 * between @start__ and @end__. It is implemented similarly to
 * list_for_each_safe(), but uses the &drm_gpuvm's internal interval
 * tree to accelerate the search for the starting &drm_gpuva, and hence is safe
 * against removal of elements. It assumes that @end__ is within (or is the
 * upper limit of) the &drm_gpuvm. This iterator does not skip over the
 * &drm_gpuvm's @kernel_alloc_node.
 */
#define drm_gpuvm_for_each_va_range_safe(va__, next__, gpuvm__, start__, end__) \
	for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)), \
	     next__ = __drm_gpuva_next(va__); \
	     va__ && (va__->va.addr < (end__)); \
	     va__ = next__, next__ = __drm_gpuva_next(va__))

/**
 * drm_gpuvm_for_each_va() - iterate over all &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @gpuvm__: &drm_gpuvm to walk over
 *
 * This iterator walks over all &drm_gpuva structures associated with the given
 * &drm_gpuvm.
 */
#define drm_gpuvm_for_each_va(va__, gpuvm__) \
	list_for_each_entry(va__, &(gpuvm__)->rb.list, rb.entry)

/**
 * drm_gpuvm_for_each_va_safe() - safely iterate over all &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @next__: another &drm_gpuva to use as temporary storage
 * @gpuvm__: &drm_gpuvm to walk over
 *
 * This iterator walks over all &drm_gpuva structures associated with the given
 * &drm_gpuvm. It is implemented with list_for_each_entry_safe(), and
 * hence safe against the removal of elements.
 */
#define drm_gpuvm_for_each_va_safe(va__, next__, gpuvm__) \
	list_for_each_entry_safe(va__, next__, &(gpuvm__)->rb.list, rb.entry)

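/*
 * Example: a minimal sketch of walking all mappings overlapping the range
 * [addr, addr + range). Purely illustrative; drivers typically inspect or
 * tear down the found mappings here.
 *
 *	struct drm_gpuva *va;
 *
 *	drm_gpuvm_for_each_va_range(va, gpuvm, addr, addr + range)
 *		pr_debug("mapping: addr=0x%llx range=0x%llx\n",
 *			 va->va.addr, va->va.range);
 */
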
/**
 * struct drm_gpuvm_exec - &drm_gpuvm abstraction of &drm_exec
 *
 * This structure should be created on the stack, just like &drm_exec.
 *
 * Optionally, @extra can be set in order to lock additional &drm_gem_objects.
 */
struct drm_gpuvm_exec {
	/**
	 * @exec: the &drm_exec structure
	 */
	struct drm_exec exec;

	/**
	 * @flags: the flags for the struct drm_exec
	 */
	uint32_t flags;

	/**
	 * @vm: the &drm_gpuvm whose DMA reservations are locked
	 */
	struct drm_gpuvm *vm;

	/**
	 * @num_fences: the number of fences to reserve for the &dma_resv of the
	 * locked &drm_gem_objects
	 */
	unsigned int num_fences;

	/**
	 * @extra: Callback and corresponding private data for the driver to
	 * lock arbitrary additional &drm_gem_objects.
	 */
	struct {
		/**
		 * @fn: The driver callback to lock additional &drm_gem_objects.
		 */
		int (*fn)(struct drm_gpuvm_exec *vm_exec);

		/**
		 * @priv: driver private data for the @fn callback
		 */
		void *priv;
	} extra;
};

/**
 * drm_gpuvm_prepare_vm() - prepare the GPUVM's common dma-resv
 * @gpuvm: the &drm_gpuvm
 * @exec: the &drm_exec context
 * @num_fences: the amount of &dma_fences to reserve
 *
 * Calls drm_exec_prepare_obj() for the GPUVM's dummy &drm_gem_object.
 *
 * When using this function directly, it is the driver's responsibility to
 * call drm_exec_init() and drm_exec_fini() accordingly.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static inline int
drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm,
		     struct drm_exec *exec,
		     unsigned int num_fences)
{
	return drm_exec_prepare_obj(exec, gpuvm->r_obj, num_fences);
}

int drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
			      struct drm_exec *exec,
			      unsigned int num_fences);

int drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm,
			    struct drm_exec *exec,
			    u64 addr, u64 range,
			    unsigned int num_fences);

int drm_gpuvm_exec_lock(struct drm_gpuvm_exec *vm_exec);

int drm_gpuvm_exec_lock_array(struct drm_gpuvm_exec *vm_exec,
			      struct drm_gem_object **objs,
			      unsigned int num_objs);

int drm_gpuvm_exec_lock_range(struct drm_gpuvm_exec *vm_exec,
			      u64 addr, u64 range);

/**
 * drm_gpuvm_exec_unlock() - unlock all dma-resv of all associated BOs
 * @vm_exec: the &drm_gpuvm_exec wrapper
 *
 * Releases all dma-resv locks of all &drm_gem_objects previously acquired
 * through drm_gpuvm_exec_lock() or its variants.
 */
static inline void
drm_gpuvm_exec_unlock(struct drm_gpuvm_exec *vm_exec)
{
	drm_exec_fini(&vm_exec->exec);
}

int drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec);
void drm_gpuvm_resv_add_fence(struct drm_gpuvm *gpuvm,
			      struct drm_exec *exec,
			      struct dma_fence *fence,
			      enum dma_resv_usage private_usage,
			      enum dma_resv_usage extobj_usage);

/**
 * drm_gpuvm_exec_resv_add_fence() - add fence to private and all extobj
 * &dma_resv
 * @vm_exec: the &drm_gpuvm_exec wrapper
 * @fence: fence to add
 * @private_usage: private dma-resv usage
 * @extobj_usage: extobj dma-resv usage
 *
 * See drm_gpuvm_resv_add_fence().
 */
static inline void
drm_gpuvm_exec_resv_add_fence(struct drm_gpuvm_exec *vm_exec,
			      struct dma_fence *fence,
			      enum dma_resv_usage private_usage,
			      enum dma_resv_usage extobj_usage)
{
	drm_gpuvm_resv_add_fence(vm_exec->vm, &vm_exec->exec, fence,
				 private_usage, extobj_usage);
}

/**
 * drm_gpuvm_exec_validate() - validate all BOs marked as evicted
 * @vm_exec: the &drm_gpuvm_exec wrapper
 *
 * See drm_gpuvm_validate().
 *
 * Returns: 0 on success, negative error code on failure.
 */
static inline int
drm_gpuvm_exec_validate(struct drm_gpuvm_exec *vm_exec)
{
	return drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec);
}

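/*
 * Example: a minimal sketch of the typical job submission locking flow built
 * on top of &drm_gpuvm_exec; error handling is reduced to the essentials and
 * obtaining @fence from the job (driver_run_job()) is driver specific.
 *
 *	struct drm_gpuvm_exec vm_exec = {
 *		.vm = gpuvm,
 *		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
 *		.num_fences = 1,
 *	};
 *	int ret;
 *
 *	ret = drm_gpuvm_exec_lock(&vm_exec);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_gpuvm_exec_validate(&vm_exec);
 *	if (ret)
 *		goto err_unlock;
 *
 *	fence = driver_run_job();
 *
 *	drm_gpuvm_exec_resv_add_fence(&vm_exec, fence,
 *				      DMA_RESV_USAGE_BOOKKEEP,
 *				      DMA_RESV_USAGE_BOOKKEEP);
 *	drm_gpuvm_exec_unlock(&vm_exec);
 *	return 0;
 *
 * err_unlock:
 *	drm_gpuvm_exec_unlock(&vm_exec);
 *	return ret;
 */
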
/**
 * struct drm_gpuvm_bo - structure representing a &drm_gpuvm and
 * &drm_gem_object combination
 *
 * This structure is an abstraction representing a &drm_gpuvm and
 * &drm_gem_object combination. It serves as an indirection to accelerate
 * iterating all &drm_gpuvas within a &drm_gpuvm backed by the same
 * &drm_gem_object.
 *
 * Furthermore it is used to cache evicted GEM objects for a certain GPU-VM to
 * accelerate validation.
 *
 * Typically, drivers want to create an instance of a struct drm_gpuvm_bo once
 * a GEM object is first mapped in a GPU-VM and release the instance once the
 * last mapping of the GEM object in this GPU-VM is unmapped.
 */
struct drm_gpuvm_bo {
	/**
	 * @vm: The &drm_gpuvm the @obj is mapped in. This is a reference
	 * counted pointer.
	 */
	struct drm_gpuvm *vm;

	/**
	 * @obj: The &drm_gem_object being mapped in @vm. This is a reference
	 * counted pointer.
	 */
	struct drm_gem_object *obj;

	/**
	 * @evicted: Indicates whether the &drm_gem_object is evicted; field
	 * protected by the &drm_gem_object's dma-resv lock.
	 */
	bool evicted;

	/**
	 * @kref: The reference count for this &drm_gpuvm_bo.
	 */
	struct kref kref;

	/**
	 * @list: Structure containing all &list_heads.
	 */
	struct {
		/**
		 * @gpuva: The list of linked &drm_gpuvas.
		 *
		 * It is safe to access entries from this list as long as the
		 * GEM's gpuva lock is held. See also struct drm_gem_object.
		 */
		struct list_head gpuva;

		/**
		 * @entry: Structure containing all &list_heads serving as
		 * entry.
		 */
		struct {
			/**
			 * @gem: List entry to attach to the &drm_gem_object's
			 * gpuva list.
			 */
			struct list_head gem;

			/**
			 * @extobj: List entry to attach to the &drm_gpuvm's
			 * extobj list.
			 */
			struct list_head extobj;

			/**
			 * @evict: List entry to attach to the &drm_gpuvm's
			 * evict list.
			 */
			struct list_head evict;
		} entry;
	} list;
};

struct drm_gpuvm_bo *
drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
		    struct drm_gem_object *obj);

struct drm_gpuvm_bo *
drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
		    struct drm_gem_object *obj);
struct drm_gpuvm_bo *
drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *vm_bo);

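/*
 * Example: a minimal sketch of the typical &drm_gpuvm_bo lifecycle around
 * mapping creation. drm_gpuva_link() takes its own reference on the vm_bo,
 * so the reference obtained here is dropped again right after.
 *
 *	vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);
 *	if (IS_ERR(vm_bo))
 *		return PTR_ERR(vm_bo);
 *
 *	drm_gpuva_link(va, vm_bo);
 *	drm_gpuvm_bo_put(vm_bo);
 */
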
/**
 * drm_gpuvm_bo_get() - acquire a struct drm_gpuvm_bo reference
 * @vm_bo: the &drm_gpuvm_bo to acquire the reference of
 *
 * This function acquires an additional reference to @vm_bo. It is illegal to
 * call this without already holding a reference. No locks required.
 *
 * Returns: the &struct drm_gpuvm_bo pointer
 */
static inline struct drm_gpuvm_bo *
drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo)
{
	kref_get(&vm_bo->kref);
	return vm_bo;
}

void drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo);

struct drm_gpuvm_bo *
drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
		  struct drm_gem_object *obj);

void drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict);

/**
 * drm_gpuvm_bo_gem_evict() - set the evicted state of all &drm_gpuvm_bos of a
 * GEM object
 * @obj: the &drm_gem_object
 * @evict: indicates whether @obj is evicted
 *
 * See drm_gpuvm_bo_evict().
 */
static inline void
drm_gpuvm_bo_gem_evict(struct drm_gem_object *obj, bool evict)
{
	struct drm_gpuvm_bo *vm_bo;

	drm_gem_gpuva_assert_lock_held(obj);
	drm_gem_for_each_gpuvm_bo(vm_bo, obj)
		drm_gpuvm_bo_evict(vm_bo, evict);
}

void drm_gpuvm_bo_extobj_add(struct drm_gpuvm_bo *vm_bo);

/**
 * drm_gpuvm_bo_for_each_va() - iterator to walk over a list of &drm_gpuva
 * @va__: &drm_gpuva structure to assign to in each iteration step
 * @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with
 *
 * This iterator walks over all &drm_gpuva structures associated with the
 * &drm_gpuvm_bo.
 *
 * The caller must hold the GEM's gpuva lock.
 */
#define drm_gpuvm_bo_for_each_va(va__, vm_bo__) \
	list_for_each_entry(va__, &(vm_bo__)->list.gpuva, gem.entry)

/**
 * drm_gpuvm_bo_for_each_va_safe() - iterator to safely walk over a list of
 * &drm_gpuva
 * @va__: &drm_gpuva structure to assign to in each iteration step
 * @next__: another &drm_gpuva to use as temporary storage
 * @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with
 *
 * This iterator walks over all &drm_gpuva structures associated with the
 * &drm_gpuvm_bo. It is implemented with list_for_each_entry_safe(), hence
 * it is safe against removal of elements.
 *
 * The caller must hold the GEM's gpuva lock.
 */
#define drm_gpuvm_bo_for_each_va_safe(va__, next__, vm_bo__) \
	list_for_each_entry_safe(va__, next__, &(vm_bo__)->list.gpuva, gem.entry)

/**
 * enum drm_gpuva_op_type - GPU VA operation type
 *
 * Operations to alter the GPU VA mappings tracked by the &drm_gpuvm.
 */
enum drm_gpuva_op_type {
	/**
	 * @DRM_GPUVA_OP_MAP: the map op type
	 */
	DRM_GPUVA_OP_MAP,

	/**
	 * @DRM_GPUVA_OP_REMAP: the remap op type
	 */
	DRM_GPUVA_OP_REMAP,

	/**
	 * @DRM_GPUVA_OP_UNMAP: the unmap op type
	 */
	DRM_GPUVA_OP_UNMAP,

	/**
	 * @DRM_GPUVA_OP_PREFETCH: the prefetch op type
	 */
	DRM_GPUVA_OP_PREFETCH,
};

/**
 * struct drm_gpuva_op_map - GPU VA map operation
 *
 * This structure represents a single map operation generated by the
 * DRM GPU VA manager.
 */
struct drm_gpuva_op_map {
	/**
	 * @va: structure containing address and range of a map
	 * operation
	 */
	struct {
		/**
		 * @addr: the base address of the new mapping
		 */
		u64 addr;

		/**
		 * @range: the range of the new mapping
		 */
		u64 range;
	} va;

	/**
	 * @gem: structure containing the &drm_gem_object and its offset
	 */
	struct {
		/**
		 * @offset: the offset within the &drm_gem_object
		 */
		u64 offset;

		/**
		 * @obj: the &drm_gem_object to map
		 */
		struct drm_gem_object *obj;
	} gem;
};

/**
 * struct drm_gpuva_op_unmap - GPU VA unmap operation
 *
 * This structure represents a single unmap operation generated by the
 * DRM GPU VA manager.
 */
struct drm_gpuva_op_unmap {
	/**
	 * @va: the &drm_gpuva to unmap
	 */
	struct drm_gpuva *va;

	/**
	 * @keep:
	 *
	 * Indicates whether this &drm_gpuva is physically contiguous with the
	 * original mapping request.
	 *
	 * Optionally, if &keep is set, drivers may keep the actual page table
	 * mappings for this &drm_gpuva, only adding the missing page table
	 * entries and updating the &drm_gpuvm accordingly.
	 */
	bool keep;
};

/**
 * struct drm_gpuva_op_remap - GPU VA remap operation
 *
 * This represents a single remap operation generated by the DRM GPU VA manager.
 *
 * A remap operation is generated when an existing GPU VA mapping is split up
 * by inserting a new GPU VA mapping or by partially unmapping existent
 * mapping(s), hence it consists of a maximum of two map and one unmap
 * operation.
 *
 * The @unmap operation takes care of removing the original existing mapping.
 * @prev is used to remap the preceding part, @next the subsequent part.
 *
 * If the new mapping's start address matches the old mapping's start address,
 * @prev is NULL; likewise, if the new mapping's end address matches the old
 * mapping's end address, @next is NULL.
 *
 * Note, the reason for a dedicated remap operation, rather than arbitrary
 * unmap and map operations, is to give drivers the chance of extracting driver
 * specific data for creating the new mappings from the unmap operation's
 * &drm_gpuva structure which typically is embedded in larger driver specific
 * structures.
 */
struct drm_gpuva_op_remap {
	/**
	 * @prev: the preceding part of a split mapping
	 */
	struct drm_gpuva_op_map *prev;

	/**
	 * @next: the subsequent part of a split mapping
	 */
	struct drm_gpuva_op_map *next;

	/**
	 * @unmap: the unmap operation for the original existing mapping
	 */
	struct drm_gpuva_op_unmap *unmap;
};

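/*
 * Example: unmapping the range [0x2000, 0x3000) out of an existent mapping
 * covering [0x1000, 0x4000) generates a remap operation with @prev spanning
 * [0x1000, 0x2000), @next spanning [0x3000, 0x4000) and @unmap covering the
 * original mapping.
 */
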
/**
 * struct drm_gpuva_op_prefetch - GPU VA prefetch operation
 *
 * This structure represents a single prefetch operation generated by the
 * DRM GPU VA manager.
 */
struct drm_gpuva_op_prefetch {
	/**
	 * @va: the &drm_gpuva to prefetch
	 */
	struct drm_gpuva *va;
};

/**
 * struct drm_gpuva_op - GPU VA operation
 *
 * This structure represents a single generic operation.
 *
 * The particular type of the operation is defined by @op.
 */
struct drm_gpuva_op {
	/**
	 * @entry:
	 *
	 * The &list_head used to distribute instances of this struct within
	 * &drm_gpuva_ops.
	 */
	struct list_head entry;

	/**
	 * @op: the type of the operation
	 */
	enum drm_gpuva_op_type op;

	union {
		/**
		 * @map: the map operation
		 */
		struct drm_gpuva_op_map map;

		/**
		 * @remap: the remap operation
		 */
		struct drm_gpuva_op_remap remap;

		/**
		 * @unmap: the unmap operation
		 */
		struct drm_gpuva_op_unmap unmap;

		/**
		 * @prefetch: the prefetch operation
		 */
		struct drm_gpuva_op_prefetch prefetch;
	};
};

/**
 * struct drm_gpuva_ops - wraps a list of &drm_gpuva_op
 */
struct drm_gpuva_ops {
	/**
	 * @list: the &list_head
	 */
	struct list_head list;
};

/**
 * drm_gpuva_for_each_op() - iterator to walk over &drm_gpuva_ops
 * @op: &drm_gpuva_op to assign in each iteration step
 * @ops: &drm_gpuva_ops to walk
 *
 * This iterator walks over all ops within a given list of operations.
 */
#define drm_gpuva_for_each_op(op, ops) list_for_each_entry(op, &(ops)->list, entry)

/**
 * drm_gpuva_for_each_op_safe() - iterator to safely walk over &drm_gpuva_ops
 * @op: &drm_gpuva_op to assign in each iteration step
 * @next: the next &drm_gpuva_op to store the next step
 * @ops: &drm_gpuva_ops to walk
 *
 * This iterator walks over all ops within a given list of operations. It is
 * implemented with list_for_each_entry_safe(), and hence safe against the
 * removal of elements.
 */
#define drm_gpuva_for_each_op_safe(op, next, ops) \
	list_for_each_entry_safe(op, next, &(ops)->list, entry)

/**
 * drm_gpuva_for_each_op_from_reverse() - iterate backwards from the given point
 * @op: &drm_gpuva_op to assign in each iteration step
 * @ops: &drm_gpuva_ops to walk
 *
 * This iterator walks over all ops within a given list of operations beginning
 * from the given operation in reverse order.
 */
#define drm_gpuva_for_each_op_from_reverse(op, ops) \
	list_for_each_entry_from_reverse(op, &(ops)->list, entry)

/**
 * drm_gpuva_first_op() - returns the first &drm_gpuva_op from &drm_gpuva_ops
 * @ops: the &drm_gpuva_ops to get the first &drm_gpuva_op from
 */
#define drm_gpuva_first_op(ops) \
	list_first_entry(&(ops)->list, struct drm_gpuva_op, entry)

/**
 * drm_gpuva_last_op() - returns the last &drm_gpuva_op from &drm_gpuva_ops
 * @ops: the &drm_gpuva_ops to get the last &drm_gpuva_op from
 */
#define drm_gpuva_last_op(ops) \
	list_last_entry(&(ops)->list, struct drm_gpuva_op, entry)

/**
 * drm_gpuva_prev_op() - previous &drm_gpuva_op in the list
 * @op: the current &drm_gpuva_op
 */
#define drm_gpuva_prev_op(op) list_prev_entry(op, entry)

/**
 * drm_gpuva_next_op() - next &drm_gpuva_op in the list
 * @op: the current &drm_gpuva_op
 */
#define drm_gpuva_next_op(op) list_next_entry(op, entry)

struct drm_gpuva_ops *
drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
			    u64 addr, u64 range,
			    struct drm_gem_object *obj, u64 offset);
struct drm_gpuva_ops *
drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
			      u64 addr, u64 range);

struct drm_gpuva_ops *
drm_gpuvm_prefetch_ops_create(struct drm_gpuvm *gpuvm,
			      u64 addr, u64 range);

struct drm_gpuva_ops *
drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo);

void drm_gpuva_ops_free(struct drm_gpuvm *gpuvm,
			struct drm_gpuva_ops *ops);

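/*
 * Example: a minimal sketch of the pre-allocated &drm_gpuva_ops flavour; how
 * each op is applied to the page tables is driver specific and omitted here.
 *
 *	struct drm_gpuva_ops *ops;
 *	struct drm_gpuva_op *op;
 *
 *	ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range, obj, offset);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 *	drm_gpuva_for_each_op(op, ops) {
 *		switch (op->op) {
 *		case DRM_GPUVA_OP_MAP:
 *		case DRM_GPUVA_OP_REMAP:
 *		case DRM_GPUVA_OP_UNMAP:
 *		default:
 *			break;
 *		}
 *	}
 *
 *	drm_gpuva_ops_free(gpuvm, ops);
 */
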
/**
 * drm_gpuva_init_from_op() - initialize a &drm_gpuva from a &drm_gpuva_op_map
 * @va: the &drm_gpuva to initialize
 * @op: the &drm_gpuva_op_map to take the fields from
 */
static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
					  struct drm_gpuva_op_map *op)
{
	drm_gpuva_init(va, op->va.addr, op->va.range,
		       op->gem.obj, op->gem.offset);
}

/**
 * struct drm_gpuvm_ops - callbacks for split/merge steps
 *
 * This structure defines the callbacks used by &drm_gpuvm_sm_map and
 * &drm_gpuvm_sm_unmap to provide the split/merge steps for map and unmap
 * operations to drivers.
 */
struct drm_gpuvm_ops {
	/**
	 * @vm_free: called when the last reference of a struct drm_gpuvm is
	 * dropped
	 *
	 * This callback is mandatory.
	 */
	void (*vm_free)(struct drm_gpuvm *gpuvm);

	/**
	 * @op_alloc: called when the &drm_gpuvm allocates
	 * a struct drm_gpuva_op
	 *
	 * Some drivers may want to embed struct drm_gpuva_op into driver
	 * specific structures. By implementing this callback drivers can
	 * allocate memory accordingly.
	 *
	 * This callback is optional.
	 */
	struct drm_gpuva_op *(*op_alloc)(void);

	/**
	 * @op_free: called when the &drm_gpuvm frees a
	 * struct drm_gpuva_op
	 *
	 * Some drivers may want to embed struct drm_gpuva_op into driver
	 * specific structures. By implementing this callback drivers can
	 * free the previously allocated memory accordingly.
	 *
	 * This callback is optional.
	 */
	void (*op_free)(struct drm_gpuva_op *op);

	/**
	 * @vm_bo_alloc: called when the &drm_gpuvm allocates
	 * a struct drm_gpuvm_bo
	 *
	 * Some drivers may want to embed struct drm_gpuvm_bo into driver
	 * specific structures. By implementing this callback drivers can
	 * allocate memory accordingly.
	 *
	 * This callback is optional.
	 */
	struct drm_gpuvm_bo *(*vm_bo_alloc)(void);

	/**
	 * @vm_bo_free: called when the &drm_gpuvm frees a
	 * struct drm_gpuvm_bo
	 *
	 * Some drivers may want to embed struct drm_gpuvm_bo into driver
	 * specific structures. By implementing this callback drivers can
	 * free the previously allocated memory accordingly.
	 *
	 * This callback is optional.
	 */
	void (*vm_bo_free)(struct drm_gpuvm_bo *vm_bo);

	/**
	 * @vm_bo_validate: called from drm_gpuvm_validate()
	 *
	 * Drivers receive this callback for every evicted &drm_gem_object being
	 * mapped in the corresponding &drm_gpuvm.
	 *
	 * Typically, drivers would call their driver specific variant of
	 * ttm_bo_validate() from within this callback.
	 */
	int (*vm_bo_validate)(struct drm_gpuvm_bo *vm_bo,
			      struct drm_exec *exec);

	/**
	 * @sm_step_map: called from &drm_gpuvm_sm_map to finally insert the
	 * mapping once all previous steps were completed
	 *
	 * The &priv pointer matches the one the driver passed to
	 * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
	 *
	 * Can be NULL if &drm_gpuvm_sm_map is not used.
	 */
	int (*sm_step_map)(struct drm_gpuva_op *op, void *priv);

	/**
	 * @sm_step_remap: called from &drm_gpuvm_sm_map and
	 * &drm_gpuvm_sm_unmap to split up an existent mapping
	 *
	 * This callback is called when an existent mapping needs to be split
	 * up. This is the case when either a newly requested mapping overlaps
	 * or is enclosed by an existent mapping or a partial unmap of an
	 * existent mapping is requested.
	 *
	 * The &priv pointer matches the one the driver passed to
	 * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
	 *
	 * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is
	 * used.
	 */
	int (*sm_step_remap)(struct drm_gpuva_op *op, void *priv);

	/**
	 * @sm_step_unmap: called from &drm_gpuvm_sm_map and
	 * &drm_gpuvm_sm_unmap to unmap an existent mapping
	 *
	 * This callback is called when an existent mapping needs to be
	 * unmapped. This is the case when either a newly requested mapping
	 * encloses an existent mapping or an unmap of an existent mapping is
	 * requested.
	 *
	 * The &priv pointer matches the one the driver passed to
	 * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
	 *
	 * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is
	 * used.
	 */
	int (*sm_step_unmap)(struct drm_gpuva_op *op, void *priv);
};

int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
		     u64 addr, u64 range,
		     struct drm_gem_object *obj, u64 offset);

int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
		       u64 addr, u64 range);

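/*
 * Example: a minimal sketch of the callback-based mode of operation. The
 * driver context (struct my_ctx with its gpuvm, vm_bo and pre-allocated
 * new_va members) is hypothetical; page table programming is omitted.
 *
 *	static int my_sm_step_map(struct drm_gpuva_op *op, void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *
 *		drm_gpuva_map(ctx->gpuvm, ctx->new_va, &op->map);
 *		drm_gpuva_link(ctx->new_va, ctx->vm_bo);
 *		ctx->new_va = NULL;
 *		return 0;
 *	}
 *
 * Together with sm_step_remap and sm_step_unmap implementations wired up in
 * the VM's &drm_gpuvm_ops, the split/merge steps for a new mapping are then
 * driven by:
 *
 *	ret = drm_gpuvm_sm_map(gpuvm, ctx, addr, range, obj, offset);
 */
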
void drm_gpuva_map(struct drm_gpuvm *gpuvm,
		   struct drm_gpuva *va,
		   struct drm_gpuva_op_map *op);

void drm_gpuva_remap(struct drm_gpuva *prev,
		     struct drm_gpuva *next,
		     struct drm_gpuva_op_remap *op);

void drm_gpuva_unmap(struct drm_gpuva_op_unmap *op);

/**
 * drm_gpuva_op_remap_to_unmap_range() - Helper to get the start and range of
 * the unmap stage of a remap op.
 * @op: Remap op.
 * @start_addr: Output pointer for the start of the required unmap.
 * @range: Output pointer for the length of the required unmap.
 *
 * The given start address and range will be set such that they represent the
 * range of the address space that was previously covered by the mapping being
 * re-mapped, but is now empty.
 */
static inline void
drm_gpuva_op_remap_to_unmap_range(const struct drm_gpuva_op_remap *op,
				  u64 *start_addr, u64 *range)
{
	const u64 va_start = op->prev ?
			     op->prev->va.addr + op->prev->va.range :
			     op->unmap->va->va.addr;
	const u64 va_end = op->next ?
			   op->next->va.addr :
			   op->unmap->va->va.addr + op->unmap->va->va.range;

	if (start_addr)
		*start_addr = va_start;
	if (range)
		*range = va_end - va_start;
}

#endif /* __DRM_GPUVM_H__ */