1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
2 /*
3  * Copyright (c) 2022 Red Hat.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *     Danilo Krummrich <dakr@redhat.com>
25  *
26  */
27 
28 #include <drm/drm_gpuvm.h>
29 
30 #include <linux/export.h>
31 #include <linux/interval_tree_generic.h>
32 #include <linux/mm.h>
33 
34 /**
35  * DOC: Overview
36  *
 * The DRM GPU VA Manager, represented by struct drm_gpuvm, keeps track of a
38  * GPU's virtual address (VA) space and manages the corresponding virtual
39  * mappings represented by &drm_gpuva objects. It also keeps track of the
40  * mapping's backing &drm_gem_object buffers.
41  *
42  * &drm_gem_object buffers maintain a list of &drm_gpuva objects representing
43  * all existing GPU VA mappings using this &drm_gem_object as backing buffer.
44  *
45  * GPU VAs can be flagged as sparse, such that drivers may use GPU VAs to also
46  * keep track of sparse PTEs in order to support Vulkan 'Sparse Resources'.
47  *
 * The GPU VA manager internally uses an rb-tree to manage the
49  * &drm_gpuva mappings within a GPU's virtual address space.
50  *
51  * The &drm_gpuvm structure contains a special &drm_gpuva representing the
52  * portion of VA space reserved by the kernel. This node is initialized together
53  * with the GPU VA manager instance and removed when the GPU VA manager is
54  * destroyed.
55  *
 * In a typical application, drivers would embed struct drm_gpuvm and
 * struct drm_gpuva within their own driver specific structures; hence, there
 * are no memory allocations for the &drm_gpuvm itself nor for &drm_gpuva
 * entries.
60  *
61  * The data structures needed to store &drm_gpuvas within the &drm_gpuvm are
62  * contained within struct drm_gpuva already. Hence, for inserting &drm_gpuva
63  * entries from within dma-fence signalling critical sections it is enough to
64  * pre-allocate the &drm_gpuva structures.
65  *
66  * &drm_gem_objects which are private to a single VM can share a common
67  * &dma_resv in order to improve locking efficiency (e.g. with &drm_exec).
 * For this purpose drivers must pass a &drm_gem_object to drm_gpuvm_init(),
 * referred to as the 'resv object' in the following, which serves as the
 * container of the GPUVM's shared &dma_resv. This resv object can be a driver
 * specific
71  * &drm_gem_object, such as the &drm_gem_object containing the root page table,
72  * but it can also be a 'dummy' object, which can be allocated with
73  * drm_gpuvm_resv_object_alloc().
74  *
75  * In order to connect a struct drm_gpuva to its backing &drm_gem_object each
76  * &drm_gem_object maintains a list of &drm_gpuvm_bo structures, and each
77  * &drm_gpuvm_bo contains a list of &drm_gpuva structures.
78  *
79  * A &drm_gpuvm_bo is an abstraction that represents a combination of a
80  * &drm_gpuvm and a &drm_gem_object. Every such combination should be unique.
81  * This is ensured by the API through drm_gpuvm_bo_obtain() and
82  * drm_gpuvm_bo_obtain_prealloc() which first look into the corresponding
83  * &drm_gem_object list of &drm_gpuvm_bos for an existing instance of this
84  * particular combination. If not present, a new instance is created and linked
85  * to the &drm_gem_object.
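 *
 * A short sketch of this pattern, assuming 'gpuvm' and 'obj' from the
 * surrounding driver code::
 *
 *	struct drm_gpuvm_bo *vm_bo;
 *
 *	// returns an existing instance or creates and links a new one
 *	vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);
 *	if (IS_ERR(vm_bo))
 *		return PTR_ERR(vm_bo);
 *
 *	// ... link &drm_gpuva entries to vm_bo ...
 *
 *	drm_gpuvm_bo_put(vm_bo);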
86  *
87  * &drm_gpuvm_bo structures, since unique for a given &drm_gpuvm, are also used
88  * as entry for the &drm_gpuvm's lists of external and evicted objects. Those
89  * lists are maintained in order to accelerate locking of dma-resv locks and
90  * validation of evicted objects bound in a &drm_gpuvm. For instance, all
91  * &drm_gem_object's &dma_resv of a given &drm_gpuvm can be locked by calling
92  * drm_gpuvm_exec_lock(). Once locked drivers can call drm_gpuvm_validate() in
93  * order to validate all evicted &drm_gem_objects. It is also possible to lock
94  * additional &drm_gem_objects by providing the corresponding parameters to
 * drm_gpuvm_exec_lock(), or to open code the &drm_exec loop while making
96  * use of helper functions such as drm_gpuvm_prepare_range() or
97  * drm_gpuvm_prepare_objects().
98  *
 * Every bound &drm_gem_object is treated as an external object when its
 * &dma_resv structure is different from the &drm_gpuvm's common &dma_resv
 * structure.
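 *
 * The following is a minimal sketch of this lock and validate flow; job setup
 * and the origin of 'fence' are left out for brevity, and the dma-resv usage
 * values are exemplary::
 *
 *	struct drm_gpuvm_exec vm_exec = {
 *		.vm = gpuvm,
 *		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
 *		.num_fences = 1,
 *	};
 *	int ret;
 *
 *	ret = drm_gpuvm_exec_lock(&vm_exec);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_gpuvm_validate(gpuvm, &vm_exec.exec);
 *	if (ret) {
 *		drm_gpuvm_exec_unlock(&vm_exec);
 *		return ret;
 *	}
 *
 *	// submit the GPU job, then add the job's fence
 *	drm_gpuvm_exec_resv_add_fence(&vm_exec, fence,
 *				      DMA_RESV_USAGE_BOOKKEEP,
 *				      DMA_RESV_USAGE_BOOKKEEP);
 *	drm_gpuvm_exec_unlock(&vm_exec);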
101  */
102 
103 /**
104  * DOC: Split and Merge
105  *
106  * Besides its capability to manage and represent a GPU VA space, the
107  * GPU VA manager also provides functions to let the &drm_gpuvm calculate a
108  * sequence of operations to satisfy a given map or unmap request.
109  *
110  * Therefore the DRM GPU VA manager provides an algorithm implementing splitting
111  * and merging of existing GPU VA mappings with the ones that are requested to
112  * be mapped or unmapped. This feature is required by the Vulkan API to
 * implement Vulkan 'Sparse Memory Bindings' - driver UAPIs often refer to this
114  * as VM BIND.
115  *
116  * Drivers can call drm_gpuvm_sm_map() to receive a sequence of callbacks
117  * containing map, unmap and remap operations for a given newly requested
118  * mapping. The sequence of callbacks represents the set of operations to
119  * execute in order to integrate the new mapping cleanly into the current state
120  * of the GPU VA space.
121  *
122  * Depending on how the new GPU VA mapping intersects with the existing mappings
123  * of the GPU VA space the &drm_gpuvm_ops callbacks contain an arbitrary amount
124  * of unmap operations, a maximum of two remap operations and a single map
125  * operation. The caller might receive no callback at all if no operation is
126  * required, e.g. if the requested mapping already exists in the exact same way.
127  *
128  * The single map operation represents the original map operation requested by
129  * the caller.
130  *
 * &drm_gpuva_op_unmap contains a 'keep' field, which indicates whether the
 * &drm_gpuva to unmap is physically contiguous with the original mapping
 * request. Optionally, if 'keep' is set, drivers may keep the actual page table
 * entries for this &drm_gpuva, adding only the missing page table entries and
 * updating the &drm_gpuvm's view of things accordingly.
136  *
137  * Drivers may do the same optimization, namely delta page table updates, also
138  * for remap operations. This is possible since &drm_gpuva_op_remap consists of
139  * one unmap operation and one or two map operations, such that drivers can
140  * derive the page table update delta accordingly.
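 *
 * A short sketch of this optimization in a driver's unmap step; the driver
 * helper functions are hypothetical::
 *
 *	if (op->unmap.keep)
 *		// PTEs remain valid, only fill in the delta
 *		driver_update_ptes_partially(op->unmap.va);
 *	else
 *		driver_clear_ptes(op->unmap.va);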
141  *
142  * Note that there can't be more than two existing mappings to split up, one at
143  * the beginning and one at the end of the new mapping, hence there is a
144  * maximum of two remap operations.
145  *
 * Analogous to drm_gpuvm_sm_map(), drm_gpuvm_sm_unmap() uses &drm_gpuvm_ops to
 * call back into the driver in order to unmap a range of GPU VA space. The
 * logic behind this function is much simpler though: for all existing mappings
 * enclosed by the given range, unmap operations are created. For mappings which
 * are only partially located within the given range, remap operations are
 * created such that those mappings are split up and re-mapped partially.
152  *
153  * As an alternative to drm_gpuvm_sm_map() and drm_gpuvm_sm_unmap(),
154  * drm_gpuvm_sm_map_ops_create() and drm_gpuvm_sm_unmap_ops_create() can be used
155  * to directly obtain an instance of struct drm_gpuva_ops containing a list of
156  * &drm_gpuva_op, which can be iterated with drm_gpuva_for_each_op(). This list
157  * contains the &drm_gpuva_ops analogous to the callbacks one would receive when
 * calling drm_gpuvm_sm_map() or drm_gpuvm_sm_unmap(). While this approach requires
159  * more memory (to allocate the &drm_gpuva_ops), it provides drivers a way to
160  * iterate the &drm_gpuva_op multiple times, e.g. once in a context where memory
161  * allocations are possible (e.g. to allocate GPU page tables) and once in the
162  * dma-fence signalling critical path.
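 *
 * A condensed sketch of this two pass pattern; driver_prepare_op() and
 * driver_commit_op() are hypothetical driver functions, and the declarations
 * match those of the first example below::
 *
 *	ops = drm_gpuvm_sm_map_ops_create(gpuvm, &map_req);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 *	// First pass: memory allocations are still allowed, e.g. for
 *	// GPU page tables.
 *	drm_gpuva_for_each_op(op, ops)
 *		driver_prepare_op(op);
 *
 *	// Second pass: from the dma-fence signalling critical path.
 *	drm_gpuva_for_each_op(op, ops)
 *		driver_commit_op(op);
 *
 *	drm_gpuva_ops_free(gpuvm, ops);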
163  *
164  * To update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert() and
165  * drm_gpuva_remove() may be used. These functions can safely be used from
166  * &drm_gpuvm_ops callbacks originating from drm_gpuvm_sm_map() or
167  * drm_gpuvm_sm_unmap(). However, it might be more convenient to use the
168  * provided helper functions drm_gpuva_map(), drm_gpuva_remap() and
169  * drm_gpuva_unmap() instead.
170  *
171  * The following diagram depicts the basic relationships of existing GPU VA
172  * mappings, a newly requested mapping and the resulting mappings as implemented
173  * by drm_gpuvm_sm_map() - it doesn't cover any arbitrary combinations of these.
174  *
175  * 1) Requested mapping is identical. Replace it, but indicate the backing PTEs
176  *    could be kept.
177  *
178  *    ::
179  *
180  *	     0     a     1
181  *	old: |-----------| (bo_offset=n)
182  *
183  *	     0     a     1
184  *	req: |-----------| (bo_offset=n)
185  *
186  *	     0     a     1
187  *	new: |-----------| (bo_offset=n)
188  *
189  *
190  * 2) Requested mapping is identical, except for the BO offset, hence replace
191  *    the mapping.
192  *
193  *    ::
194  *
195  *	     0     a     1
196  *	old: |-----------| (bo_offset=n)
197  *
198  *	     0     a     1
199  *	req: |-----------| (bo_offset=m)
200  *
201  *	     0     a     1
202  *	new: |-----------| (bo_offset=m)
203  *
204  *
205  * 3) Requested mapping is identical, except for the backing BO, hence replace
206  *    the mapping.
207  *
208  *    ::
209  *
210  *	     0     a     1
211  *	old: |-----------| (bo_offset=n)
212  *
213  *	     0     b     1
214  *	req: |-----------| (bo_offset=n)
215  *
216  *	     0     b     1
217  *	new: |-----------| (bo_offset=n)
218  *
219  *
 * 4) Existing mapping is a left aligned subset of the requested one, hence
 *    replace the existing mapping.
222  *
223  *    ::
224  *
225  *	     0  a  1
226  *	old: |-----|       (bo_offset=n)
227  *
228  *	     0     a     2
229  *	req: |-----------| (bo_offset=n)
230  *
231  *	     0     a     2
232  *	new: |-----------| (bo_offset=n)
233  *
234  *    .. note::
235  *       We expect to see the same result for a request with a different BO
236  *       and/or non-contiguous BO offset.
237  *
238  *
239  * 5) Requested mapping's range is a left aligned subset of the existing one,
240  *    but backed by a different BO. Hence, map the requested mapping and split
241  *    the existing one adjusting its BO offset.
242  *
243  *    ::
244  *
245  *	     0     a     2
246  *	old: |-----------| (bo_offset=n)
247  *
248  *	     0  b  1
249  *	req: |-----|       (bo_offset=n)
250  *
251  *	     0  b  1  a' 2
252  *	new: |-----|-----| (b.bo_offset=n, a.bo_offset=n+1)
253  *
254  *    .. note::
255  *       We expect to see the same result for a request with a different BO
256  *       and/or non-contiguous BO offset.
257  *
258  *
 * 6) Existing mapping is a superset of the requested mapping. Split it up, but
260  *    indicate that the backing PTEs could be kept.
261  *
262  *    ::
263  *
264  *	     0     a     2
265  *	old: |-----------| (bo_offset=n)
266  *
267  *	     0  a  1
268  *	req: |-----|       (bo_offset=n)
269  *
270  *	     0  a  1  a' 2
271  *	new: |-----|-----| (a.bo_offset=n, a'.bo_offset=n+1)
272  *
273  *
274  * 7) Requested mapping's range is a right aligned subset of the existing one,
275  *    but backed by a different BO. Hence, map the requested mapping and split
276  *    the existing one, without adjusting the BO offset.
277  *
278  *    ::
279  *
280  *	     0     a     2
281  *	old: |-----------| (bo_offset=n)
282  *
283  *	           1  b  2
284  *	req:       |-----| (bo_offset=m)
285  *
286  *	     0  a  1  b  2
287  *	new: |-----|-----| (a.bo_offset=n,b.bo_offset=m)
288  *
289  *
 * 8) Existing mapping is a superset of the requested mapping. Split it up, but
291  *    indicate that the backing PTEs could be kept.
292  *
293  *    ::
294  *
 *	     0     a     2
296  *	old: |-----------| (bo_offset=n)
297  *
298  *	           1  a  2
299  *	req:       |-----| (bo_offset=n+1)
300  *
301  *	     0  a' 1  a  2
302  *	new: |-----|-----| (a'.bo_offset=n, a.bo_offset=n+1)
303  *
304  *
 * 9) Existing mapping is overlapped at the end by the requested mapping backed
306  *    by a different BO. Hence, map the requested mapping and split up the
307  *    existing one, without adjusting the BO offset.
308  *
309  *    ::
310  *
311  *	     0     a     2
312  *	old: |-----------|       (bo_offset=n)
313  *
314  *	           1     b     3
315  *	req:       |-----------| (bo_offset=m)
316  *
317  *	     0  a  1     b     3
318  *	new: |-----|-----------| (a.bo_offset=n,b.bo_offset=m)
319  *
320  *
 * 10) Existing mapping is overlapped at the end by the requested mapping, both
 *     having the same backing BO with a contiguous offset. Indicate the
 *     backing PTEs of the old mapping could be kept.
324  *
325  *     ::
326  *
327  *	      0     a     2
328  *	 old: |-----------|       (bo_offset=n)
329  *
330  *	            1     a     3
331  *	 req:       |-----------| (bo_offset=n+1)
332  *
333  *	      0  a' 1     a     3
334  *	 new: |-----|-----------| (a'.bo_offset=n, a.bo_offset=n+1)
335  *
336  *
337  * 11) Requested mapping's range is a centered subset of the existing one
338  *     having a different backing BO. Hence, map the requested mapping and split
339  *     up the existing one in two mappings, adjusting the BO offset of the right
340  *     one accordingly.
341  *
342  *     ::
343  *
344  *	      0        a        3
345  *	 old: |-----------------| (bo_offset=n)
346  *
347  *	            1  b  2
348  *	 req:       |-----|       (bo_offset=m)
349  *
350  *	      0  a  1  b  2  a' 3
351  *	 new: |-----|-----|-----| (a.bo_offset=n,b.bo_offset=m,a'.bo_offset=n+2)
352  *
353  *
354  * 12) Requested mapping is a contiguous subset of the existing one. Split it
355  *     up, but indicate that the backing PTEs could be kept.
356  *
357  *     ::
358  *
359  *	      0        a        3
360  *	 old: |-----------------| (bo_offset=n)
361  *
362  *	            1  a  2
363  *	 req:       |-----|       (bo_offset=n+1)
364  *
365  *	      0  a' 1  a  2 a'' 3
 *	 new: |-----|-----|-----| (a'.bo_offset=n, a.bo_offset=n+1, a''.bo_offset=n+2)
367  *
368  *
 * 13) Existing mapping is a right aligned subset of the requested one, hence
 *     replace the existing mapping.
371  *
372  *     ::
373  *
374  *	            1  a  2
375  *	 old:       |-----| (bo_offset=n+1)
376  *
377  *	      0     a     2
378  *	 req: |-----------| (bo_offset=n)
379  *
380  *	      0     a     2
381  *	 new: |-----------| (bo_offset=n)
382  *
383  *     .. note::
384  *        We expect to see the same result for a request with a different bo
385  *        and/or non-contiguous bo_offset.
386  *
387  *
 * 14) Existing mapping is a centered subset of the requested one, hence
 *     replace the existing mapping.
390  *
391  *     ::
392  *
393  *	            1  a  2
394  *	 old:       |-----| (bo_offset=n+1)
395  *
396  *	      0        a       3
397  *	 req: |----------------| (bo_offset=n)
398  *
399  *	      0        a       3
400  *	 new: |----------------| (bo_offset=n)
401  *
402  *     .. note::
403  *        We expect to see the same result for a request with a different bo
404  *        and/or non-contiguous bo_offset.
405  *
406  *
 * 15) Existing mapping is overlapped at the beginning by the requested mapping
408  *     backed by a different BO. Hence, map the requested mapping and split up
409  *     the existing one, adjusting its BO offset accordingly.
410  *
411  *     ::
412  *
413  *	            1     a     3
414  *	 old:       |-----------| (bo_offset=n)
415  *
416  *	      0     b     2
417  *	 req: |-----------|       (bo_offset=m)
418  *
419  *	      0     b     2  a' 3
 *	 new: |-----------|-----| (b.bo_offset=m,a'.bo_offset=n+2)
421  */
422 
423 /**
 * DOC: Madvise Logic - Splitting and Traversal
 *
 * This logic handles GPU VA range updates by generating remap and map
 * operations without performing unmaps or merging existing mappings.
 *
 * 1) The requested range lies entirely within a single drm_gpuva. The logic
 * splits the existing mapping at the start and end boundaries and inserts a
 * new map.
 *
 * ::
 *
 *              a      start    end     b
 *         pre: |-----------------------|
 *                     drm_gpuva1
 *
 *              a      start    end     b
 *         new: |-----|=========|-------|
 *               remap   map      remap
 *
 * one REMAP and one MAP: same behaviour as SPLIT and MERGE
 *
 * 2) The requested range spans multiple drm_gpuva regions. The logic traverses
 * across boundaries, remapping the start and end segments and inserting two
 * map operations, which together with the untouched middle mapping cover the
 * full range.
 *
 * ::
 *
 *              a       start      b              c        end       d
 *         pre: |------------------|--------------|------------------|
 *                    drm_gpuva1      drm_gpuva2         drm_gpuva3
 *
 *              a       start      b              c        end       d
 *         new: |-------|==========|--------------|========|---------|
 *                remap1   map1       drm_gpuva2    map2     remap2
 *
 * two REMAPs and two MAPs
 *
 * 3) Either start or end lies within a drm_gpuva. A single remap and a single
 * map operation are generated to update the affected portion.
 *
 * ::
 *
 *              a/start            b              c        end       d
 *         pre: |------------------|--------------|------------------|
 *                    drm_gpuva1      drm_gpuva2         drm_gpuva3
 *
 *              a/start            b              c        end       d
 *         new: |------------------|--------------|========|---------|
 *                drm_gpuva1         drm_gpuva2     map1     remap1
 *
 * ::
 *
 *              a       start      b              c/end              d
 *         pre: |------------------|--------------|------------------|
 *                    drm_gpuva1      drm_gpuva2         drm_gpuva3
 *
 *              a       start      b              c/end              d
 *         new: |-------|==========|--------------|------------------|
 *                remap1   map1       drm_gpuva2        drm_gpuva3
 *
 * one REMAP and one MAP
 *
 * 4) Both start and end align with existing drm_gpuva boundaries. No
 * operations are needed as the range is already covered.
 *
 * 5) No existing drm_gpuvas. No operations.
 *
 * Unlike drm_gpuvm_sm_map_ops_create(), this logic avoids unmaps and merging,
 * focusing solely on remap and map operations for efficient traversal and
 * update.
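 *
 * A sketch of how a driver might consume these operations; the ops interface
 * name drm_gpuvm_madvise_ops_create() and the 'req' argument (a &struct
 * drm_gpuvm_map_req describing the range) are assumptions based on the logic
 * described above::
 *
 *	struct drm_gpuva_ops *ops;
 *	struct drm_gpuva_op *op;
 *
 *	ops = drm_gpuvm_madvise_ops_create(gpuvm, &req);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 *	drm_gpuva_for_each_op(op, ops) {
 *		switch (op->op) {
 *		case DRM_GPUVA_OP_MAP:
 *			// update attributes for the new range
 *			break;
 *		case DRM_GPUVA_OP_REMAP:
 *			// split the existing mapping
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 *
 *	drm_gpuva_ops_free(gpuvm, ops);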
486  */
487 
488 /**
489  * DOC: Locking
490  *
 * In terms of managing &drm_gpuva entries, DRM GPUVM does not take care of
 * locking itself; it is the driver's responsibility to take care of locking.
493  * Drivers might want to protect the following operations: inserting, removing
494  * and iterating &drm_gpuva objects as well as generating all kinds of
495  * operations, such as split / merge or prefetch.
496  *
 * DRM GPUVM also does not take care of the locking of the backing
 * &drm_gem_object buffers' GPU VA lists and &drm_gpuvm_bo abstractions by
 * itself; drivers are responsible for enforcing mutual exclusion using either
 * the GEM's dma_resv lock or the GEM's gpuva.lock mutex.
501  *
502  * However, DRM GPUVM contains lockdep checks to ensure callers of its API hold
 * the corresponding lock whenever the &drm_gem_object's GPU VA list is accessed
504  * by functions such as drm_gpuva_link() or drm_gpuva_unlink(), but also
505  * drm_gpuvm_bo_obtain() and drm_gpuvm_bo_put().
506  *
 * The latter is required since on creation and destruction of a &drm_gpuvm_bo
 * the &drm_gpuvm_bo is attached to / removed from the &drm_gem_object's gpuva
 * list.
509  * Subsequent calls to drm_gpuvm_bo_obtain() for the same &drm_gpuvm and
510  * &drm_gem_object must be able to observe previous creations and destructions
511  * of &drm_gpuvm_bos in order to keep instances unique.
512  *
513  * The &drm_gpuvm's lists for keeping track of external and evicted objects are
514  * protected against concurrent insertion / removal and iteration internally.
515  *
 * However, drivers still need to protect concurrent calls to functions
517  * iterating those lists, namely drm_gpuvm_prepare_objects() and
518  * drm_gpuvm_validate().
519  *
520  * Alternatively, drivers can set the &DRM_GPUVM_RESV_PROTECTED flag to indicate
521  * that the corresponding &dma_resv locks are held in order to protect the
522  * lists. If &DRM_GPUVM_RESV_PROTECTED is set, internal locking is disabled and
523  * the corresponding lockdep checks are enabled. This is an optimization for
524  * drivers which are capable of taking the corresponding &dma_resv locks and
525  * hence do not require internal locking.
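 *
 * For example, a driver taking this path might set the flag at initialization
 * time; a minimal sketch, assuming a hypothetical driver structure embedding
 * the &drm_gpuvm and hypothetical VA space bounds::
 *
 *	drm_gpuvm_init(&vm->base, "driver-vm", DRM_GPUVM_RESV_PROTECTED, drm,
 *		       r_obj, 0, 1ull << 48, 0, 0, &driver_vm_ops);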
526  */
527 
528 /**
529  * DOC: Examples
530  *
 * This section gives two examples of how to let the DRM GPU VA Manager
 * generate &drm_gpuva_op in order to satisfy a given map or unmap request and
 * how to make use of them.
534  *
535  * The below code is strictly limited to illustrate the generic usage pattern.
536  * To maintain simplicity, it doesn't make use of any abstractions for common
537  * code, different (asynchronous) stages with fence signalling critical paths,
538  * any other helpers or error handling in terms of freeing memory and dropping
539  * previously taken locks.
540  *
541  * 1) Obtain a list of &drm_gpuva_op to create a new mapping::
542  *
543  *	// Allocates a new &drm_gpuva.
544  *	struct drm_gpuva * driver_gpuva_alloc(void);
545  *
546  *	// Typically drivers would embed the &drm_gpuvm and &drm_gpuva
547  *	// structure in individual driver structures and lock the dma-resv with
548  *	// drm_exec or similar helpers.
549  *	int driver_mapping_create(struct drm_gpuvm *gpuvm,
550  *				  u64 addr, u64 range,
551  *				  struct drm_gem_object *obj, u64 offset)
552  *	{
 *		struct drm_gpuvm_map_req map_req = {
 *			.map.va.addr = addr,
 *			.map.va.range = range,
 *			.map.gem.obj = obj,
 *			.map.gem.offset = offset,
 *		};
559  *		struct drm_gpuva_ops *ops;
 *		struct drm_gpuva_op *op;
561  *		struct drm_gpuvm_bo *vm_bo;
562  *
563  *		driver_lock_va_space();
564  *		ops = drm_gpuvm_sm_map_ops_create(gpuvm, &map_req);
565  *		if (IS_ERR(ops))
566  *			return PTR_ERR(ops);
567  *
568  *		vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);
569  *		if (IS_ERR(vm_bo))
570  *			return PTR_ERR(vm_bo);
571  *
572  *		drm_gpuva_for_each_op(op, ops) {
573  *			struct drm_gpuva *va;
574  *
575  *			switch (op->op) {
576  *			case DRM_GPUVA_OP_MAP:
577  *				va = driver_gpuva_alloc();
578  *				if (!va)
579  *					; // unwind previous VA space updates,
580  *					  // free memory and unlock
581  *
582  *				driver_vm_map();
583  *				drm_gpuva_map(gpuvm, va, &op->map);
584  *				drm_gpuva_link(va, vm_bo);
585  *
586  *				break;
587  *			case DRM_GPUVA_OP_REMAP: {
588  *				struct drm_gpuva *prev = NULL, *next = NULL;
589  *
590  *				va = op->remap.unmap->va;
591  *
592  *				if (op->remap.prev) {
593  *					prev = driver_gpuva_alloc();
594  *					if (!prev)
595  *						; // unwind previous VA space
596  *						  // updates, free memory and
597  *						  // unlock
598  *				}
599  *
600  *				if (op->remap.next) {
601  *					next = driver_gpuva_alloc();
602  *					if (!next)
603  *						; // unwind previous VA space
604  *						  // updates, free memory and
605  *						  // unlock
606  *				}
607  *
608  *				driver_vm_remap();
609  *				drm_gpuva_remap(prev, next, &op->remap);
610  *
611  *				if (prev)
612  *					drm_gpuva_link(prev, va->vm_bo);
613  *				if (next)
614  *					drm_gpuva_link(next, va->vm_bo);
615  *				drm_gpuva_unlink(va);
616  *
617  *				break;
618  *			}
619  *			case DRM_GPUVA_OP_UNMAP:
620  *				va = op->unmap->va;
621  *
622  *				driver_vm_unmap();
623  *				drm_gpuva_unlink(va);
624  *				drm_gpuva_unmap(&op->unmap);
625  *
626  *				break;
627  *			default:
628  *				break;
629  *			}
630  *		}
631  *		drm_gpuvm_bo_put(vm_bo);
632  *		driver_unlock_va_space();
633  *
634  *		return 0;
635  *	}
636  *
637  * 2) Receive a callback for each &drm_gpuva_op to create a new mapping::
638  *
639  *	struct driver_context {
640  *		struct drm_gpuvm *gpuvm;
641  *		struct drm_gpuvm_bo *vm_bo;
642  *		struct drm_gpuva *new_va;
643  *		struct drm_gpuva *prev_va;
644  *		struct drm_gpuva *next_va;
645  *	};
646  *
647  *	// ops to pass to drm_gpuvm_init()
648  *	static const struct drm_gpuvm_ops driver_gpuvm_ops = {
649  *		.sm_step_map = driver_gpuva_map,
650  *		.sm_step_remap = driver_gpuva_remap,
651  *		.sm_step_unmap = driver_gpuva_unmap,
652  *	};
653  *
654  *	// Typically drivers would embed the &drm_gpuvm and &drm_gpuva
655  *	// structure in individual driver structures and lock the dma-resv with
656  *	// drm_exec or similar helpers.
657  *	int driver_mapping_create(struct drm_gpuvm *gpuvm,
658  *				  u64 addr, u64 range,
659  *				  struct drm_gem_object *obj, u64 offset)
660  *	{
 *		struct driver_context ctx;
 *		struct drm_gpuvm_map_req map_req = {
 *			.map.va.addr = addr,
 *			.map.va.range = range,
 *			.map.gem.obj = obj,
 *			.map.gem.offset = offset,
 *		};
 *		int ret = 0;
666  *
667  *		ctx.gpuvm = gpuvm;
668  *
669  *		ctx.new_va = kzalloc(sizeof(*ctx.new_va), GFP_KERNEL);
670  *		ctx.prev_va = kzalloc(sizeof(*ctx.prev_va), GFP_KERNEL);
671  *		ctx.next_va = kzalloc(sizeof(*ctx.next_va), GFP_KERNEL);
672  *		ctx.vm_bo = drm_gpuvm_bo_create(gpuvm, obj);
 *		if (!ctx.new_va || !ctx.prev_va || !ctx.next_va || !ctx.vm_bo) {
674  *			ret = -ENOMEM;
675  *			goto out;
676  *		}
677  *
678  *		// Typically protected with a driver specific GEM gpuva lock
679  *		// used in the fence signaling path for drm_gpuva_link() and
680  *		// drm_gpuva_unlink(), hence pre-allocate.
681  *		ctx.vm_bo = drm_gpuvm_bo_obtain_prealloc(ctx.vm_bo);
682  *
683  *		driver_lock_va_space();
 *		ret = drm_gpuvm_sm_map(gpuvm, &ctx, &map_req);
685  *		driver_unlock_va_space();
686  *
687  *	out:
688  *		drm_gpuvm_bo_put(ctx.vm_bo);
689  *		kfree(ctx.new_va);
690  *		kfree(ctx.prev_va);
691  *		kfree(ctx.next_va);
692  *		return ret;
693  *	}
694  *
695  *	int driver_gpuva_map(struct drm_gpuva_op *op, void *__ctx)
696  *	{
697  *		struct driver_context *ctx = __ctx;
698  *
 *		drm_gpuva_map(ctx->gpuvm, ctx->new_va, &op->map);
700  *
701  *		drm_gpuva_link(ctx->new_va, ctx->vm_bo);
702  *
703  *		// prevent the new GPUVA from being freed in
704  *		// driver_mapping_create()
705  *		ctx->new_va = NULL;
706  *
707  *		return 0;
708  *	}
709  *
710  *	int driver_gpuva_remap(struct drm_gpuva_op *op, void *__ctx)
711  *	{
712  *		struct driver_context *ctx = __ctx;
713  *		struct drm_gpuva *va = op->remap.unmap->va;
714  *
715  *		drm_gpuva_remap(ctx->prev_va, ctx->next_va, &op->remap);
716  *
717  *		if (op->remap.prev) {
718  *			drm_gpuva_link(ctx->prev_va, va->vm_bo);
719  *			ctx->prev_va = NULL;
720  *		}
721  *
722  *		if (op->remap.next) {
723  *			drm_gpuva_link(ctx->next_va, va->vm_bo);
724  *			ctx->next_va = NULL;
725  *		}
726  *
727  *		drm_gpuva_unlink(va);
728  *		kfree(va);
729  *
730  *		return 0;
731  *	}
732  *
733  *	int driver_gpuva_unmap(struct drm_gpuva_op *op, void *__ctx)
734  *	{
735  *		drm_gpuva_unlink(op->unmap.va);
736  *		drm_gpuva_unmap(&op->unmap);
737  *		kfree(op->unmap.va);
738  *
739  *		return 0;
740  *	}
741  */
742 
743 /**
744  * get_next_vm_bo_from_list() - get the next vm_bo element
745  * @__gpuvm: the &drm_gpuvm
746  * @__list_name: the name of the list we're iterating on
747  * @__local_list: a pointer to the local list used to store already iterated items
748  * @__prev_vm_bo: the previous element we got from get_next_vm_bo_from_list()
749  *
750  * This helper is here to provide lockless list iteration. Lockless as in, the
751  * iterator releases the lock immediately after picking the first element from
752  * the list, so list insertion and deletion can happen concurrently.
753  *
754  * Elements popped from the original list are kept in a local list, so removal
755  * and is_empty checks can still happen while we're iterating the list.
756  */
757 #define get_next_vm_bo_from_list(__gpuvm, __list_name, __local_list, __prev_vm_bo)	\
758 	({										\
759 		struct drm_gpuvm_bo *__vm_bo = NULL;					\
760 											\
761 		drm_gpuvm_bo_put(__prev_vm_bo);						\
762 											\
763 		spin_lock(&(__gpuvm)->__list_name.lock);				\
764 		if (!(__gpuvm)->__list_name.local_list)					\
765 			(__gpuvm)->__list_name.local_list = __local_list;		\
766 		else									\
767 			drm_WARN_ON((__gpuvm)->drm,					\
768 				    (__gpuvm)->__list_name.local_list != __local_list);	\
769 											\
770 		while (!list_empty(&(__gpuvm)->__list_name.list)) {			\
771 			__vm_bo = list_first_entry(&(__gpuvm)->__list_name.list,	\
772 						   struct drm_gpuvm_bo,			\
773 						   list.entry.__list_name);		\
774 			if (kref_get_unless_zero(&__vm_bo->kref)) {			\
775 				list_move_tail(&(__vm_bo)->list.entry.__list_name,	\
776 					       __local_list);				\
777 				break;							\
778 			} else {							\
779 				list_del_init(&(__vm_bo)->list.entry.__list_name);	\
780 				__vm_bo = NULL;						\
781 			}								\
782 		}									\
783 		spin_unlock(&(__gpuvm)->__list_name.lock);				\
784 											\
785 		__vm_bo;								\
786 	})
787 
788 /**
789  * for_each_vm_bo_in_list() - internal vm_bo list iterator
790  * @__gpuvm: the &drm_gpuvm
791  * @__list_name: the name of the list we're iterating on
792  * @__local_list: a pointer to the local list used to store already iterated items
793  * @__vm_bo: the struct drm_gpuvm_bo to assign in each iteration step
794  *
795  * This helper is here to provide lockless list iteration. Lockless as in, the
796  * iterator releases the lock immediately after picking the first element from the
797  * list, hence list insertion and deletion can happen concurrently.
798  *
799  * It is not allowed to re-assign the vm_bo pointer from inside this loop.
800  *
801  * Typical use:
802  *
803  *	struct drm_gpuvm_bo *vm_bo;
804  *	LIST_HEAD(my_local_list);
805  *
806  *	ret = 0;
807  *	for_each_vm_bo_in_list(gpuvm, <list_name>, &my_local_list, vm_bo) {
808  *		ret = do_something_with_vm_bo(..., vm_bo);
809  *		if (ret)
810  *			break;
811  *	}
812  *	// Drop ref in case we break out of the loop.
813  *	drm_gpuvm_bo_put(vm_bo);
814  *	restore_vm_bo_list(gpuvm, <list_name>, &my_local_list);
815  *
816  *
817  * Only used for internal list iterations, not meant to be exposed to the outside
818  * world.
819  */
820 #define for_each_vm_bo_in_list(__gpuvm, __list_name, __local_list, __vm_bo)	\
821 	for (__vm_bo = get_next_vm_bo_from_list(__gpuvm, __list_name,		\
822 						__local_list, NULL);		\
823 	     __vm_bo;								\
824 	     __vm_bo = get_next_vm_bo_from_list(__gpuvm, __list_name,		\
825 						__local_list, __vm_bo))
826 
827 static void
828 __restore_vm_bo_list(struct drm_gpuvm *gpuvm, spinlock_t *lock,
829 		     struct list_head *list, struct list_head **local_list)
830 {
831 	/* Merge back the two lists, moving local list elements to the
832 	 * head to preserve previous ordering, in case it matters.
833 	 */
834 	spin_lock(lock);
835 	if (*local_list) {
836 		list_splice(*local_list, list);
837 		*local_list = NULL;
838 	}
839 	spin_unlock(lock);
840 }
841 
842 /**
843  * restore_vm_bo_list() - move vm_bo elements back to their original list
844  * @__gpuvm: the &drm_gpuvm
845  * @__list_name: the name of the list we're iterating on
846  *
847  * When we're done iterating a vm_bo list, we should call restore_vm_bo_list()
848  * to restore the original state and let new iterations take place.
849  */
850 #define restore_vm_bo_list(__gpuvm, __list_name)			\
851 	__restore_vm_bo_list((__gpuvm), &(__gpuvm)->__list_name.lock,	\
852 			     &(__gpuvm)->__list_name.list,		\
853 			     &(__gpuvm)->__list_name.local_list)
854 
855 static void
856 cond_spin_lock(spinlock_t *lock, bool cond)
857 {
858 	if (cond)
859 		spin_lock(lock);
860 }
861 
862 static void
863 cond_spin_unlock(spinlock_t *lock, bool cond)
864 {
865 	if (cond)
866 		spin_unlock(lock);
867 }
868 
869 static void
870 __drm_gpuvm_bo_list_add(struct drm_gpuvm *gpuvm, spinlock_t *lock,
871 			struct list_head *entry, struct list_head *list)
872 {
873 	cond_spin_lock(lock, !!lock);
874 	if (list_empty(entry))
875 		list_add_tail(entry, list);
876 	cond_spin_unlock(lock, !!lock);
877 }
878 
879 /**
880  * drm_gpuvm_bo_is_zombie() - check whether this vm_bo is scheduled for cleanup
881  * @vm_bo: the &drm_gpuvm_bo
882  *
883  * When a vm_bo is scheduled for cleanup using the bo_defer list, it is not
884  * immediately removed from the evict and extobj lists. Therefore, anyone
885  * iterating these lists should skip entries that are being destroyed.
886  *
887  * Checking the refcount without incrementing it is okay as long as the lock
888  * protecting the evict/extobj list is held for as long as you are using the
889  * vm_bo, because even if the refcount hits zero while you are using it, freeing
890  * the vm_bo requires taking the list's lock.
891  *
892  * Zombie entries can be observed on the evict and extobj lists regardless of
893  * whether DRM_GPUVM_RESV_PROTECTED is used, but they remain on the lists for a
894  * longer time when the resv lock is used because we can't take the resv lock
895  * during run_job() in immediate mode, meaning that they need to remain on the
896  * lists until drm_gpuvm_bo_deferred_cleanup() is called.
897  */
898 static bool
899 drm_gpuvm_bo_is_zombie(struct drm_gpuvm_bo *vm_bo)
900 {
901 	return !kref_read(&vm_bo->kref);
902 }
903 
904 /**
905  * drm_gpuvm_bo_list_add() - insert a vm_bo into the given list
906  * @__vm_bo: the &drm_gpuvm_bo
907  * @__list_name: the name of the list to insert into
908  * @__lock: whether to lock with the internal spinlock
909  *
910  * Inserts the given @__vm_bo into the list specified by @__list_name.
911  */
912 #define drm_gpuvm_bo_list_add(__vm_bo, __list_name, __lock)			\
913 	__drm_gpuvm_bo_list_add((__vm_bo)->vm,					\
914 				__lock ? &(__vm_bo)->vm->__list_name.lock :	\
915 					 NULL,					\
916 				&(__vm_bo)->list.entry.__list_name,		\
917 				&(__vm_bo)->vm->__list_name.list)
918 
919 static void
920 __drm_gpuvm_bo_list_del(struct drm_gpuvm *gpuvm, spinlock_t *lock,
921 			struct list_head *entry, bool init)
922 {
923 	cond_spin_lock(lock, !!lock);
924 	if (init) {
925 		if (!list_empty(entry))
926 			list_del_init(entry);
927 	} else {
928 		list_del(entry);
929 	}
930 	cond_spin_unlock(lock, !!lock);
931 }
932 
933 /**
934  * drm_gpuvm_bo_list_del_init() - remove a vm_bo from the given list
935  * @__vm_bo: the &drm_gpuvm_bo
 * @__list_name: the name of the list to remove from
937  * @__lock: whether to lock with the internal spinlock
938  *
939  * Removes the given @__vm_bo from the list specified by @__list_name.
940  */
941 #define drm_gpuvm_bo_list_del_init(__vm_bo, __list_name, __lock)		\
942 	__drm_gpuvm_bo_list_del((__vm_bo)->vm,					\
943 				__lock ? &(__vm_bo)->vm->__list_name.lock :	\
944 					 NULL,					\
945 				&(__vm_bo)->list.entry.__list_name,		\
946 				true)
947 
948 /**
949  * drm_gpuvm_bo_list_del() - remove a vm_bo from the given list
950  * @__vm_bo: the &drm_gpuvm_bo
 * @__list_name: the name of the list to remove from
952  * @__lock: whether to lock with the internal spinlock
953  *
954  * Removes the given @__vm_bo from the list specified by @__list_name.
955  */
956 #define drm_gpuvm_bo_list_del(__vm_bo, __list_name, __lock)			\
957 	__drm_gpuvm_bo_list_del((__vm_bo)->vm,					\
958 				__lock ? &(__vm_bo)->vm->__list_name.lock :	\
959 					 NULL,					\
960 				&(__vm_bo)->list.entry.__list_name,		\
961 				false)
962 
963 #define to_drm_gpuva(__node)	container_of((__node), struct drm_gpuva, rb.node)
964 
965 #define GPUVA_START(node) ((node)->va.addr)
966 #define GPUVA_LAST(node) ((node)->va.addr + (node)->va.range - 1)
967 
/* We do not actually use drm_gpuva_it_next(); tell the compiler not to
 * complain about this.
970  */
971 INTERVAL_TREE_DEFINE(struct drm_gpuva, rb.node, u64, rb.__subtree_last,
972 		     GPUVA_START, GPUVA_LAST, static __maybe_unused,
973 		     drm_gpuva_it)
974 
975 static int __drm_gpuva_insert(struct drm_gpuvm *gpuvm,
976 			      struct drm_gpuva *va);
977 static void __drm_gpuva_remove(struct drm_gpuva *va);
978 
979 static bool
980 drm_gpuvm_check_overflow(u64 addr, u64 range)
981 {
982 	u64 end;
983 
984 	return check_add_overflow(addr, range, &end);
985 }
986 
987 static bool
988 drm_gpuvm_warn_check_overflow(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
989 {
990 	return drm_WARN(gpuvm->drm, drm_gpuvm_check_overflow(addr, range),
991 			"GPUVA address limited to %zu bytes.\n", sizeof(addr));
992 }
993 
994 static bool
995 drm_gpuvm_in_mm_range(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
996 {
997 	u64 end = addr + range;
998 	u64 mm_start = gpuvm->mm_start;
999 	u64 mm_end = mm_start + gpuvm->mm_range;
1000 
1001 	return addr >= mm_start && end <= mm_end;
1002 }
1003 
1004 static bool
1005 drm_gpuvm_in_kernel_node(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
1006 {
1007 	u64 end = addr + range;
1008 	u64 kstart = gpuvm->kernel_alloc_node.va.addr;
1009 	u64 krange = gpuvm->kernel_alloc_node.va.range;
1010 	u64 kend = kstart + krange;
1011 
1012 	return krange && addr < kend && kstart < end;
1013 }
1014 
1015 /**
1016  * drm_gpuvm_range_valid() - checks whether the given range is valid for the
1017  * given &drm_gpuvm
1018  * @gpuvm: the GPUVM to check the range for
1019  * @addr: the base address
1020  * @range: the range starting from the base address
1021  *
1022  * Checks whether the range is within the GPUVM's managed boundaries.
1023  *
1024  * Returns: true for a valid range, false otherwise
1025  */
1026 bool
1027 drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm,
1028 		      u64 addr, u64 range)
1029 {
1030 	return !drm_gpuvm_check_overflow(addr, range) &&
1031 	       drm_gpuvm_in_mm_range(gpuvm, addr, range) &&
1032 	       !drm_gpuvm_in_kernel_node(gpuvm, addr, range);
1033 }
1034 EXPORT_SYMBOL_GPL(drm_gpuvm_range_valid);
1035 
1036 static void
1037 drm_gpuvm_gem_object_free(struct drm_gem_object *obj)
1038 {
1039 	drm_gem_object_release(obj);
1040 	kfree(obj);
1041 }
1042 
1043 static const struct drm_gem_object_funcs drm_gpuvm_object_funcs = {
1044 	.free = drm_gpuvm_gem_object_free,
1045 };
1046 
1047 /**
1048  * drm_gpuvm_resv_object_alloc() - allocate a dummy &drm_gem_object
1049  * @drm: the drivers &drm_device
1050  *
1051  * Allocates a dummy &drm_gem_object which can be passed to drm_gpuvm_init() in
 * order to serve as root GEM object providing the &dma_resv shared across
1053  * &drm_gem_objects local to a single GPUVM.
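 *
 * A short usage sketch::
 *
 *	struct drm_gem_object *r_obj = drm_gpuvm_resv_object_alloc(drm);
 *
 *	if (!r_obj)
 *		return -ENOMEM;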
1054  *
1055  * Returns: the &drm_gem_object on success, NULL on failure
1056  */
1057 struct drm_gem_object *
1058 drm_gpuvm_resv_object_alloc(struct drm_device *drm)
1059 {
1060 	struct drm_gem_object *obj;
1061 
1062 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
1063 	if (!obj)
1064 		return NULL;
1065 
1066 	obj->funcs = &drm_gpuvm_object_funcs;
1067 	drm_gem_private_object_init(drm, obj, 0);
1068 
1069 	return obj;
1070 }
1071 EXPORT_SYMBOL_GPL(drm_gpuvm_resv_object_alloc);
1072 
1073 /**
1074  * drm_gpuvm_init() - initialize a &drm_gpuvm
1075  * @gpuvm: pointer to the &drm_gpuvm to initialize
1076  * @name: the name of the GPU VA space
1077  * @flags: the &drm_gpuvm_flags for this GPUVM
1078  * @drm: the &drm_device this VM resides in
1079  * @r_obj: the resv &drm_gem_object providing the GPUVM's common &dma_resv
1080  * @start_offset: the start offset of the GPU VA space
1081  * @range: the size of the GPU VA space
1082  * @reserve_offset: the start of the kernel reserved GPU VA area
1083  * @reserve_range: the size of the kernel reserved GPU VA area
1084  * @ops: &drm_gpuvm_ops called on &drm_gpuvm_sm_map / &drm_gpuvm_sm_unmap
1085  *
1086  * The &drm_gpuvm must be initialized with this function before use.
1087  *
1088  * Note that @gpuvm must be cleared to 0 before calling this function. The given
1089  * &name is expected to be managed by the surrounding driver structures.
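 *
 * A minimal sketch, assuming a hypothetical driver structure embedding the
 * &drm_gpuvm and hypothetical VA space bounds; note that a
 * &drm_gpuvm_ops::vm_free callback is required, since the VM is freed through
 * it once the last reference is dropped::
 *
 *	static void driver_vm_free(struct drm_gpuvm *gpuvm)
 *	{
 *		struct driver_vm *vm = container_of(gpuvm, struct driver_vm,
 *						    base);
 *
 *		kfree(vm);
 *	}
 *
 *	static const struct drm_gpuvm_ops driver_vm_ops = {
 *		.vm_free = driver_vm_free,
 *	};
 *
 *	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
 *	if (!vm)
 *		return -ENOMEM;
 *
 *	drm_gpuvm_init(&vm->base, "driver-vm", 0, drm, r_obj, 0, 1ull << 48,
 *		       reserve_offset, reserve_range, &driver_vm_ops);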
1090  */
1091 void
1092 drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
1093 	       enum drm_gpuvm_flags flags,
1094 	       struct drm_device *drm,
1095 	       struct drm_gem_object *r_obj,
1096 	       u64 start_offset, u64 range,
1097 	       u64 reserve_offset, u64 reserve_range,
1098 	       const struct drm_gpuvm_ops *ops)
1099 {
1100 	gpuvm->rb.tree = RB_ROOT_CACHED;
1101 	INIT_LIST_HEAD(&gpuvm->rb.list);
1102 
1103 	INIT_LIST_HEAD(&gpuvm->extobj.list);
1104 	spin_lock_init(&gpuvm->extobj.lock);
1105 
1106 	INIT_LIST_HEAD(&gpuvm->evict.list);
1107 	spin_lock_init(&gpuvm->evict.lock);
1108 
1109 	init_llist_head(&gpuvm->bo_defer);
1110 
1111 	kref_init(&gpuvm->kref);
1112 
1113 	gpuvm->name = name ? name : "unknown";
1114 	gpuvm->flags = flags;
1115 	gpuvm->ops = ops;
1116 	gpuvm->drm = drm;
1117 	gpuvm->r_obj = r_obj;
1118 
1119 	drm_gem_object_get(r_obj);
1120 
1121 	drm_gpuvm_warn_check_overflow(gpuvm, start_offset, range);
1122 	gpuvm->mm_start = start_offset;
1123 	gpuvm->mm_range = range;
1124 
1125 	memset(&gpuvm->kernel_alloc_node, 0, sizeof(struct drm_gpuva));
1126 	if (reserve_range) {
1127 		gpuvm->kernel_alloc_node.va.addr = reserve_offset;
1128 		gpuvm->kernel_alloc_node.va.range = reserve_range;
1129 
1130 		if (likely(!drm_gpuvm_warn_check_overflow(gpuvm, reserve_offset,
1131 							  reserve_range)))
1132 			__drm_gpuva_insert(gpuvm, &gpuvm->kernel_alloc_node);
1133 	}
1134 }
1135 EXPORT_SYMBOL_GPL(drm_gpuvm_init);
1136 
1137 static void
1138 drm_gpuvm_fini(struct drm_gpuvm *gpuvm)
1139 {
1140 	gpuvm->name = NULL;
1141 
1142 	if (gpuvm->kernel_alloc_node.va.range)
1143 		__drm_gpuva_remove(&gpuvm->kernel_alloc_node);
1144 
1145 	drm_WARN(gpuvm->drm, !RB_EMPTY_ROOT(&gpuvm->rb.tree.rb_root),
1146 		 "GPUVA tree is not empty, potentially leaking memory.\n");
1147 
1148 	drm_WARN(gpuvm->drm, !list_empty(&gpuvm->extobj.list),
1149 		 "Extobj list should be empty.\n");
1150 	drm_WARN(gpuvm->drm, !list_empty(&gpuvm->evict.list),
1151 		 "Evict list should be empty.\n");
1152 	drm_WARN(gpuvm->drm, !llist_empty(&gpuvm->bo_defer),
1153 		 "VM BO cleanup list should be empty.\n");
1154 
1155 	drm_gem_object_put(gpuvm->r_obj);
1156 }
1157 
1158 static void
1159 drm_gpuvm_free(struct kref *kref)
1160 {
1161 	struct drm_gpuvm *gpuvm = container_of(kref, struct drm_gpuvm, kref);
1162 
1163 	drm_gpuvm_fini(gpuvm);
1164 
1165 	if (drm_WARN_ON(gpuvm->drm, !gpuvm->ops->vm_free))
1166 		return;
1167 
1168 	gpuvm->ops->vm_free(gpuvm);
1169 }
1170 
1171 /**
1172  * drm_gpuvm_put() - drop a struct drm_gpuvm reference
1173  * @gpuvm: the &drm_gpuvm to release the reference of
1174  *
1175  * This releases a reference to @gpuvm.
1176  *
1177  * This function may be called from atomic context.
1178  */
1179 void
1180 drm_gpuvm_put(struct drm_gpuvm *gpuvm)
1181 {
1182 	if (gpuvm)
1183 		kref_put(&gpuvm->kref, drm_gpuvm_free);
1184 }
1185 EXPORT_SYMBOL_GPL(drm_gpuvm_put);
1186 
1187 static int
1188 exec_prepare_obj(struct drm_exec *exec, struct drm_gem_object *obj,
1189 		 unsigned int num_fences)
1190 {
1191 	return num_fences ? drm_exec_prepare_obj(exec, obj, num_fences) :
1192 			    drm_exec_lock_obj(exec, obj);
1193 }
1194 
1195 /**
1196  * drm_gpuvm_prepare_vm() - prepare the GPUVMs common dma-resv
1197  * @gpuvm: the &drm_gpuvm
1198  * @exec: the &drm_exec context
1199  * @num_fences: the amount of &dma_fences to reserve
1200  *
 * Calls drm_exec_prepare_obj() for the GPUVM's dummy &drm_gem_object; if
1202  * @num_fences is zero drm_exec_lock_obj() is called instead.
1203  *
 * Using this function directly, it is the driver's responsibility to call
1205  * drm_exec_init() and drm_exec_fini() accordingly.
1206  *
1207  * Returns: 0 on success, negative error code on failure.
1208  */
1209 int
1210 drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm,
1211 		     struct drm_exec *exec,
1212 		     unsigned int num_fences)
1213 {
1214 	return exec_prepare_obj(exec, gpuvm->r_obj, num_fences);
1215 }
1216 EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_vm);
1217 
1218 static int
1219 __drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
1220 			    struct drm_exec *exec,
1221 			    unsigned int num_fences)
1222 {
1223 	struct drm_gpuvm_bo *vm_bo;
1224 	LIST_HEAD(extobjs);
1225 	int ret = 0;
1226 
1227 	for_each_vm_bo_in_list(gpuvm, extobj, &extobjs, vm_bo) {
1228 		ret = exec_prepare_obj(exec, vm_bo->obj, num_fences);
1229 		if (ret)
1230 			break;
1231 	}
1232 	/* Drop ref in case we break out of the loop. */
1233 	drm_gpuvm_bo_put(vm_bo);
1234 	restore_vm_bo_list(gpuvm, extobj);
1235 
1236 	return ret;
1237 }
1238 
1239 static int
1240 drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm,
1241 				 struct drm_exec *exec,
1242 				 unsigned int num_fences)
1243 {
1244 	struct drm_gpuvm_bo *vm_bo;
1245 	int ret = 0;
1246 
1247 	drm_gpuvm_resv_assert_held(gpuvm);
1248 	list_for_each_entry(vm_bo, &gpuvm->extobj.list, list.entry.extobj) {
1249 		if (drm_gpuvm_bo_is_zombie(vm_bo))
1250 			continue;
1251 
1252 		ret = exec_prepare_obj(exec, vm_bo->obj, num_fences);
1253 		if (ret)
1254 			break;
1255 
1256 		if (vm_bo->evicted)
1257 			drm_gpuvm_bo_list_add(vm_bo, evict, false);
1258 	}
1259 
1260 	return ret;
1261 }
1262 
1263 /**
1264  * drm_gpuvm_prepare_objects() - prepare all associated BOs
1265  * @gpuvm: the &drm_gpuvm
1266  * @exec: the &drm_exec locking context
1267  * @num_fences: the amount of &dma_fences to reserve
1268  *
1269  * Calls drm_exec_prepare_obj() for all &drm_gem_objects the given
1270  * &drm_gpuvm contains mappings of; if @num_fences is zero drm_exec_lock_obj()
1271  * is called instead.
1272  *
 * Using this function directly, it is the driver's responsibility to call
1274  * drm_exec_init() and drm_exec_fini() accordingly.
1275  *
 * Note: This function is safe against concurrent insertion and removal of
 * external objects; however, it is not safe against concurrent usage itself.
1278  *
1279  * Drivers need to make sure to protect this case with either an outer VM lock
1280  * or by calling drm_gpuvm_prepare_vm() before this function within the
1281  * drm_exec_until_all_locked() loop, such that the GPUVM's dma-resv lock ensures
1282  * mutual exclusion.
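 *
 * A sketch of the open coded variant described above, mirroring what
 * drm_gpuvm_exec_lock() does internally::
 *
 *	struct drm_exec exec;
 *	int ret;
 *
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *
 *	drm_exec_until_all_locked(&exec) {
 *		ret = drm_gpuvm_prepare_vm(gpuvm, &exec, 1);
 *		drm_exec_retry_on_contention(&exec);
 *		if (ret)
 *			goto err;
 *
 *		ret = drm_gpuvm_prepare_objects(gpuvm, &exec, 1);
 *		drm_exec_retry_on_contention(&exec);
 *		if (ret)
 *			goto err;
 *	}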
1283  *
1284  * Returns: 0 on success, negative error code on failure.
1285  */
1286 int
1287 drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
1288 			  struct drm_exec *exec,
1289 			  unsigned int num_fences)
1290 {
1291 	if (drm_gpuvm_resv_protected(gpuvm))
1292 		return drm_gpuvm_prepare_objects_locked(gpuvm, exec,
1293 							num_fences);
1294 	else
1295 		return __drm_gpuvm_prepare_objects(gpuvm, exec, num_fences);
1296 }
1297 EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_objects);
1298 
1299 /**
1300  * drm_gpuvm_prepare_range() - prepare all BOs mapped within a given range
1301  * @gpuvm: the &drm_gpuvm
1302  * @exec: the &drm_exec locking context
1303  * @addr: the start address within the VA space
1304  * @range: the range to iterate within the VA space
1305  * @num_fences: the amount of &dma_fences to reserve
1306  *
1307  * Calls drm_exec_prepare_obj() for all &drm_gem_objects mapped between @addr
1308  * and @addr + @range; if @num_fences is zero drm_exec_lock_obj() is called
1309  * instead.
1310  *
1311  * Returns: 0 on success, negative error code on failure.
1312  */
1313 int
1314 drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
1315 			u64 addr, u64 range, unsigned int num_fences)
1316 {
1317 	struct drm_gpuva *va;
1318 	u64 end = addr + range;
1319 	int ret;
1320 
1321 	drm_gpuvm_for_each_va_range(va, gpuvm, addr, end) {
1322 		struct drm_gem_object *obj = va->gem.obj;
1323 
1324 		ret = exec_prepare_obj(exec, obj, num_fences);
1325 		if (ret)
1326 			return ret;
1327 	}
1328 
1329 	return 0;
1330 }
1331 EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_range);
1332 
1333 /**
1334  * drm_gpuvm_exec_lock() - lock all dma-resv of all associated BOs
1335  * @vm_exec: the &drm_gpuvm_exec wrapper
1336  *
1337  * Acquires all dma-resv locks of all &drm_gem_objects the given
1338  * &drm_gpuvm contains mappings of.
1339  *
1340  * Additionally, when calling this function with struct drm_gpuvm_exec::extra
1341  * being set the driver receives the given @fn callback to lock additional
1342  * dma-resv in the context of the &drm_gpuvm_exec instance. Typically, drivers
1343  * would call drm_exec_prepare_obj() from within this callback.
1344  *
1345  * Returns: 0 on success, negative error code on failure.
1346  */
1347 int
1348 drm_gpuvm_exec_lock(struct drm_gpuvm_exec *vm_exec)
1349 {
1350 	struct drm_gpuvm *gpuvm = vm_exec->vm;
1351 	struct drm_exec *exec = &vm_exec->exec;
1352 	unsigned int num_fences = vm_exec->num_fences;
1353 	int ret;
1354 
1355 	drm_exec_init(exec, vm_exec->flags, 0);
1356 
1357 	drm_exec_until_all_locked(exec) {
1358 		ret = drm_gpuvm_prepare_vm(gpuvm, exec, num_fences);
1359 		drm_exec_retry_on_contention(exec);
1360 		if (ret)
1361 			goto err;
1362 
1363 		ret = drm_gpuvm_prepare_objects(gpuvm, exec, num_fences);
1364 		drm_exec_retry_on_contention(exec);
1365 		if (ret)
1366 			goto err;
1367 
1368 		if (vm_exec->extra.fn) {
1369 			ret = vm_exec->extra.fn(vm_exec);
1370 			drm_exec_retry_on_contention(exec);
1371 			if (ret)
1372 				goto err;
1373 		}
1374 	}
1375 
1376 	return 0;
1377 
1378 err:
1379 	drm_exec_fini(exec);
1380 	return ret;
1381 }
1382 EXPORT_SYMBOL_GPL(drm_gpuvm_exec_lock);
1383 
1384 static int
1385 fn_lock_array(struct drm_gpuvm_exec *vm_exec)
1386 {
1387 	struct {
1388 		struct drm_gem_object **objs;
1389 		unsigned int num_objs;
1390 	} *args = vm_exec->extra.priv;
1391 
1392 	return drm_exec_prepare_array(&vm_exec->exec, args->objs,
1393 				      args->num_objs, vm_exec->num_fences);
1394 }
1395 
1396 /**
1397  * drm_gpuvm_exec_lock_array() - lock all dma-resv of all associated BOs
1398  * @vm_exec: the &drm_gpuvm_exec wrapper
1399  * @objs: additional &drm_gem_objects to lock
1400  * @num_objs: the number of additional &drm_gem_objects to lock
1401  *
1402  * Acquires all dma-resv locks of all &drm_gem_objects the given &drm_gpuvm
1403  * contains mappings of, plus the ones given through @objs.
1404  *
1405  * Returns: 0 on success, negative error code on failure.
1406  */
1407 int
1408 drm_gpuvm_exec_lock_array(struct drm_gpuvm_exec *vm_exec,
1409 			  struct drm_gem_object **objs,
1410 			  unsigned int num_objs)
1411 {
1412 	struct {
1413 		struct drm_gem_object **objs;
1414 		unsigned int num_objs;
1415 	} args;
1416 
1417 	args.objs = objs;
1418 	args.num_objs = num_objs;
1419 
1420 	vm_exec->extra.fn = fn_lock_array;
1421 	vm_exec->extra.priv = &args;
1422 
1423 	return drm_gpuvm_exec_lock(vm_exec);
1424 }
1425 EXPORT_SYMBOL_GPL(drm_gpuvm_exec_lock_array);
1426 
1427 /**
1428  * drm_gpuvm_exec_lock_range() - prepare all BOs mapped within a given range
1429  * @vm_exec: the &drm_gpuvm_exec wrapper
1430  * @addr: the start address within the VA space
1431  * @range: the range to iterate within the VA space
1432  *
1433  * Acquires all dma-resv locks of all &drm_gem_objects mapped between @addr and
1434  * @addr + @range.
1435  *
1436  * Returns: 0 on success, negative error code on failure.
1437  */
1438 int
1439 drm_gpuvm_exec_lock_range(struct drm_gpuvm_exec *vm_exec,
1440 			  u64 addr, u64 range)
1441 {
1442 	struct drm_gpuvm *gpuvm = vm_exec->vm;
1443 	struct drm_exec *exec = &vm_exec->exec;
1444 	int ret;
1445 
1446 	drm_exec_init(exec, vm_exec->flags, 0);
1447 
1448 	drm_exec_until_all_locked(exec) {
1449 		ret = drm_gpuvm_prepare_range(gpuvm, exec, addr, range,
1450 					      vm_exec->num_fences);
1451 		drm_exec_retry_on_contention(exec);
1452 		if (ret)
1453 			goto err;
1454 	}
1455 
1456 	return ret;
1457 
1458 err:
1459 	drm_exec_fini(exec);
1460 	return ret;
1461 }
1462 EXPORT_SYMBOL_GPL(drm_gpuvm_exec_lock_range);
1463 
1464 static int
1465 __drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec)
1466 {
1467 	const struct drm_gpuvm_ops *ops = gpuvm->ops;
1468 	struct drm_gpuvm_bo *vm_bo;
1469 	LIST_HEAD(evict);
1470 	int ret = 0;
1471 
1472 	for_each_vm_bo_in_list(gpuvm, evict, &evict, vm_bo) {
1473 		ret = ops->vm_bo_validate(vm_bo, exec);
1474 		if (ret)
1475 			break;
1476 	}
1477 	/* Drop ref in case we break out of the loop. */
1478 	drm_gpuvm_bo_put(vm_bo);
1479 	restore_vm_bo_list(gpuvm, evict);
1480 
1481 	return ret;
1482 }
1483 
1484 static int
1485 drm_gpuvm_validate_locked(struct drm_gpuvm *gpuvm, struct drm_exec *exec)
1486 {
1487 	const struct drm_gpuvm_ops *ops = gpuvm->ops;
1488 	struct drm_gpuvm_bo *vm_bo, *next;
1489 	int ret = 0;
1490 
1491 	drm_gpuvm_resv_assert_held(gpuvm);
1492 
1493 	list_for_each_entry_safe(vm_bo, next, &gpuvm->evict.list,
1494 				 list.entry.evict) {
1495 		if (drm_gpuvm_bo_is_zombie(vm_bo))
1496 			continue;
1497 
1498 		ret = ops->vm_bo_validate(vm_bo, exec);
1499 		if (ret)
1500 			break;
1501 
1502 		dma_resv_assert_held(vm_bo->obj->resv);
1503 		if (!vm_bo->evicted)
1504 			drm_gpuvm_bo_list_del_init(vm_bo, evict, false);
1505 	}
1506 
1507 	return ret;
1508 }
1509 
1510 /**
1511  * drm_gpuvm_validate() - validate all BOs marked as evicted
1512  * @gpuvm: the &drm_gpuvm to validate evicted BOs
1513  * @exec: the &drm_exec instance used for locking the GPUVM
1514  *
1515  * Calls the &drm_gpuvm_ops::vm_bo_validate callback for all evicted buffer
1516  * objects being mapped in the given &drm_gpuvm.
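 *
 * A sketch of a corresponding driver side &drm_gpuvm_ops::vm_bo_validate
 * implementation; the driver helpers are hypothetical::
 *
 *	static int driver_vm_bo_validate(struct drm_gpuvm_bo *vm_bo,
 *					 struct drm_exec *exec)
 *	{
 *		struct driver_bo *bo = to_driver_bo(vm_bo->obj);
 *
 *		// swap the backing storage back in; the object's dma-resv
 *		// is held at this point
 *		return driver_bo_swapin(bo, exec);
 *	}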
1517  *
1518  * Returns: 0 on success, negative error code on failure.
1519  */
1520 int
1521 drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec)
1522 {
1523 	const struct drm_gpuvm_ops *ops = gpuvm->ops;
1524 
1525 	if (unlikely(!ops || !ops->vm_bo_validate))
1526 		return -EOPNOTSUPP;
1527 
1528 	if (drm_gpuvm_resv_protected(gpuvm))
1529 		return drm_gpuvm_validate_locked(gpuvm, exec);
1530 	else
1531 		return __drm_gpuvm_validate(gpuvm, exec);
1532 }
1533 EXPORT_SYMBOL_GPL(drm_gpuvm_validate);
1534 
1535 /**
 * drm_gpuvm_resv_add_fence() - add fence to private and all extobj
1537  * dma-resv
1538  * @gpuvm: the &drm_gpuvm to add a fence to
1539  * @exec: the &drm_exec locking context
1540  * @fence: fence to add
1541  * @private_usage: private dma-resv usage
1542  * @extobj_usage: extobj dma-resv usage
1543  */
1544 void
1545 drm_gpuvm_resv_add_fence(struct drm_gpuvm *gpuvm,
1546 			 struct drm_exec *exec,
1547 			 struct dma_fence *fence,
1548 			 enum dma_resv_usage private_usage,
1549 			 enum dma_resv_usage extobj_usage)
1550 {
1551 	struct drm_gem_object *obj;
1552 	unsigned long index;
1553 
1554 	drm_exec_for_each_locked_object(exec, index, obj) {
1555 		dma_resv_assert_held(obj->resv);
1556 		dma_resv_add_fence(obj->resv, fence,
1557 				   drm_gpuvm_is_extobj(gpuvm, obj) ?
1558 				   extobj_usage : private_usage);
1559 	}
1560 }
1561 EXPORT_SYMBOL_GPL(drm_gpuvm_resv_add_fence);
1562 
1563 /**
1564  * drm_gpuvm_bo_create() - create a new instance of struct drm_gpuvm_bo
1565  * @gpuvm: The &drm_gpuvm the @obj is mapped in.
1566  * @obj: The &drm_gem_object being mapped in the @gpuvm.
1567  *
1568  * If provided by the driver, this function uses the &drm_gpuvm_ops
1569  * vm_bo_alloc() callback to allocate.
1570  *
1571  * Returns: a pointer to the &drm_gpuvm_bo on success, NULL on failure
1572  */
1573 struct drm_gpuvm_bo *
1574 drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
1575 		    struct drm_gem_object *obj)
1576 {
1577 	const struct drm_gpuvm_ops *ops = gpuvm->ops;
1578 	struct drm_gpuvm_bo *vm_bo;
1579 
1580 	if (ops && ops->vm_bo_alloc)
1581 		vm_bo = ops->vm_bo_alloc();
1582 	else
1583 		vm_bo = kzalloc(sizeof(*vm_bo), GFP_KERNEL);
1584 
1585 	if (unlikely(!vm_bo))
1586 		return NULL;
1587 
1588 	vm_bo->vm = drm_gpuvm_get(gpuvm);
1589 	vm_bo->obj = obj;
1590 	drm_gem_object_get(obj);
1591 
1592 	kref_init(&vm_bo->kref);
1593 	INIT_LIST_HEAD(&vm_bo->list.gpuva);
1594 	INIT_LIST_HEAD(&vm_bo->list.entry.gem);
1595 
1596 	INIT_LIST_HEAD(&vm_bo->list.entry.extobj);
1597 	INIT_LIST_HEAD(&vm_bo->list.entry.evict);
1598 	init_llist_node(&vm_bo->list.entry.bo_defer);
1599 
1600 	return vm_bo;
1601 }
1602 EXPORT_SYMBOL_GPL(drm_gpuvm_bo_create);
1603 
1604 static void
1605 drm_gpuvm_bo_destroy(struct kref *kref)
1606 {
1607 	struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo,
1608 						  kref);
1609 	struct drm_gpuvm *gpuvm = vm_bo->vm;
1610 	const struct drm_gpuvm_ops *ops = gpuvm->ops;
1611 	struct drm_gem_object *obj = vm_bo->obj;
1612 	bool lock = !drm_gpuvm_resv_protected(gpuvm);
1613 
1614 	if (!lock)
1615 		drm_gpuvm_resv_assert_held(gpuvm);
1616 
1617 	drm_gpuvm_bo_list_del(vm_bo, extobj, lock);
1618 	drm_gpuvm_bo_list_del(vm_bo, evict, lock);
1619 
1620 	drm_gem_gpuva_assert_lock_held(gpuvm, obj);
1621 	list_del(&vm_bo->list.entry.gem);
1622 
1623 	if (ops && ops->vm_bo_free)
1624 		ops->vm_bo_free(vm_bo);
1625 	else
1626 		kfree(vm_bo);
1627 
1628 	drm_gpuvm_put(gpuvm);
1629 	drm_gem_object_put(obj);
1630 }
1631 
1632 /**
1633  * drm_gpuvm_bo_put() - drop a struct drm_gpuvm_bo reference
1634  * @vm_bo: the &drm_gpuvm_bo to release the reference of
1635  *
1636  * This releases a reference to @vm_bo.
1637  *
 * If the reference count drops to zero, the &gpuvm_bo is destroyed, which
 * includes removing it from the GEM's gpuva list. Hence, if a call to this
 * function can potentially let the reference count drop to zero, the caller
 * must hold the lock that the GEM uses for its gpuva list (either the GEM's
 * dma-resv or gpuva.lock mutex).
1643  *
1644  * This function may only be called from non-atomic context.
1645  *
1646  * Returns: true if vm_bo was destroyed, false otherwise.
1647  */
1648 bool
1649 drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo)
1650 {
1651 	might_sleep();
1652 
1653 	if (vm_bo)
1654 		return !!kref_put(&vm_bo->kref, drm_gpuvm_bo_destroy);
1655 
1656 	return false;
1657 }
1658 EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put);
1659 
1660 /*
1661  * drm_gpuvm_bo_into_zombie() - called when the vm_bo becomes a zombie due to
1662  * deferred cleanup
1663  *
 * If deferred cleanup is used, this must be called right after the vm_bo
 * refcount drops to zero, with the GEM's gpuva.lock mutex held. After
 * releasing the mutex, drm_gpuvm_bo_defer_zombie_cleanup() must be called.
1667  */
1668 static void
1669 drm_gpuvm_bo_into_zombie(struct kref *kref)
1670 {
1671 	struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo,
1672 						  kref);
1673 
1674 	if (!drm_gpuvm_resv_protected(vm_bo->vm)) {
1675 		drm_gpuvm_bo_list_del(vm_bo, extobj, true);
1676 		drm_gpuvm_bo_list_del(vm_bo, evict, true);
1677 	}
1678 
1679 	list_del(&vm_bo->list.entry.gem);
1680 }
1681 
1682 /*
1683  * drm_gpuvm_bo_defer_zombie_cleanup() - adds a new zombie vm_bo to the
1684  * bo_defer list
1685  *
1686  * Called after drm_gpuvm_bo_into_zombie(). GEM mutex must not be held.
1687  *
 * The GEM must stay alive for as long as we hold its gpuva.lock mutex, but
 * the instant the vm_bo is added to bo_defer, another thread might call
 * drm_gpuvm_bo_deferred_cleanup() and put the GEM. Therefore, to avoid
 * freeing the mutex while we are holding it, the GEM's gpuva.lock mutex must
 * be released *before* calling this function.
1693  */
1694 static void
1695 drm_gpuvm_bo_defer_zombie_cleanup(struct drm_gpuvm_bo *vm_bo)
1696 {
1697 	llist_add(&vm_bo->list.entry.bo_defer, &vm_bo->vm->bo_defer);
1698 }
1699 
1700 static void
1701 drm_gpuvm_bo_defer_free(struct kref *kref)
1702 {
1703 	struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo,
1704 						  kref);
1705 
1706 	drm_gpuvm_bo_into_zombie(kref);
1707 	mutex_unlock(&vm_bo->obj->gpuva.lock);
1708 	drm_gpuvm_bo_defer_zombie_cleanup(vm_bo);
1709 }
1710 
1711 /**
1712  * drm_gpuvm_bo_put_deferred() - drop a struct drm_gpuvm_bo reference with
1713  * deferred cleanup
1714  * @vm_bo: the &drm_gpuvm_bo to release the reference of
1715  *
1716  * This releases a reference to @vm_bo.
1717  *
 * This might take and release the GEM's gpuva.lock mutex. Call
 * drm_gpuvm_bo_deferred_cleanup() later to complete the cleanup process.
1720  *
1721  * Returns: true if vm_bo is being destroyed, false otherwise.
1722  */
1723 bool
1724 drm_gpuvm_bo_put_deferred(struct drm_gpuvm_bo *vm_bo)
1725 {
1726 	if (!vm_bo)
1727 		return false;
1728 
1729 	drm_WARN_ON(vm_bo->vm->drm, !drm_gpuvm_immediate_mode(vm_bo->vm));
1730 
1731 	return !!kref_put_mutex(&vm_bo->kref,
1732 				drm_gpuvm_bo_defer_free,
1733 				&vm_bo->obj->gpuva.lock);
1734 }
1735 EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put_deferred);
1736 
1737 /**
1738  * drm_gpuvm_bo_deferred_cleanup() - clean up BOs in the deferred list
1740  * @gpuvm: the VM to clean up
1741  *
1742  * Cleans up &drm_gpuvm_bo instances in the deferred cleanup list.
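 *
 * A minimal usage sketch, pairing deferred puts with a later cleanup call
 * from a context where the required locks may be taken::
 *
 *	drm_gpuvm_bo_put_deferred(vm_bo);
 *
 *	/* Later, e.g. from a workqueue: */
 *	drm_gpuvm_bo_deferred_cleanup(gpuvm);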
1743  */
1744 void
1745 drm_gpuvm_bo_deferred_cleanup(struct drm_gpuvm *gpuvm)
1746 {
1747 	const struct drm_gpuvm_ops *ops = gpuvm->ops;
1748 	struct drm_gpuvm_bo *vm_bo;
1749 	struct drm_gem_object *obj;
1750 	struct llist_node *bo_defer;
1751 
1752 	bo_defer = llist_del_all(&gpuvm->bo_defer);
1753 	if (!bo_defer)
1754 		return;
1755 
1756 	if (drm_gpuvm_resv_protected(gpuvm)) {
1757 		dma_resv_lock(drm_gpuvm_resv(gpuvm), NULL);
1758 		llist_for_each_entry(vm_bo, bo_defer, list.entry.bo_defer) {
1759 			drm_gpuvm_bo_list_del(vm_bo, extobj, false);
1760 			drm_gpuvm_bo_list_del(vm_bo, evict, false);
1761 		}
1762 		dma_resv_unlock(drm_gpuvm_resv(gpuvm));
1763 	}
1764 
1765 	while (bo_defer) {
1766 		vm_bo = llist_entry(bo_defer, struct drm_gpuvm_bo, list.entry.bo_defer);
1767 		bo_defer = bo_defer->next;
1768 		obj = vm_bo->obj;
1769 		if (ops && ops->vm_bo_free)
1770 			ops->vm_bo_free(vm_bo);
1771 		else
1772 			kfree(vm_bo);
1773 
1774 		drm_gpuvm_put(gpuvm);
1775 		drm_gem_object_put(obj);
1776 	}
1777 }
1778 EXPORT_SYMBOL_GPL(drm_gpuvm_bo_deferred_cleanup);
1779 
1780 static struct drm_gpuvm_bo *
1781 __drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
1782 		    struct drm_gem_object *obj)
1783 {
1784 	struct drm_gpuvm_bo *vm_bo;
1785 
1786 	drm_gem_gpuva_assert_lock_held(gpuvm, obj);
1787 	drm_gem_for_each_gpuvm_bo(vm_bo, obj)
1788 		if (vm_bo->vm == gpuvm)
1789 			return vm_bo;
1790 
1791 	return NULL;
1792 }
1793 
1794 /**
1795  * drm_gpuvm_bo_find() - find the &drm_gpuvm_bo for the given
1796  * &drm_gpuvm and &drm_gem_object
1797  * @gpuvm: The &drm_gpuvm the @obj is mapped in.
1798  * @obj: The &drm_gem_object being mapped in the @gpuvm.
1799  *
1800  * Find the &drm_gpuvm_bo representing the combination of the given
1801  * &drm_gpuvm and &drm_gem_object. If found, increases the reference
1802  * count of the &drm_gpuvm_bo accordingly.
1803  *
1804  * Returns: a pointer to the &drm_gpuvm_bo on success, NULL on failure
1805  */
1806 struct drm_gpuvm_bo *
1807 drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
1808 		  struct drm_gem_object *obj)
1809 {
1810 	struct drm_gpuvm_bo *vm_bo = __drm_gpuvm_bo_find(gpuvm, obj);
1811 
1812 	return vm_bo ? drm_gpuvm_bo_get(vm_bo) : NULL;
1813 }
1814 EXPORT_SYMBOL_GPL(drm_gpuvm_bo_find);
1815 
1816 /**
1817  * drm_gpuvm_bo_obtain() - obtains an instance of the &drm_gpuvm_bo for the
1818  * given &drm_gpuvm and &drm_gem_object
1819  * @gpuvm: The &drm_gpuvm the @obj is mapped in.
1820  * @obj: The &drm_gem_object being mapped in the @gpuvm.
1821  *
1822  * Find the &drm_gpuvm_bo representing the combination of the given
1823  * &drm_gpuvm and &drm_gem_object. If found, increases the reference
1824  * count of the &drm_gpuvm_bo accordingly. If not found, allocates a new
1825  * &drm_gpuvm_bo.
1826  *
 * A new &drm_gpuvm_bo is added to the GEM's gpuva list.
1828  *
1829  * Returns: a pointer to the &drm_gpuvm_bo on success, an ERR_PTR on failure
1830  */
1831 struct drm_gpuvm_bo *
1832 drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
1833 		    struct drm_gem_object *obj)
1834 {
1835 	struct drm_gpuvm_bo *vm_bo;
1836 
1837 	vm_bo = drm_gpuvm_bo_find(gpuvm, obj);
1838 	if (vm_bo)
1839 		return vm_bo;
1840 
1841 	vm_bo = drm_gpuvm_bo_create(gpuvm, obj);
1842 	if (!vm_bo)
1843 		return ERR_PTR(-ENOMEM);
1844 
1845 	drm_gem_gpuva_assert_lock_held(gpuvm, obj);
1846 	list_add_tail(&vm_bo->list.entry.gem, &obj->gpuva.list);
1847 
1848 	return vm_bo;
1849 }
1850 EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain);
1851 
1852 /**
1853  * drm_gpuvm_bo_obtain_prealloc() - obtains an instance of the &drm_gpuvm_bo
1854  * for the given &drm_gpuvm and &drm_gem_object
1855  * @__vm_bo: A pre-allocated struct drm_gpuvm_bo.
1856  *
1857  * Find the &drm_gpuvm_bo representing the combination of the given
1858  * &drm_gpuvm and &drm_gem_object. If found, increases the reference
1859  * count of the found &drm_gpuvm_bo accordingly, while the @__vm_bo reference
1860  * count is decreased. If not found @__vm_bo is returned without further
1861  * increase of the reference count.
1862  *
 * A new &drm_gpuvm_bo is added to the GEM's gpuva list.
1864  *
1865  * Returns: a pointer to the found &drm_gpuvm_bo or @__vm_bo if no existing
1866  * &drm_gpuvm_bo was found
1867  */
1868 struct drm_gpuvm_bo *
1869 drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *__vm_bo)
1870 {
1871 	struct drm_gpuvm *gpuvm = __vm_bo->vm;
1872 	struct drm_gem_object *obj = __vm_bo->obj;
1873 	struct drm_gpuvm_bo *vm_bo;
1874 
1875 	vm_bo = drm_gpuvm_bo_find(gpuvm, obj);
1876 	if (vm_bo) {
1877 		drm_gpuvm_bo_put(__vm_bo);
1878 		return vm_bo;
1879 	}
1880 
1881 	drm_gem_gpuva_assert_lock_held(gpuvm, obj);
1882 	list_add_tail(&__vm_bo->list.entry.gem, &obj->gpuva.list);
1883 
1884 	return __vm_bo;
1885 }
1886 EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain_prealloc);
1887 
1888 /**
1889  * drm_gpuvm_bo_extobj_add() - adds the &drm_gpuvm_bo to its &drm_gpuvm's
1890  * extobj list
 * @vm_bo: The &drm_gpuvm_bo to add to its &drm_gpuvm's extobj list.
1892  *
 * Adds the given @vm_bo to its &drm_gpuvm's extobj list if it is not on the
 * list already and the corresponding &drm_gem_object actually is an external
 * object.
1896  */
1897 void
1898 drm_gpuvm_bo_extobj_add(struct drm_gpuvm_bo *vm_bo)
1899 {
1900 	struct drm_gpuvm *gpuvm = vm_bo->vm;
1901 	bool lock = !drm_gpuvm_resv_protected(gpuvm);
1902 
1903 	if (!lock)
1904 		drm_gpuvm_resv_assert_held(gpuvm);
1905 
1906 	if (drm_gpuvm_is_extobj(gpuvm, vm_bo->obj))
1907 		drm_gpuvm_bo_list_add(vm_bo, extobj, lock);
1908 }
1909 EXPORT_SYMBOL_GPL(drm_gpuvm_bo_extobj_add);
1910 
1911 /**
 * drm_gpuvm_bo_evict() - add / remove a &drm_gpuvm_bo to / from the &drm_gpuvm's
1913  * evicted list
1914  * @vm_bo: the &drm_gpuvm_bo to add or remove
1915  * @evict: indicates whether the object is evicted
1916  *
1917  * Adds a &drm_gpuvm_bo to or removes it from the &drm_gpuvm's evicted list.
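 *
 * A minimal sketch of a driver's move/eviction notification path; the driver
 * callback is hypothetical and the GEM's dma-resv lock is assumed to be held
 * by the caller::
 *
 *	static void driver_bo_move_notify(struct drm_gem_object *obj, bool evict)
 *	{
 *		struct drm_gpuvm_bo *vm_bo;
 *
 *		drm_gem_for_each_gpuvm_bo(vm_bo, obj)
 *			drm_gpuvm_bo_evict(vm_bo, evict);
 *	}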
1918  */
1919 void
1920 drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict)
1921 {
1922 	struct drm_gpuvm *gpuvm = vm_bo->vm;
1923 	struct drm_gem_object *obj = vm_bo->obj;
1924 	bool lock = !drm_gpuvm_resv_protected(gpuvm);
1925 
1926 	dma_resv_assert_held(obj->resv);
1927 	vm_bo->evicted = evict;
1928 
1929 	/* Can't add external objects to the evicted list directly if not using
1930 	 * internal spinlocks, since in this case the evicted list is protected
1931 	 * with the VM's common dma-resv lock.
1932 	 */
1933 	if (drm_gpuvm_is_extobj(gpuvm, obj) && !lock)
1934 		return;
1935 
1936 	if (evict)
1937 		drm_gpuvm_bo_list_add(vm_bo, evict, lock);
1938 	else
1939 		drm_gpuvm_bo_list_del_init(vm_bo, evict, lock);
1940 }
1941 EXPORT_SYMBOL_GPL(drm_gpuvm_bo_evict);
1942 
1943 static int
1944 __drm_gpuva_insert(struct drm_gpuvm *gpuvm,
1945 		   struct drm_gpuva *va)
1946 {
1947 	struct rb_node *node;
1948 	struct list_head *head;
1949 
1950 	if (drm_gpuva_it_iter_first(&gpuvm->rb.tree,
1951 				    GPUVA_START(va),
1952 				    GPUVA_LAST(va)))
1953 		return -EEXIST;
1954 
1955 	va->vm = gpuvm;
1956 
1957 	drm_gpuva_it_insert(va, &gpuvm->rb.tree);
1958 
1959 	node = rb_prev(&va->rb.node);
1960 	if (node)
1961 		head = &(to_drm_gpuva(node))->rb.entry;
1962 	else
1963 		head = &gpuvm->rb.list;
1964 
1965 	list_add(&va->rb.entry, head);
1966 
1967 	return 0;
1968 }
1969 
1970 /**
1971  * drm_gpuva_insert() - insert a &drm_gpuva
1972  * @gpuvm: the &drm_gpuvm to insert the &drm_gpuva in
1973  * @va: the &drm_gpuva to insert
1974  *
1975  * Insert a &drm_gpuva with a given address and range into a
1976  * &drm_gpuvm.
1977  *
 * It is safe to use this function from within the safe versions of the GPU VA
 * space iterators, such as drm_gpuvm_for_each_va_safe() and
 * drm_gpuvm_for_each_va_range_safe().
1981  *
1982  * Returns: 0 on success, negative error code on failure.
1983  */
1984 int
1985 drm_gpuva_insert(struct drm_gpuvm *gpuvm,
1986 		 struct drm_gpuva *va)
1987 {
1988 	u64 addr = va->va.addr;
1989 	u64 range = va->va.range;
1990 	int ret;
1991 
1992 	if (unlikely(!drm_gpuvm_range_valid(gpuvm, addr, range)))
1993 		return -EINVAL;
1994 
1995 	ret = __drm_gpuva_insert(gpuvm, va);
1996 	if (likely(!ret))
1997 		/* Take a reference of the GPUVM for the successfully inserted
1998 		 * drm_gpuva. We can't take the reference in
		 * __drm_gpuva_insert() itself, since we don't want to increase
2000 		 * the reference count for the GPUVM's kernel_alloc_node.
2001 		 */
2002 		drm_gpuvm_get(gpuvm);
2003 
2004 	return ret;
2005 }
2006 EXPORT_SYMBOL_GPL(drm_gpuva_insert);
2007 
2008 static void
2009 __drm_gpuva_remove(struct drm_gpuva *va)
2010 {
2011 	drm_gpuva_it_remove(va, &va->vm->rb.tree);
2012 	list_del_init(&va->rb.entry);
2013 }
2014 
2015 /**
2016  * drm_gpuva_remove() - remove a &drm_gpuva
2017  * @va: the &drm_gpuva to remove
2018  *
2019  * This removes the given &va from the underlying tree.
2020  *
 * It is safe to use this function from within the safe versions of the GPU VA
 * space iterators, such as drm_gpuvm_for_each_va_safe() and
 * drm_gpuvm_for_each_va_range_safe().
2024  */
2025 void
2026 drm_gpuva_remove(struct drm_gpuva *va)
2027 {
2028 	struct drm_gpuvm *gpuvm = va->vm;
2029 
2030 	if (unlikely(va == &gpuvm->kernel_alloc_node)) {
2031 		drm_WARN(gpuvm->drm, 1,
2032 			 "Can't destroy kernel reserved node.\n");
2033 		return;
2034 	}
2035 
2036 	__drm_gpuva_remove(va);
2037 	drm_gpuvm_put(va->vm);
2038 }
2039 EXPORT_SYMBOL_GPL(drm_gpuva_remove);
2040 
2041 /**
2042  * drm_gpuva_link() - link a &drm_gpuva
2043  * @va: the &drm_gpuva to link
2044  * @vm_bo: the &drm_gpuvm_bo to add the &drm_gpuva to
2045  *
2046  * This adds the given &va to the GPU VA list of the &drm_gpuvm_bo and the
2047  * &drm_gpuvm_bo to the &drm_gem_object it is associated with.
2048  *
2049  * For every &drm_gpuva entry added to the &drm_gpuvm_bo an additional
2050  * reference of the latter is taken.
2051  *
2052  * This function expects the caller to protect the GEM's GPUVA list against
2053  * concurrent access using either the GEM's dma-resv or gpuva.lock mutex.
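 *
 * A minimal sketch, assuming the GEM's gpuva list is protected by the GEM's
 * dma-resv lock::
 *
 *	ret = drm_gpuva_insert(gpuvm, va);
 *	if (ret)
 *		return ret;
 *
 *	dma_resv_lock(obj->resv, NULL);
 *	drm_gpuva_link(va, vm_bo);
 *	dma_resv_unlock(obj->resv);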
2054  */
2055 void
2056 drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo)
2057 {
2058 	struct drm_gem_object *obj = va->gem.obj;
2059 	struct drm_gpuvm *gpuvm = va->vm;
2060 
2061 	if (unlikely(!obj))
2062 		return;
2063 
2064 	drm_WARN_ON(gpuvm->drm, obj != vm_bo->obj);
2065 
2066 	va->vm_bo = drm_gpuvm_bo_get(vm_bo);
2067 
2068 	drm_gem_gpuva_assert_lock_held(gpuvm, obj);
2069 	list_add_tail(&va->gem.entry, &vm_bo->list.gpuva);
2070 }
2071 EXPORT_SYMBOL_GPL(drm_gpuva_link);
2072 
2073 /**
2074  * drm_gpuva_unlink() - unlink a &drm_gpuva
2075  * @va: the &drm_gpuva to unlink
2076  *
2077  * This removes the given &va from the GPU VA list of the &drm_gem_object it is
2078  * associated with.
2079  *
2080  * This removes the given &va from the GPU VA list of the &drm_gpuvm_bo and
2081  * the &drm_gpuvm_bo from the &drm_gem_object it is associated with in case
2082  * this call unlinks the last &drm_gpuva from the &drm_gpuvm_bo.
2083  *
2084  * For every &drm_gpuva entry removed from the &drm_gpuvm_bo a reference of
2085  * the latter is dropped.
2086  *
2087  * This function expects the caller to protect the GEM's GPUVA list against
2088  * concurrent access using either the GEM's dma-resv or gpuva.lock mutex.
2089  */
2090 void
2091 drm_gpuva_unlink(struct drm_gpuva *va)
2092 {
2093 	struct drm_gem_object *obj = va->gem.obj;
2094 	struct drm_gpuvm_bo *vm_bo = va->vm_bo;
2095 
2096 	if (unlikely(!obj))
2097 		return;
2098 
2099 	drm_gem_gpuva_assert_lock_held(va->vm, obj);
2100 	list_del_init(&va->gem.entry);
2101 
2102 	va->vm_bo = NULL;
2103 	drm_gpuvm_bo_put(vm_bo);
2104 }
2105 EXPORT_SYMBOL_GPL(drm_gpuva_unlink);
2106 
2107 /**
2108  * drm_gpuva_unlink_defer() - unlink a &drm_gpuva with deferred vm_bo cleanup
2109  * @va: the &drm_gpuva to unlink
2110  *
2111  * Similar to drm_gpuva_unlink(), but uses drm_gpuvm_bo_put_deferred() and takes
2112  * the lock for the caller.
2113  */
2114 void
2115 drm_gpuva_unlink_defer(struct drm_gpuva *va)
2116 {
2117 	struct drm_gem_object *obj = va->gem.obj;
2118 	struct drm_gpuvm_bo *vm_bo = va->vm_bo;
2119 	bool should_defer_bo;
2120 
2121 	if (unlikely(!obj))
2122 		return;
2123 
2124 	drm_WARN_ON(vm_bo->vm->drm, !drm_gpuvm_immediate_mode(vm_bo->vm));
2125 
2126 	mutex_lock(&obj->gpuva.lock);
2127 	list_del_init(&va->gem.entry);
2128 
2129 	/*
2130 	 * This is drm_gpuvm_bo_put_deferred() except we already hold the mutex.
2131 	 */
2132 	should_defer_bo = kref_put(&vm_bo->kref, drm_gpuvm_bo_into_zombie);
2133 	mutex_unlock(&obj->gpuva.lock);
2134 	if (should_defer_bo)
2135 		drm_gpuvm_bo_defer_zombie_cleanup(vm_bo);
2136 
2137 	va->vm_bo = NULL;
2138 }
2139 EXPORT_SYMBOL_GPL(drm_gpuva_unlink_defer);
2140 
2141 /**
2142  * drm_gpuva_find_first() - find the first &drm_gpuva in the given range
2143  * @gpuvm: the &drm_gpuvm to search in
 * @addr: the &drm_gpuva's address
 * @range: the &drm_gpuva's range
 *
 * Returns: the first &drm_gpuva within the given range, or NULL if none is
 * found
2148  */
2149 struct drm_gpuva *
2150 drm_gpuva_find_first(struct drm_gpuvm *gpuvm,
2151 		     u64 addr, u64 range)
2152 {
2153 	u64 last = addr + range - 1;
2154 
2155 	return drm_gpuva_it_iter_first(&gpuvm->rb.tree, addr, last);
2156 }
2157 EXPORT_SYMBOL_GPL(drm_gpuva_find_first);
2158 
2159 /**
2160  * drm_gpuva_find() - find a &drm_gpuva
2161  * @gpuvm: the &drm_gpuvm to search in
 * @addr: the &drm_gpuva's address
 * @range: the &drm_gpuva's range
 *
 * Returns: the &drm_gpuva at the given @addr and with the given @range, or
 * NULL if none is found
2166  */
2167 struct drm_gpuva *
2168 drm_gpuva_find(struct drm_gpuvm *gpuvm,
2169 	       u64 addr, u64 range)
2170 {
2171 	struct drm_gpuva *va;
2172 
2173 	va = drm_gpuva_find_first(gpuvm, addr, range);
2174 	if (!va)
2175 		goto out;
2176 
2177 	if (va->va.addr != addr ||
2178 	    va->va.range != range)
2179 		goto out;
2180 
2181 	return va;
2182 
2183 out:
2184 	return NULL;
2185 }
2186 EXPORT_SYMBOL_GPL(drm_gpuva_find);
2187 
2188 /**
2189  * drm_gpuva_find_prev() - find the &drm_gpuva before the given address
2190  * @gpuvm: the &drm_gpuvm to search in
2191  * @start: the given GPU VA's start address
2192  *
 * Find the adjacent &drm_gpuva before the GPU VA with the given @start
 * address.
 *
 * Note that if there is any free space between the GPU VA mappings, no mapping
 * is returned.
2197  *
2198  * Returns: a pointer to the found &drm_gpuva or NULL if none was found
2199  */
2200 struct drm_gpuva *
2201 drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start)
2202 {
2203 	if (!drm_gpuvm_range_valid(gpuvm, start - 1, 1))
2204 		return NULL;
2205 
2206 	return drm_gpuva_it_iter_first(&gpuvm->rb.tree, start - 1, start);
2207 }
2208 EXPORT_SYMBOL_GPL(drm_gpuva_find_prev);
2209 
2210 /**
2211  * drm_gpuva_find_next() - find the &drm_gpuva after the given address
2212  * @gpuvm: the &drm_gpuvm to search in
2213  * @end: the given GPU VA's end address
2214  *
 * Find the adjacent &drm_gpuva after the GPU VA with the given @end address.
 *
 * Note that if there is any free space between the GPU VA mappings, no mapping
 * is returned.
2219  *
2220  * Returns: a pointer to the found &drm_gpuva or NULL if none was found
2221  */
2222 struct drm_gpuva *
2223 drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end)
2224 {
2225 	if (!drm_gpuvm_range_valid(gpuvm, end, 1))
2226 		return NULL;
2227 
2228 	return drm_gpuva_it_iter_first(&gpuvm->rb.tree, end, end + 1);
2229 }
2230 EXPORT_SYMBOL_GPL(drm_gpuva_find_next);
2231 
2232 /**
2233  * drm_gpuvm_interval_empty() - indicate whether a given interval of the VA space
2234  * is empty
2235  * @gpuvm: the &drm_gpuvm to check the range for
2236  * @addr: the start address of the range
2237  * @range: the range of the interval
2238  *
2239  * Returns: true if the interval is empty, false otherwise
2240  */
2241 bool
2242 drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
2243 {
2244 	return !drm_gpuva_find_first(gpuvm, addr, range);
2245 }
2246 EXPORT_SYMBOL_GPL(drm_gpuvm_interval_empty);
2247 
2248 /**
2249  * drm_gpuva_map() - helper to insert a &drm_gpuva according to a
2250  * &drm_gpuva_op_map
2251  * @gpuvm: the &drm_gpuvm
2252  * @va: the &drm_gpuva to insert
2253  * @op: the &drm_gpuva_op_map to initialize @va with
2254  *
2255  * Initializes the @va from the @op and inserts it into the given @gpuvm.
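 *
 * A minimal sketch of a driver's &drm_gpuvm_ops::sm_step_map callback; the
 * allocation helper is hypothetical and @priv is assumed to be the
 * &drm_gpuvm::
 *
 *	static int driver_sm_step_map(struct drm_gpuva_op *op, void *priv)
 *	{
 *		struct drm_gpuvm *gpuvm = priv;
 *		struct drm_gpuva *va = driver_gpuva_alloc();
 *
 *		if (!va)
 *			return -ENOMEM;
 *
 *		drm_gpuva_map(gpuvm, va, &op->map);
 *		return 0;
 *	}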
2256  */
2257 void
2258 drm_gpuva_map(struct drm_gpuvm *gpuvm,
2259 	      struct drm_gpuva *va,
2260 	      struct drm_gpuva_op_map *op)
2261 {
2262 	drm_gpuva_init_from_op(va, op);
2263 	drm_gpuva_insert(gpuvm, va);
2264 }
2265 EXPORT_SYMBOL_GPL(drm_gpuva_map);
2266 
2267 /**
2268  * drm_gpuva_remap() - helper to remap a &drm_gpuva according to a
2269  * &drm_gpuva_op_remap
2270  * @prev: the &drm_gpuva to remap when keeping the start of a mapping
2271  * @next: the &drm_gpuva to remap when keeping the end of a mapping
2272  * @op: the &drm_gpuva_op_remap to initialize @prev and @next with
2273  *
2274  * Removes the currently mapped &drm_gpuva and remaps it using @prev and/or
2275  * @next.
2276  */
2277 void
2278 drm_gpuva_remap(struct drm_gpuva *prev,
2279 		struct drm_gpuva *next,
2280 		struct drm_gpuva_op_remap *op)
2281 {
2282 	struct drm_gpuva *va = op->unmap->va;
2283 	struct drm_gpuvm *gpuvm = va->vm;
2284 
2285 	drm_gpuva_remove(va);
2286 
2287 	if (op->prev) {
2288 		drm_gpuva_init_from_op(prev, op->prev);
2289 		drm_gpuva_insert(gpuvm, prev);
2290 	}
2291 
2292 	if (op->next) {
2293 		drm_gpuva_init_from_op(next, op->next);
2294 		drm_gpuva_insert(gpuvm, next);
2295 	}
2296 }
2297 EXPORT_SYMBOL_GPL(drm_gpuva_remap);
2298 
2299 /**
2300  * drm_gpuva_unmap() - helper to remove a &drm_gpuva according to a
2301  * &drm_gpuva_op_unmap
2302  * @op: the &drm_gpuva_op_unmap specifying the &drm_gpuva to remove
2303  *
2304  * Removes the &drm_gpuva associated with the &drm_gpuva_op_unmap.
2305  */
2306 void
2307 drm_gpuva_unmap(struct drm_gpuva_op_unmap *op)
2308 {
2309 	drm_gpuva_remove(op->va);
2310 }
2311 EXPORT_SYMBOL_GPL(drm_gpuva_unmap);
2312 
2313 static int
2314 op_map_cb(const struct drm_gpuvm_ops *fn, void *priv,
2315 	  const struct drm_gpuvm_map_req *req)
2316 {
2317 	struct drm_gpuva_op op = {};
2318 
2319 	if (!req)
2320 		return 0;
2321 
2322 	op.op = DRM_GPUVA_OP_MAP;
2323 	op.map.va.addr = req->map.va.addr;
2324 	op.map.va.range = req->map.va.range;
2325 	op.map.gem.obj = req->map.gem.obj;
2326 	op.map.gem.offset = req->map.gem.offset;
2327 
2328 	return fn->sm_step_map(&op, priv);
2329 }
2330 
2331 static int
2332 op_remap_cb(const struct drm_gpuvm_ops *fn, void *priv,
2333 	    struct drm_gpuva_op_map *prev,
2334 	    struct drm_gpuva_op_map *next,
2335 	    struct drm_gpuva_op_unmap *unmap)
2336 {
2337 	struct drm_gpuva_op op = {};
2338 	struct drm_gpuva_op_remap *r;
2339 
2340 	op.op = DRM_GPUVA_OP_REMAP;
2341 	r = &op.remap;
2342 	r->prev = prev;
2343 	r->next = next;
2344 	r->unmap = unmap;
2345 
2346 	return fn->sm_step_remap(&op, priv);
2347 }
2348 
2349 static int
2350 op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv,
2351 	    struct drm_gpuva *va, bool merge, bool madvise)
2352 {
2353 	struct drm_gpuva_op op = {};
2354 
2355 	if (madvise)
2356 		return 0;
2357 
2358 	op.op = DRM_GPUVA_OP_UNMAP;
2359 	op.unmap.va = va;
2360 	op.unmap.keep = merge;
2361 
2362 	return fn->sm_step_unmap(&op, priv);
2363 }
2364 
2365 static int
2366 __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
2367 		   const struct drm_gpuvm_ops *ops, void *priv,
2368 		   const struct drm_gpuvm_map_req *req,
2369 		   bool madvise)
2370 {
2371 	struct drm_gem_object *req_obj = req->map.gem.obj;
2372 	const struct drm_gpuvm_map_req *op_map = madvise ? NULL : req;
2373 	struct drm_gpuva *va, *next;
2374 	u64 req_offset = req->map.gem.offset;
2375 	u64 req_range = req->map.va.range;
2376 	u64 req_addr = req->map.va.addr;
2377 	u64 req_end = req_addr + req_range;
2378 	int ret;
2379 
2380 	if (unlikely(!drm_gpuvm_range_valid(gpuvm, req_addr, req_range)))
2381 		return -EINVAL;
2382 
2383 	drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req_addr, req_end) {
2384 		struct drm_gem_object *obj = va->gem.obj;
2385 		u64 offset = va->gem.offset;
2386 		u64 addr = va->va.addr;
2387 		u64 range = va->va.range;
2388 		u64 end = addr + range;
2389 		bool merge = !!va->gem.obj;
2390 
2391 		if (madvise && obj)
2392 			continue;
2393 
2394 		if (addr == req_addr) {
2395 			merge &= obj == req_obj &&
2396 				 offset == req_offset;
2397 
2398 			if (end == req_end) {
2399 				ret = op_unmap_cb(ops, priv, va, merge, madvise);
2400 				if (ret)
2401 					return ret;
2402 				break;
2403 			}
2404 
2405 			if (end < req_end) {
2406 				ret = op_unmap_cb(ops, priv, va, merge, madvise);
2407 				if (ret)
2408 					return ret;
2409 				continue;
2410 			}
2411 
2412 			if (end > req_end) {
2413 				struct drm_gpuva_op_map n = {
2414 					.va.addr = req_end,
2415 					.va.range = range - req_range,
2416 					.gem.obj = obj,
2417 					.gem.offset = offset + req_range,
2418 				};
2419 				struct drm_gpuva_op_unmap u = {
2420 					.va = va,
2421 					.keep = merge,
2422 				};
2423 
2424 				ret = op_remap_cb(ops, priv, NULL, &n, &u);
2425 				if (ret)
2426 					return ret;
2427 
2428 				if (madvise)
2429 					op_map = req;
2430 				break;
2431 			}
2432 		} else if (addr < req_addr) {
2433 			u64 ls_range = req_addr - addr;
2434 			struct drm_gpuva_op_map p = {
2435 				.va.addr = addr,
2436 				.va.range = ls_range,
2437 				.gem.obj = obj,
2438 				.gem.offset = offset,
2439 			};
2440 			struct drm_gpuva_op_unmap u = { .va = va };
2441 
2442 			merge &= obj == req_obj &&
2443 				 offset + ls_range == req_offset;
2444 			u.keep = merge;
2445 
2446 			if (end == req_end) {
2447 				ret = op_remap_cb(ops, priv, &p, NULL, &u);
2448 				if (ret)
2449 					return ret;
2450 
2451 				if (madvise)
2452 					op_map = req;
2453 				break;
2454 			}
2455 
2456 			if (end < req_end) {
2457 				ret = op_remap_cb(ops, priv, &p, NULL, &u);
2458 				if (ret)
2459 					return ret;
2460 
2461 				if (madvise) {
2462 					struct drm_gpuvm_map_req map_req = {
						.map.va.addr = req_addr,
2464 						.map.va.range = end - req_addr,
2465 					};
2466 
2467 					ret = op_map_cb(ops, priv, &map_req);
2468 					if (ret)
2469 						return ret;
2470 				}
2471 
2472 				continue;
2473 			}
2474 
2475 			if (end > req_end) {
2476 				struct drm_gpuva_op_map n = {
2477 					.va.addr = req_end,
2478 					.va.range = end - req_end,
2479 					.gem.obj = obj,
2480 					.gem.offset = offset + ls_range +
2481 						      req_range,
2482 				};
2483 
2484 				ret = op_remap_cb(ops, priv, &p, &n, &u);
2485 				if (ret)
2486 					return ret;
2487 
2488 				if (madvise)
2489 					op_map = req;
2490 				break;
2491 			}
2492 		} else if (addr > req_addr) {
2493 			merge &= obj == req_obj &&
2494 				 offset == req_offset +
2495 					   (addr - req_addr);
2496 
2497 			if (end == req_end) {
2498 				ret = op_unmap_cb(ops, priv, va, merge, madvise);
2499 				if (ret)
2500 					return ret;
2501 
2502 				break;
2503 			}
2504 
2505 			if (end < req_end) {
2506 				ret = op_unmap_cb(ops, priv, va, merge, madvise);
2507 				if (ret)
2508 					return ret;
2509 
2510 				continue;
2511 			}
2512 
2513 			if (end > req_end) {
2514 				struct drm_gpuva_op_map n = {
2515 					.va.addr = req_end,
2516 					.va.range = end - req_end,
2517 					.gem.obj = obj,
2518 					.gem.offset = offset + req_end - addr,
2519 				};
2520 				struct drm_gpuva_op_unmap u = {
2521 					.va = va,
2522 					.keep = merge,
2523 				};
2524 
2525 				ret = op_remap_cb(ops, priv, NULL, &n, &u);
2526 				if (ret)
2527 					return ret;
2528 
2529 				if (madvise) {
2530 					struct drm_gpuvm_map_req map_req = {
						.map.va.addr = addr,
2532 						.map.va.range = req_end - addr,
2533 					};
2534 
2535 					return op_map_cb(ops, priv, &map_req);
2536 				}
2537 				break;
2538 			}
2539 		}
2540 	}
2541 	return op_map_cb(ops, priv, op_map);
2542 }
2543 
2544 static int
2545 __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
2546 		     const struct drm_gpuvm_ops *ops, void *priv,
2547 		     u64 req_addr, u64 req_range)
2548 {
2549 	struct drm_gpuva *va, *next;
2550 	u64 req_end = req_addr + req_range;
2551 	int ret;
2552 
2553 	if (unlikely(!drm_gpuvm_range_valid(gpuvm, req_addr, req_range)))
2554 		return -EINVAL;
2555 
2556 	drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req_addr, req_end) {
2557 		struct drm_gpuva_op_map prev = {}, next = {};
2558 		bool prev_split = false, next_split = false;
2559 		struct drm_gem_object *obj = va->gem.obj;
2560 		u64 offset = va->gem.offset;
2561 		u64 addr = va->va.addr;
2562 		u64 range = va->va.range;
2563 		u64 end = addr + range;
2564 
2565 		if (addr < req_addr) {
2566 			prev.va.addr = addr;
2567 			prev.va.range = req_addr - addr;
2568 			prev.gem.obj = obj;
2569 			prev.gem.offset = offset;
2570 
2571 			prev_split = true;
2572 		}
2573 
2574 		if (end > req_end) {
2575 			next.va.addr = req_end;
2576 			next.va.range = end - req_end;
2577 			next.gem.obj = obj;
2578 			next.gem.offset = offset + (req_end - addr);
2579 
2580 			next_split = true;
2581 		}
2582 
2583 		if (prev_split || next_split) {
2584 			struct drm_gpuva_op_unmap unmap = { .va = va };
2585 
2586 			ret = op_remap_cb(ops, priv,
2587 					  prev_split ? &prev : NULL,
2588 					  next_split ? &next : NULL,
2589 					  &unmap);
2590 			if (ret)
2591 				return ret;
2592 		} else {
2593 			ret = op_unmap_cb(ops, priv, va, false, false);
2594 			if (ret)
2595 				return ret;
2596 		}
2597 	}
2598 
2599 	return 0;
2600 }
2601 
2602 /**
2603  * drm_gpuvm_sm_map() - calls the &drm_gpuva_op split/merge steps
2604  * @gpuvm: the &drm_gpuvm representing the GPU VA space
2605  * @priv: pointer to a driver private data structure
 * @req: pointer to a struct drm_gpuvm_map_req
2607  *
2608  * This function iterates the given range of the GPU VA space. It utilizes the
2609  * &drm_gpuvm_ops to call back into the driver providing the split and merge
2610  * steps.
2611  *
2612  * Drivers may use these callbacks to update the GPU VA space right away within
 * the callback. In case the driver decides to copy and store the operations
 * for later processing, neither this function nor &drm_gpuvm_sm_unmap is
 * allowed to be called before the &drm_gpuvm's view of the GPU VA space was
2616  * updated with the previous set of operations. To update the
2617  * &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
2618  * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
2619  * used.
2620  *
2621  * A sequence of callbacks can contain map, unmap and remap operations, but
2622  * the sequence of callbacks might also be empty if no operation is required,
2623  * e.g. if the requested mapping already exists in the exact same way.
2624  *
 * There can be an arbitrary number of unmap operations, a maximum of two remap
 * operations and a single map operation. The latter represents the original
 * map operation requested by the caller.
2628  *
2629  * Returns: 0 on success or a negative error code
2630  */
2631 int
2632 drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
2633 		 const struct drm_gpuvm_map_req *req)
2634 {
2635 	const struct drm_gpuvm_ops *ops = gpuvm->ops;
2636 
2637 	if (unlikely(!(ops && ops->sm_step_map &&
2638 		       ops->sm_step_remap &&
2639 		       ops->sm_step_unmap)))
2640 		return -EINVAL;
2641 
2642 	return __drm_gpuvm_sm_map(gpuvm, ops, priv, req, false);
2643 }
2644 EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
2645 
2646 /**
2647  * drm_gpuvm_sm_unmap() - calls the &drm_gpuva_ops to split on unmap
2648  * @gpuvm: the &drm_gpuvm representing the GPU VA space
2649  * @priv: pointer to a driver private data structure
2650  * @req_addr: the start address of the range to unmap
2651  * @req_range: the range of the mappings to unmap
2652  *
2653  * This function iterates the given range of the GPU VA space. It utilizes the
2654  * &drm_gpuvm_ops to call back into the driver providing the operations to
2655  * unmap and, if required, split existing mappings.
2656  *
2657  * Drivers may use these callbacks to update the GPU VA space right away within
 * the callback. In case the driver decides to copy and store the operations
 * for later processing, neither this function nor &drm_gpuvm_sm_map is allowed
 * to be
2660  * called before the &drm_gpuvm's view of the GPU VA space was updated
2661  * with the previous set of operations. To update the &drm_gpuvm's view
2662  * of the GPU VA space drm_gpuva_insert(), drm_gpuva_destroy_locked() and/or
2663  * drm_gpuva_destroy_unlocked() should be used.
2664  *
2665  * A sequence of callbacks can contain unmap and remap operations, depending on
2666  * whether there are actual overlapping mappings to split.
2667  *
 * There can be an arbitrary number of unmap operations and a maximum of two
2669  * remap operations.
2670  *
2671  * Returns: 0 on success or a negative error code
2672  */
2673 int
2674 drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
2675 		   u64 req_addr, u64 req_range)
2676 {
2677 	const struct drm_gpuvm_ops *ops = gpuvm->ops;
2678 
2679 	if (unlikely(!(ops && ops->sm_step_remap &&
2680 		       ops->sm_step_unmap)))
2681 		return -EINVAL;
2682 
2683 	return __drm_gpuvm_sm_unmap(gpuvm, ops, priv,
2684 				    req_addr, req_range);
2685 }
2686 EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap);
2687 
2688 static int
2689 drm_gpuva_sm_step_lock(struct drm_gpuva_op *op, void *priv)
2690 {
2691 	struct drm_exec *exec = priv;
2692 
2693 	switch (op->op) {
2694 	case DRM_GPUVA_OP_REMAP:
2695 		if (op->remap.unmap->va->gem.obj)
2696 			return drm_exec_lock_obj(exec, op->remap.unmap->va->gem.obj);
2697 		return 0;
2698 	case DRM_GPUVA_OP_UNMAP:
2699 		if (op->unmap.va->gem.obj)
2700 			return drm_exec_lock_obj(exec, op->unmap.va->gem.obj);
2701 		return 0;
2702 	default:
2703 		return 0;
2704 	}
2705 }
2706 
2707 static const struct drm_gpuvm_ops lock_ops = {
2708 	.sm_step_map = drm_gpuva_sm_step_lock,
2709 	.sm_step_remap = drm_gpuva_sm_step_lock,
2710 	.sm_step_unmap = drm_gpuva_sm_step_lock,
2711 };
2712 
2713 /**
2714  * drm_gpuvm_sm_map_exec_lock() - locks the objects touched by a drm_gpuvm_sm_map()
2715  * @gpuvm: the &drm_gpuvm representing the GPU VA space
2716  * @exec: the &drm_exec locking context
2717  * @num_fences: for newly mapped objects, the # of fences to reserve
 * @req: pointer to a struct drm_gpuvm_map_req
2719  *
2720  * This function locks (drm_exec_lock_obj()) objects that will be unmapped/
2721  * remapped, and locks+prepares (drm_exec_prepare_object()) objects that
2722  * will be newly mapped.
2723  *
2724  * The expected usage is::
2725  *
2726  *    vm_bind {
2727  *        struct drm_exec exec;
2728  *
2729  *        // IGNORE_DUPLICATES is required, INTERRUPTIBLE_WAIT is recommended:
2730  *        drm_exec_init(&exec, IGNORE_DUPLICATES | INTERRUPTIBLE_WAIT, 0);
2731  *
2732  *        drm_exec_until_all_locked (&exec) {
2733  *            for_each_vm_bind_operation {
2734  *                switch (op->op) {
2735  *                case DRIVER_OP_UNMAP:
2736  *                    ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->addr, op->range);
2737  *                    break;
2738  *                case DRIVER_OP_MAP:
2739  *                    ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences, &req);
2740  *                    break;
2741  *                }
2742  *
2743  *                drm_exec_retry_on_contention(&exec);
2744  *                if (ret)
2745  *                    return ret;
2746  *            }
2747  *        }
2748  *    }
2749  *
2750  * This enables all locking to be performed before the driver begins modifying
2751  * the VM.  This is safe to do in the case of overlapping DRIVER_VM_BIND_OPs,
2752  * where an earlier op can alter the sequence of steps generated for a later
2753  * op, because the later altered step will involve the same GEM object(s)
2754  * already seen in the earlier locking step.  For example:
2755  *
2756  * 1) An earlier driver DRIVER_OP_UNMAP op removes the need for a
2757  *    DRM_GPUVA_OP_REMAP/UNMAP step.  This is safe because we've already
2758  *    locked the GEM object in the earlier DRIVER_OP_UNMAP op.
2759  *
2760  * 2) An earlier DRIVER_OP_MAP op overlaps with a later DRIVER_OP_MAP/UNMAP
2761  *    op, introducing a DRM_GPUVA_OP_REMAP/UNMAP that wouldn't have been
2762  *    required without the earlier DRIVER_OP_MAP.  This is safe because we've
2763  *    already locked the GEM object in the earlier DRIVER_OP_MAP step.
2764  *
2765  * Returns: 0 on success or a negative error code
2766  */
2767 int
2768 drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
2769 			   struct drm_exec *exec, unsigned int num_fences,
2770 			   struct drm_gpuvm_map_req *req)
2771 {
2772 	struct drm_gem_object *req_obj = req->map.gem.obj;
2773 
2774 	if (req_obj) {
		int ret = drm_exec_prepare_obj(exec, req_obj, num_fences);

		if (ret)
			return ret;
	}

	return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec, req, false);
}
2783 EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_exec_lock);
2784 
2785 /**
2786  * drm_gpuvm_sm_unmap_exec_lock() - locks the objects touched by drm_gpuvm_sm_unmap()
2787  * @gpuvm: the &drm_gpuvm representing the GPU VA space
2788  * @exec: the &drm_exec locking context
2789  * @req_addr: the start address of the range to unmap
2790  * @req_range: the range of the mappings to unmap
2791  *
2792  * This function locks (drm_exec_lock_obj()) objects that will be unmapped/
2793  * remapped by drm_gpuvm_sm_unmap().
2794  *
2795  * See drm_gpuvm_sm_map_exec_lock() for expected usage.
2796  *
2797  * Returns: 0 on success or a negative error code
2798  */
2799 int
2800 drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
2801 			     u64 req_addr, u64 req_range)
2802 {
2803 	return __drm_gpuvm_sm_unmap(gpuvm, &lock_ops, exec,
2804 				    req_addr, req_range);
2805 }
2806 EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap_exec_lock);
2807 
2808 static struct drm_gpuva_op *
2809 gpuva_op_alloc(struct drm_gpuvm *gpuvm)
2810 {
2811 	const struct drm_gpuvm_ops *fn = gpuvm->ops;
2812 	struct drm_gpuva_op *op;
2813 
2814 	if (fn && fn->op_alloc)
2815 		op = fn->op_alloc();
2816 	else
2817 		op = kzalloc(sizeof(*op), GFP_KERNEL);
2818 
2819 	if (unlikely(!op))
2820 		return NULL;
2821 
2822 	return op;
2823 }
2824 
2825 static void
2826 gpuva_op_free(struct drm_gpuvm *gpuvm,
2827 	      struct drm_gpuva_op *op)
2828 {
2829 	const struct drm_gpuvm_ops *fn = gpuvm->ops;
2830 
2831 	if (fn && fn->op_free)
2832 		fn->op_free(op);
2833 	else
2834 		kfree(op);
2835 }
2836 
2837 static int
2838 drm_gpuva_sm_step(struct drm_gpuva_op *__op,
2839 		  void *priv)
2840 {
2841 	struct {
2842 		struct drm_gpuvm *vm;
2843 		struct drm_gpuva_ops *ops;
2844 	} *args = priv;
2845 	struct drm_gpuvm *gpuvm = args->vm;
2846 	struct drm_gpuva_ops *ops = args->ops;
2847 	struct drm_gpuva_op *op;
2848 
2849 	op = gpuva_op_alloc(gpuvm);
2850 	if (unlikely(!op))
2851 		goto err;
2852 
2853 	memcpy(op, __op, sizeof(*op));
2854 
2855 	if (op->op == DRM_GPUVA_OP_REMAP) {
2856 		struct drm_gpuva_op_remap *__r = &__op->remap;
2857 		struct drm_gpuva_op_remap *r = &op->remap;
2858 
2859 		r->unmap = kmemdup(__r->unmap, sizeof(*r->unmap),
2860 				   GFP_KERNEL);
2861 		if (unlikely(!r->unmap))
2862 			goto err_free_op;
2863 
2864 		if (__r->prev) {
2865 			r->prev = kmemdup(__r->prev, sizeof(*r->prev),
2866 					  GFP_KERNEL);
2867 			if (unlikely(!r->prev))
2868 				goto err_free_unmap;
2869 		}
2870 
2871 		if (__r->next) {
2872 			r->next = kmemdup(__r->next, sizeof(*r->next),
2873 					  GFP_KERNEL);
2874 			if (unlikely(!r->next))
2875 				goto err_free_prev;
2876 		}
2877 	}
2878 
2879 	list_add_tail(&op->entry, &ops->list);
2880 
2881 	return 0;
2882 
err_free_prev:
	kfree(op->remap.prev);
err_free_unmap:
	kfree(op->remap.unmap);
2887 err_free_op:
2888 	gpuva_op_free(gpuvm, op);
2889 err:
2890 	return -ENOMEM;
2891 }
2892 
2893 static const struct drm_gpuvm_ops gpuvm_list_ops = {
2894 	.sm_step_map = drm_gpuva_sm_step,
2895 	.sm_step_remap = drm_gpuva_sm_step,
2896 	.sm_step_unmap = drm_gpuva_sm_step,
2897 };
2898 
2899 static struct drm_gpuva_ops *
2900 __drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
2901 			      const struct drm_gpuvm_map_req *req,
2902 			      bool madvise)
2903 {
2904 	struct drm_gpuva_ops *ops;
2905 	struct {
2906 		struct drm_gpuvm *vm;
2907 		struct drm_gpuva_ops *ops;
2908 	} args;
2909 	int ret;
2910 
2911 	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
2912 	if (unlikely(!ops))
2913 		return ERR_PTR(-ENOMEM);
2914 
2915 	INIT_LIST_HEAD(&ops->list);
2916 
2917 	args.vm = gpuvm;
2918 	args.ops = ops;
2919 
2920 	ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args, req, madvise);
2921 	if (ret)
2922 		goto err_free_ops;
2923 
2924 	return ops;
2925 
2926 err_free_ops:
2927 	drm_gpuva_ops_free(gpuvm, ops);
2928 	return ERR_PTR(ret);
2929 }
2930 
2931 /**
2932  * drm_gpuvm_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
2933  * @gpuvm: the &drm_gpuvm representing the GPU VA space
2934  * @req: map request arguments
2935  *
2936  * This function creates a list of operations to perform splitting and merging
2937  * of existing mapping(s) with the newly requested one.
2938  *
2939  * The list can be iterated with &drm_gpuva_for_each_op and must be processed
2940  * in the given order. It can contain map, unmap and remap operations, but it
2941  * also can be empty if no operation is required, e.g. if the requested mapping
2942  * already exists in the exact same way.
2943  *
 * There can be an arbitrary number of unmap operations, a maximum of two remap
 * operations and a single map operation. The latter represents the original
2946  * map operation requested by the caller.
2947  *
 * Note that before calling this function again with another mapping request, it
2949  * is necessary to update the &drm_gpuvm's view of the GPU VA space. The
2950  * previously obtained operations must be either processed or abandoned. To
2951  * update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
2952  * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
2953  * used.
2954  *
2955  * After the caller finished processing the returned &drm_gpuva_ops, they must
2956  * be freed with &drm_gpuva_ops_free.
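 *
 * A minimal processing sketch; the driver handlers are hypothetical::
 *
 *	struct drm_gpuva_ops *ops;
 *	struct drm_gpuva_op *op;
 *
 *	ops = drm_gpuvm_sm_map_ops_create(gpuvm, &req);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 *	drm_gpuva_for_each_op(op, ops) {
 *		switch (op->op) {
 *		case DRM_GPUVA_OP_MAP:
 *			ret = driver_handle_map(gpuvm, &op->map);
 *			break;
 *		case DRM_GPUVA_OP_REMAP:
 *			ret = driver_handle_remap(gpuvm, &op->remap);
 *			break;
 *		case DRM_GPUVA_OP_UNMAP:
 *			ret = driver_handle_unmap(gpuvm, &op->unmap);
 *			break;
 *		default:
 *			break;
 *		}
 *		if (ret)
 *			break;
 *	}
 *
 *	drm_gpuva_ops_free(gpuvm, ops);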
2957  *
2958  * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
2959  */
2960 struct drm_gpuva_ops *
2961 drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
2962 			    const struct drm_gpuvm_map_req *req)
2963 {
2964 	return __drm_gpuvm_sm_map_ops_create(gpuvm, req, false);
2965 }
2966 EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_ops_create);
2967 
2968 /**
2969  * drm_gpuvm_madvise_ops_create() - creates the &drm_gpuva_ops to split
2970  * @gpuvm: the &drm_gpuvm representing the GPU VA space
2971  * @req: map request arguments
2972  *
 * This function creates a list of operations to perform splitting of existing
 * mapping(s) at the start and/or the end of the requested range.
 *
 * The list can be iterated with &drm_gpuva_for_each_op and must be processed
 * in the given order. It can contain map and remap operations, but it
 * also can be empty if no operation is required, e.g. if the requested mapping
 * already exists in the exact same way.
 *
 * There will be no unmap operations, a maximum of two remap operations and up
 * to two map operations. The two map operations correspond to: one covering
 * the range from the requested start address to the end of the first
 * overlapped mapping, and another covering the range from the start of the
 * last overlapped mapping to the requested end address.
2984  *
 * Note that before calling this function again with another mapping request, it
2986  * is necessary to update the &drm_gpuvm's view of the GPU VA space. The
2987  * previously obtained operations must be either processed or abandoned. To
2988  * update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
2989  * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
2990  * used.
2991  *
2992  * After the caller finished processing the returned &drm_gpuva_ops, they must
2993  * be freed with &drm_gpuva_ops_free.
2994  *
2995  * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
2996  */
2997 struct drm_gpuva_ops *
2998 drm_gpuvm_madvise_ops_create(struct drm_gpuvm *gpuvm,
2999 			     const struct drm_gpuvm_map_req *req)
3000 {
3001 	return __drm_gpuvm_sm_map_ops_create(gpuvm, req, true);
3002 }
3003 EXPORT_SYMBOL_GPL(drm_gpuvm_madvise_ops_create);
3004 
3005 /**
3006  * drm_gpuvm_sm_unmap_ops_create() - creates the &drm_gpuva_ops to split on
3007  * unmap
3008  * @gpuvm: the &drm_gpuvm representing the GPU VA space
3009  * @req_addr: the start address of the range to unmap
3010  * @req_range: the range of the mappings to unmap
3011  *
3012  * This function creates a list of operations to perform unmapping and, if
3013  * required, splitting of the mappings overlapping the unmap range.
3014  *
3015  * The list can be iterated with &drm_gpuva_for_each_op and must be processed
3016  * in the given order. It can contain unmap and remap operations, depending on
3017  * whether there are actual overlapping mappings to split.
3018  *
 * There can be an arbitrary number of unmap operations and a maximum of two
3020  * remap operations.
3021  *
 * Note that before calling this function again with another range to unmap, it
3023  * is necessary to update the &drm_gpuvm's view of the GPU VA space. The
3024  * previously obtained operations must be processed or abandoned. To update the
3025  * &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
3026  * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
3027  * used.
3028  *
3029  * After the caller finished processing the returned &drm_gpuva_ops, they must
3030  * be freed with &drm_gpuva_ops_free.
3031  *
3032  * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
3033  */
3034 struct drm_gpuva_ops *
3035 drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
3036 			      u64 req_addr, u64 req_range)
3037 {
3038 	struct drm_gpuva_ops *ops;
3039 	struct {
3040 		struct drm_gpuvm *vm;
3041 		struct drm_gpuva_ops *ops;
3042 	} args;
3043 	int ret;
3044 
3045 	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
3046 	if (unlikely(!ops))
3047 		return ERR_PTR(-ENOMEM);
3048 
3049 	INIT_LIST_HEAD(&ops->list);
3050 
3051 	args.vm = gpuvm;
3052 	args.ops = ops;
3053 
3054 	ret = __drm_gpuvm_sm_unmap(gpuvm, &gpuvm_list_ops, &args,
3055 				   req_addr, req_range);
3056 	if (ret)
3057 		goto err_free_ops;
3058 
3059 	return ops;
3060 
3061 err_free_ops:
3062 	drm_gpuva_ops_free(gpuvm, ops);
3063 	return ERR_PTR(ret);
3064 }
3065 EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap_ops_create);
3066 
3067 /**
3068  * drm_gpuvm_prefetch_ops_create() - creates the &drm_gpuva_ops to prefetch
3069  * @gpuvm: the &drm_gpuvm representing the GPU VA space
3070  * @addr: the start address of the range to prefetch
3071  * @range: the range of the mappings to prefetch
3072  *
3073  * This function creates a list of operations to perform prefetching.
3074  *
3075  * The list can be iterated with &drm_gpuva_for_each_op and must be processed
3076  * in the given order. It can contain prefetch operations.
3077  *
 * There can be an arbitrary number of prefetch operations.
3079  *
3080  * After the caller finished processing the returned &drm_gpuva_ops, they must
3081  * be freed with &drm_gpuva_ops_free.
3082  *
3083  * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
3084  */
3085 struct drm_gpuva_ops *
3086 drm_gpuvm_prefetch_ops_create(struct drm_gpuvm *gpuvm,
3087 			      u64 addr, u64 range)
3088 {
3089 	struct drm_gpuva_ops *ops;
3090 	struct drm_gpuva_op *op;
3091 	struct drm_gpuva *va;
3092 	u64 end = addr + range;
3093 	int ret;
3094 
3095 	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
3096 	if (!ops)
3097 		return ERR_PTR(-ENOMEM);
3098 
3099 	INIT_LIST_HEAD(&ops->list);
3100 
3101 	drm_gpuvm_for_each_va_range(va, gpuvm, addr, end) {
3102 		op = gpuva_op_alloc(gpuvm);
3103 		if (!op) {
3104 			ret = -ENOMEM;
3105 			goto err_free_ops;
3106 		}
3107 
3108 		op->op = DRM_GPUVA_OP_PREFETCH;
3109 		op->prefetch.va = va;
3110 		list_add_tail(&op->entry, &ops->list);
3111 	}
3112 
3113 	return ops;
3114 
3115 err_free_ops:
3116 	drm_gpuva_ops_free(gpuvm, ops);
3117 	return ERR_PTR(ret);
3118 }
3119 EXPORT_SYMBOL_GPL(drm_gpuvm_prefetch_ops_create);
3120 
3121 /**
3122  * drm_gpuvm_bo_unmap_ops_create() - creates the &drm_gpuva_ops to unmap a GEM
3123  * @vm_bo: the &drm_gpuvm_bo abstraction
3124  *
3125  * This function creates a list of operations to perform unmapping for every
3126  * GPUVA attached to a GEM.
3127  *
 * The list can be iterated with &drm_gpuva_for_each_op and consists of an
 * arbitrary number of unmap operations.
3130  *
3131  * After the caller finished processing the returned &drm_gpuva_ops, they must
3132  * be freed with &drm_gpuva_ops_free.
3133  *
3134  * This function expects the caller to protect the GEM's GPUVA list against
3135  * concurrent access using either the GEM's dma-resv or gpuva.lock mutex.
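 *
 * A minimal usage sketch, assuming the GEM's gpuva list is protected by the
 * GEM's dma-resv lock::
 *
 *	dma_resv_lock(obj->resv, NULL);
 *	ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
 *	dma_resv_unlock(obj->resv);
 *
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);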
3136  *
3137  * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
3138  */
3139 struct drm_gpuva_ops *
3140 drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo)
3141 {
3142 	struct drm_gpuva_ops *ops;
3143 	struct drm_gpuva_op *op;
3144 	struct drm_gpuva *va;
3145 	int ret;
3146 
3147 	drm_gem_gpuva_assert_lock_held(vm_bo->vm, vm_bo->obj);
3148 
3149 	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
3150 	if (!ops)
3151 		return ERR_PTR(-ENOMEM);
3152 
3153 	INIT_LIST_HEAD(&ops->list);
3154 
3155 	drm_gpuvm_bo_for_each_va(va, vm_bo) {
3156 		op = gpuva_op_alloc(vm_bo->vm);
3157 		if (!op) {
3158 			ret = -ENOMEM;
3159 			goto err_free_ops;
3160 		}
3161 
3162 		op->op = DRM_GPUVA_OP_UNMAP;
3163 		op->unmap.va = va;
3164 		list_add_tail(&op->entry, &ops->list);
3165 	}
3166 
3167 	return ops;
3168 
3169 err_free_ops:
3170 	drm_gpuva_ops_free(vm_bo->vm, ops);
3171 	return ERR_PTR(ret);
3172 }
3173 EXPORT_SYMBOL_GPL(drm_gpuvm_bo_unmap_ops_create);
3174 
3175 /**
3176  * drm_gpuva_ops_free() - free the given &drm_gpuva_ops
3177  * @gpuvm: the &drm_gpuvm the ops were created for
3178  * @ops: the &drm_gpuva_ops to free
3179  *
3180  * Frees the given &drm_gpuva_ops structure including all the ops associated
3181  * with it.
3182  */
3183 void
3184 drm_gpuva_ops_free(struct drm_gpuvm *gpuvm,
3185 		   struct drm_gpuva_ops *ops)
3186 {
3187 	struct drm_gpuva_op *op, *next;
3188 
3189 	drm_gpuva_for_each_op_safe(op, next, ops) {
3190 		list_del(&op->entry);
3191 
3192 		if (op->op == DRM_GPUVA_OP_REMAP) {
3193 			kfree(op->remap.prev);
3194 			kfree(op->remap.next);
3195 			kfree(op->remap.unmap);
3196 		}
3197 
3198 		gpuva_op_free(gpuvm, op);
3199 	}
3200 
3201 	kfree(ops);
3202 }
3203 EXPORT_SYMBOL_GPL(drm_gpuva_ops_free);
3204 
3205 MODULE_DESCRIPTION("DRM GPUVM");
3206 MODULE_LICENSE("GPL");
3207