1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
2 /*
3  * Copyright (c) 2022 Red Hat.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *     Danilo Krummrich <dakr@redhat.com>
25  *
26  */
27 
28 #include <drm/drm_gpuvm.h>
29 
30 #include <linux/export.h>
31 #include <linux/interval_tree_generic.h>
32 #include <linux/mm.h>
33 
34 /**
35  * DOC: Overview
36  *
37  * The DRM GPU VA Manager, represented by struct drm_gpuvm, keeps track of a
38  * GPU's virtual address (VA) space and manages the corresponding virtual
39  * mappings represented by &drm_gpuva objects. It also keeps track of the
40  * mapping's backing &drm_gem_object buffers.
41  *
42  * &drm_gem_object buffers maintain a list of &drm_gpuva objects representing
43  * all existent GPU VA mappings using this &drm_gem_object as backing buffer.
44  *
45  * GPU VAs can be flagged as sparse, such that drivers may use GPU VAs to also
46  * keep track of sparse PTEs in order to support Vulkan 'Sparse Resources'.
47  *
48  * The GPU VA manager internally uses a rb-tree to manage the
49  * &drm_gpuva mappings within a GPU's virtual address space.
50  *
51  * The &drm_gpuvm structure contains a special &drm_gpuva representing the
52  * portion of VA space reserved by the kernel. This node is initialized together
53  * with the GPU VA manager instance and removed when the GPU VA manager is
54  * destroyed.
55  *
56  * In a typical application, drivers would embed struct drm_gpuvm and
57  * struct drm_gpuva within their own driver specific structures; this way
58  * neither the &drm_gpuvm itself nor the &drm_gpuva entries require
59  * separate memory allocations.
60  *
61  * The data structures needed to store &drm_gpuvas within the &drm_gpuvm are
62  * contained within struct drm_gpuva already. Hence, for inserting &drm_gpuva
63  * entries from within dma-fence signalling critical sections it is enough to
64  * pre-allocate the &drm_gpuva structures.
65  *
66  * &drm_gem_objects which are private to a single VM can share a common
67  * &dma_resv in order to improve locking efficiency (e.g. with &drm_exec).
68  * For this purpose drivers must pass a &drm_gem_object to drm_gpuvm_init(), in
69  * the following called 'resv object', which serves as the container of the
70  * GPUVM's shared &dma_resv. This resv object can be a driver specific
71  * &drm_gem_object, such as the &drm_gem_object containing the root page table,
72  * but it can also be a 'dummy' object, which can be allocated with
73  * drm_gpuvm_resv_object_alloc().
74  *
75  * In order to connect a struct drm_gpuva to its backing &drm_gem_object, each
76  * &drm_gem_object maintains a list of &drm_gpuvm_bo structures, and each
77  * &drm_gpuvm_bo contains a list of &drm_gpuva structures.
78  *
79  * A &drm_gpuvm_bo is an abstraction that represents a combination of a
80  * &drm_gpuvm and a &drm_gem_object. Every such combination should be unique.
81  * This is ensured by the API through drm_gpuvm_bo_obtain() and
82  * drm_gpuvm_bo_obtain_prealloc() which first look into the corresponding
83  * &drm_gem_object list of &drm_gpuvm_bos for an existing instance of this
84  * particular combination. If none exists, a new instance is created and linked
85  * to the &drm_gem_object.
86  *
87  * &drm_gpuvm_bo structures, since unique for a given &drm_gpuvm, are also used
88  * as entries for the &drm_gpuvm's lists of external and evicted objects. Those
89  * lists are maintained in order to accelerate locking of dma-resv locks and
90  * validation of evicted objects bound in a &drm_gpuvm. For instance, all
91  * &drm_gem_object's &dma_resv of a given &drm_gpuvm can be locked by calling
92  * drm_gpuvm_exec_lock(). Once locked drivers can call drm_gpuvm_validate() in
93  * order to validate all evicted &drm_gem_objects. It is also possible to lock
94  * additional &drm_gem_objects by providing the corresponding parameters to
95  * drm_gpuvm_exec_lock() as well as open code the &drm_exec loop while making
96  * use of helper functions such as drm_gpuvm_prepare_range() or
97  * drm_gpuvm_prepare_objects().
98  *
99  * Every bound &drm_gem_object is treated as an external object when its
100  * &dma_resv structure is different from the &drm_gpuvm's common &dma_resv structure.
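 *
 * The following is a minimal sketch of the lock and validate flow described
 * above, using the &drm_gpuvm_exec helpers. The driver_vm_prepare() wrapper is
 * a hypothetical placeholder; the chosen &dma_resv usage values and the error
 * handling are illustrative only::
 *
 *	int driver_vm_prepare(struct drm_gpuvm *gpuvm, struct dma_fence *fence)
 *	{
 *		struct drm_gpuvm_exec vm_exec = {
 *			.vm = gpuvm,
 *			.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
 *			.num_fences = 1,
 *		};
 *		int ret;
 *
 *		// Locks the common resv object and every extobj's dma-resv.
 *		ret = drm_gpuvm_exec_lock(&vm_exec);
 *		if (ret)
 *			return ret;
 *
 *		// Calls back into &drm_gpuvm_ops.vm_bo_validate for evicted BOs.
 *		ret = drm_gpuvm_exec_validate(&vm_exec);
 *		if (ret)
 *			goto out_unlock;
 *
 *		drm_gpuvm_exec_resv_add_fence(&vm_exec, fence,
 *					      DMA_RESV_USAGE_BOOKKEEP,
 *					      DMA_RESV_USAGE_BOOKKEEP);
 *
 *	out_unlock:
 *		drm_gpuvm_exec_unlock(&vm_exec);
 *		return ret;
 *	}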
101  */
102 
103 /**
104  * DOC: Split and Merge
105  *
106  * Besides its capability to manage and represent a GPU VA space, the
107  * GPU VA manager also provides functions to let the &drm_gpuvm calculate a
108  * sequence of operations to satisfy a given map or unmap request.
109  *
110  * Therefore the DRM GPU VA manager provides an algorithm implementing splitting
111  * and merging of existent GPU VA mappings with the ones that are requested to
112  * be mapped or unmapped. This feature is required by the Vulkan API to
113  * implement Vulkan 'Sparse Memory Bindings' - driver UAPIs often refer to this
114  * as VM BIND.
115  *
116  * Drivers can call drm_gpuvm_sm_map() to receive a sequence of callbacks
117  * containing map, unmap and remap operations for a given newly requested
118  * mapping. The sequence of callbacks represents the set of operations to
119  * execute in order to integrate the new mapping cleanly into the current state
120  * of the GPU VA space.
121  *
122  * Depending on how the new GPU VA mapping intersects with the existent mappings
123  * of the GPU VA space the &drm_gpuvm_ops callbacks contain an arbitrary amount
124  * of unmap operations, a maximum of two remap operations and a single map
125  * operation. The caller might receive no callback at all if no operation is
126  * required, e.g. if the requested mapping already exists in the exact same way.
127  *
128  * The single map operation represents the original map operation requested by
129  * the caller.
130  *
131  * &drm_gpuva_op_unmap contains a 'keep' field, which indicates whether the
132  * &drm_gpuva to unmap is physically contiguous with the original mapping
133  * request. Optionally, if 'keep' is set, drivers may keep the actual page table
134  * entries for this &drm_gpuva, adding only the missing page table entries and
135  * updating the &drm_gpuvm's view of things accordingly.
136  *
137  * Drivers may do the same optimization, namely delta page table updates, also
138  * for remap operations. This is possible since &drm_gpuva_op_remap consists of
139  * one unmap operation and one or two map operations, such that drivers can
140  * derive the page table update delta accordingly.
141  *
142  * Note that there can't be more than two existent mappings to split up, one at
143  * the beginning and one at the end of the new mapping, hence there is a
144  * maximum of two remap operations.
145  *
146  * Analogous to drm_gpuvm_sm_map(), drm_gpuvm_sm_unmap() uses &drm_gpuvm_ops to
147  * call back into the driver in order to unmap a range of GPU VA space. The
148  * logic behind this function is much simpler though: For all existent mappings
149  * enclosed by the given range, unmap operations are created. For mappings which
150  * are only partially located within the given range, remap operations are
151  * created such that those mappings are split up and re-mapped partially.
152  *
153  * As an alternative to drm_gpuvm_sm_map() and drm_gpuvm_sm_unmap(),
154  * drm_gpuvm_sm_map_ops_create() and drm_gpuvm_sm_unmap_ops_create() can be used
155  * to directly obtain an instance of struct drm_gpuva_ops containing a list of
156  * &drm_gpuva_op, which can be iterated with drm_gpuva_for_each_op(). This list
157  * contains the &drm_gpuva_ops analogous to the callbacks one would receive when
158  * calling drm_gpuvm_sm_map() or drm_gpuvm_sm_unmap(). While this way requires
159  * more memory (to allocate the &drm_gpuva_ops), it provides drivers a way to
160  * iterate the &drm_gpuva_op multiple times, e.g. once in a context where memory
161  * allocations are possible (e.g. to allocate GPU page tables) and once in the
162  * dma-fence signalling critical path.
163  *
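 * As a rough sketch, the two iterations could look like this
 * (driver_gpuva_alloc_resources() and driver_gpuva_run_bind() are hypothetical
 * driver helpers, gpuvm and map_req are assumed to be set up as in the
 * examples further below, and error handling is omitted)::
 *
 *	struct drm_gpuva_ops *ops;
 *	struct drm_gpuva_op *op;
 *
 *	ops = drm_gpuvm_sm_map_ops_create(gpuvm, &map_req);
 *
 *	// First pass, outside the dma-fence signalling critical path;
 *	// memory allocations (e.g. for page tables) are still allowed.
 *	drm_gpuva_for_each_op(op, ops)
 *		driver_gpuva_alloc_resources(op);
 *
 *	// Second pass, e.g. from the bind job's run() callback; no allocations.
 *	drm_gpuva_for_each_op(op, ops)
 *		driver_gpuva_run_bind(op);
 *
 *	drm_gpuva_ops_free(gpuvm, ops);
 *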
164  * To update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert() and
165  * drm_gpuva_remove() may be used. These functions can safely be used from
166  * &drm_gpuvm_ops callbacks originating from drm_gpuvm_sm_map() or
167  * drm_gpuvm_sm_unmap(). However, it might be more convenient to use the
168  * provided helper functions drm_gpuva_map(), drm_gpuva_remap() and
169  * drm_gpuva_unmap() instead.
170  *
171  * The following diagram depicts the basic relationships of existent GPU VA
172  * mappings, a newly requested mapping and the resulting mappings as implemented
173  * by drm_gpuvm_sm_map() - it doesn't cover any arbitrary combinations of these.
174  *
175  * 1) Requested mapping is identical. Replace it, but indicate the backing PTEs
176  *    could be kept.
177  *
178  *    ::
179  *
180  *	     0     a     1
181  *	old: |-----------| (bo_offset=n)
182  *
183  *	     0     a     1
184  *	req: |-----------| (bo_offset=n)
185  *
186  *	     0     a     1
187  *	new: |-----------| (bo_offset=n)
188  *
189  *
190  * 2) Requested mapping is identical, except for the BO offset, hence replace
191  *    the mapping.
192  *
193  *    ::
194  *
195  *	     0     a     1
196  *	old: |-----------| (bo_offset=n)
197  *
198  *	     0     a     1
199  *	req: |-----------| (bo_offset=m)
200  *
201  *	     0     a     1
202  *	new: |-----------| (bo_offset=m)
203  *
204  *
205  * 3) Requested mapping is identical, except for the backing BO, hence replace
206  *    the mapping.
207  *
208  *    ::
209  *
210  *	     0     a     1
211  *	old: |-----------| (bo_offset=n)
212  *
213  *	     0     b     1
214  *	req: |-----------| (bo_offset=n)
215  *
216  *	     0     b     1
217  *	new: |-----------| (bo_offset=n)
218  *
219  *
220  * 4) Existent mapping is a left aligned subset of the requested one, hence
221  *    replace the existent one.
222  *
223  *    ::
224  *
225  *	     0  a  1
226  *	old: |-----|       (bo_offset=n)
227  *
228  *	     0     a     2
229  *	req: |-----------| (bo_offset=n)
230  *
231  *	     0     a     2
232  *	new: |-----------| (bo_offset=n)
233  *
234  *    .. note::
235  *       We expect to see the same result for a request with a different BO
236  *       and/or non-contiguous BO offset.
237  *
238  *
239  * 5) Requested mapping's range is a left aligned subset of the existent one,
240  *    but backed by a different BO. Hence, map the requested mapping and split
241  *    the existent one adjusting its BO offset.
242  *
243  *    ::
244  *
245  *	     0     a     2
246  *	old: |-----------| (bo_offset=n)
247  *
248  *	     0  b  1
249  *	req: |-----|       (bo_offset=n)
250  *
251  *	     0  b  1  a' 2
252  *	new: |-----|-----| (b.bo_offset=n, a.bo_offset=n+1)
253  *
254  *    .. note::
255  *       We expect to see the same result for a request with a different BO
256  *       and/or non-contiguous BO offset.
257  *
258  *
259  * 6) Existent mapping is a superset of the requested mapping. Split it up, but
260  *    indicate that the backing PTEs could be kept.
261  *
262  *    ::
263  *
264  *	     0     a     2
265  *	old: |-----------| (bo_offset=n)
266  *
267  *	     0  a  1
268  *	req: |-----|       (bo_offset=n)
269  *
270  *	     0  a  1  a' 2
271  *	new: |-----|-----| (a.bo_offset=n, a'.bo_offset=n+1)
272  *
273  *
274  * 7) Requested mapping's range is a right aligned subset of the existent one,
275  *    but backed by a different BO. Hence, map the requested mapping and split
276  *    the existent one, without adjusting the BO offset.
277  *
278  *    ::
279  *
280  *	     0     a     2
281  *	old: |-----------| (bo_offset=n)
282  *
283  *	           1  b  2
284  *	req:       |-----| (bo_offset=m)
285  *
286  *	     0  a  1  b  2
287  *	new: |-----|-----| (a.bo_offset=n,b.bo_offset=m)
288  *
289  *
290  * 8) Existent mapping is a superset of the requested mapping. Split it up, but
291  *    indicate that the backing PTEs could be kept.
292  *
293  *    ::
294  *
295  *	     0     a     2
296  *	old: |-----------| (bo_offset=n)
297  *
298  *	           1  a  2
299  *	req:       |-----| (bo_offset=n+1)
300  *
301  *	     0  a' 1  a  2
302  *	new: |-----|-----| (a'.bo_offset=n, a.bo_offset=n+1)
303  *
304  *
305  * 9) Existent mapping is overlapped at the end by the requested mapping backed
306  *    by a different BO. Hence, map the requested mapping and split up the
307  *    existent one, without adjusting the BO offset.
308  *
309  *    ::
310  *
311  *	     0     a     2
312  *	old: |-----------|       (bo_offset=n)
313  *
314  *	           1     b     3
315  *	req:       |-----------| (bo_offset=m)
316  *
317  *	     0  a  1     b     3
318  *	new: |-----|-----------| (a.bo_offset=n,b.bo_offset=m)
319  *
320  *
321  * 10) Existent mapping is overlapped by the requested mapping, both having the
322  *     same backing BO with a contiguous offset. Indicate the backing PTEs of
323  *     the old mapping could be kept.
324  *
325  *     ::
326  *
327  *	      0     a     2
328  *	 old: |-----------|       (bo_offset=n)
329  *
330  *	            1     a     3
331  *	 req:       |-----------| (bo_offset=n+1)
332  *
333  *	      0  a' 1     a     3
334  *	 new: |-----|-----------| (a'.bo_offset=n, a.bo_offset=n+1)
335  *
336  *
337  * 11) Requested mapping's range is a centered subset of the existent one
338  *     having a different backing BO. Hence, map the requested mapping and split
339  *     up the existent one in two mappings, adjusting the BO offset of the right
340  *     one accordingly.
341  *
342  *     ::
343  *
344  *	      0        a        3
345  *	 old: |-----------------| (bo_offset=n)
346  *
347  *	            1  b  2
348  *	 req:       |-----|       (bo_offset=m)
349  *
350  *	      0  a  1  b  2  a' 3
351  *	 new: |-----|-----|-----| (a.bo_offset=n,b.bo_offset=m,a'.bo_offset=n+2)
352  *
353  *
354  * 12) Requested mapping is a contiguous subset of the existent one. Split it
355  *     up, but indicate that the backing PTEs could be kept.
356  *
357  *     ::
358  *
359  *	      0        a        3
360  *	 old: |-----------------| (bo_offset=n)
361  *
362  *	            1  a  2
363  *	 req:       |-----|       (bo_offset=n+1)
364  *
365  *	      0  a' 1  a  2 a'' 3
366  *	 new: |-----|-----|-----| (a'.bo_offset=n, a.bo_offset=n+1, a''.bo_offset=n+2)
367  *
368  *
369  * 13) Existent mapping is a right aligned subset of the requested one, hence
370  *     replace the existent one.
371  *
372  *     ::
373  *
374  *	            1  a  2
375  *	 old:       |-----| (bo_offset=n+1)
376  *
377  *	      0     a     2
378  *	 req: |-----------| (bo_offset=n)
379  *
380  *	      0     a     2
381  *	 new: |-----------| (bo_offset=n)
382  *
383  *     .. note::
384  *        We expect to see the same result for a request with a different bo
385  *        and/or non-contiguous bo_offset.
386  *
387  *
388  * 14) Existent mapping is a centered subset of the requested one, hence
389  *     replace the existent one.
390  *
391  *     ::
392  *
393  *	            1  a  2
394  *	 old:       |-----| (bo_offset=n+1)
395  *
396  *	      0        a       3
397  *	 req: |----------------| (bo_offset=n)
398  *
399  *	      0        a       3
400  *	 new: |----------------| (bo_offset=n)
401  *
402  *     .. note::
403  *        We expect to see the same result for a request with a different bo
404  *        and/or non-contiguous bo_offset.
405  *
406  *
407  * 15) Existent mapping is overlapped at the beginning by the requested mapping
408  *     backed by a different BO. Hence, map the requested mapping and split up
409  *     the existent one, adjusting its BO offset accordingly.
410  *
411  *     ::
412  *
413  *	            1     a     3
414  *	 old:       |-----------| (bo_offset=n)
415  *
416  *	      0     b     2
417  *	 req: |-----------|       (bo_offset=m)
418  *
419  *	      0     b     2  a' 3
420  *	 new: |-----------|-----| (b.bo_offset=m,a.bo_offset=n+2)
421  */
422 
423 /**
424  * DOC: Madvise Logic - Splitting and Traversal
425  *
426  * This logic handles GPU VA range updates by generating remap and map operations
427  * without performing unmaps or merging existing mappings.
428  *
429  * 1) The requested range lies entirely within a single drm_gpuva. The logic splits
430  * the existing mapping at the start and end boundaries and inserts a new map.
431  *
432  * ::
 *
433  *              a      start    end     b
434  *         pre: |-----------------------|
435  *                     drm_gpuva1
436  *
437  *              a      start    end     b
438  *         new: |-----|=========|-------|
439  *               remap   map      remap
440  *
441  * one REMAP and one MAP: same behaviour as SPLIT and MERGE
442  *
443  * 2) The requested range spans multiple drm_gpuva regions. The logic traverses
444  * across boundaries, remapping the start and end segments, and inserting two
445  * map operations to cover the full range.
446  *
447  * ::
 *
 *              a       start      b              c        end       d
448  *         pre: |------------------|--------------|------------------|
449  *                    drm_gpuva1      drm_gpuva2         drm_gpuva3
450  *
451  *              a       start      b              c        end       d
452  *         new: |-------|==========|--------------|========|---------|
453  *                remap1   map1       drm_gpuva2    map2     remap2
454  *
455  * two REMAPS and two MAPS
456  *
457  * 3) Either start or end lies within a drm_gpuva. A single remap and map operation
458  * are generated to update the affected portion.
459  *
460  *
461  * ::
 *
 *              a/start            b              c        end       d
462  *         pre: |------------------|--------------|------------------|
463  *                    drm_gpuva1      drm_gpuva2         drm_gpuva3
464  *
465  *              a/start            b              c        end       d
466  *         new: |------------------|--------------|========|---------|
467  *                drm_gpuva1         drm_gpuva2     map1     remap1
468  *
469  * ::
 *
 *              a       start      b              c/end              d
470  *         pre: |------------------|--------------|------------------|
471  *                    drm_gpuva1      drm_gpuva2         drm_gpuva3
472  *
473  *              a       start      b              c/end              d
474  *         new: |-------|==========|--------------|------------------|
475  *                remap1   map1       drm_gpuva2        drm_gpuva3
476  *
477  * one REMAP and one MAP
478  *
479  * 4) Both start and end align with existing drm_gpuva boundaries. No operations
480  * are needed as the range is already covered.
481  *
482  * 5) No existing drm_gpuvas. No operations.
483  *
484  * Unlike drm_gpuvm_sm_map_ops_create(), this logic avoids unmaps and merging,
485  * focusing solely on remap and map operations for efficient traversal and update.
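 *
 * A rough sketch of consuming such an op list; driver_madvise_apply() is a
 * hypothetical placeholder and error handling is omitted::
 *
 *	drm_gpuva_for_each_op(op, ops) {
 *		switch (op->op) {
 *		case DRM_GPUVA_OP_REMAP:
 *			// Split the existing mapping at the range boundary,
 *			// analogous to the remap handling in the examples
 *			// below; the backing PTEs can typically be kept.
 *			break;
 *		case DRM_GPUVA_OP_MAP:
 *			// Apply the new attributes to the covered range.
 *			driver_madvise_apply(op);
 *			break;
 *		default:
 *			// No unmap operations are generated by this logic.
 *			break;
 *		}
 *	}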
486  */
487 
488 /**
489  * DOC: Locking
490  *
491  * In terms of managing &drm_gpuva entries, DRM GPUVM does not take care of
492  * locking itself; it is the driver's responsibility to take care of locking.
493  * Drivers might want to protect the following operations: inserting, removing
494  * and iterating &drm_gpuva objects as well as generating all kinds of
495  * operations, such as split / merge or prefetch.
496  *
497  * DRM GPUVM also does not take care of the locking of the backing
498  * &drm_gem_object buffers' GPU VA lists and &drm_gpuvm_bo abstractions by
499  * itself; drivers are responsible for enforcing mutual exclusion using either the
500  * GEM's dma_resv lock or alternatively a driver specific external lock. For the
501  * latter see also drm_gem_gpuva_set_lock().
502  *
503  * However, DRM GPUVM contains lockdep checks to ensure callers of its API hold
504  * the corresponding lock whenever the &drm_gem_object's GPU VA list is accessed
505  * by functions such as drm_gpuva_link() or drm_gpuva_unlink(), but also
506  * drm_gpuvm_bo_obtain() and drm_gpuvm_bo_put().
507  *
508  * The latter is required since on creation and destruction of a &drm_gpuvm_bo
509  * the &drm_gpuvm_bo is attached to / removed from the &drm_gem_object's gpuva list.
510  * Subsequent calls to drm_gpuvm_bo_obtain() for the same &drm_gpuvm and
511  * &drm_gem_object must be able to observe previous creations and destructions
512  * of &drm_gpuvm_bos in order to keep instances unique.
513  *
514  * The &drm_gpuvm's lists for keeping track of external and evicted objects are
515  * protected against concurrent insertion / removal and iteration internally.
516  *
517  * However, drivers still need to protect concurrent calls to functions
518  * iterating those lists, namely drm_gpuvm_prepare_objects() and
519  * drm_gpuvm_validate().
520  *
521  * Alternatively, drivers can set the &DRM_GPUVM_RESV_PROTECTED flag to indicate
522  * that the corresponding &dma_resv locks are held in order to protect the
523  * lists. If &DRM_GPUVM_RESV_PROTECTED is set, internal locking is disabled and
524  * the corresponding lockdep checks are enabled. This is an optimization for
525  * drivers which are capable of taking the corresponding &dma_resv locks and
526  * hence do not require internal locking.
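 *
 * As a minimal sketch, a driver relying on &dma_resv protection could set up
 * its VM as follows. The driver_vm_create() wrapper, driver_gpuvm_ops and the
 * chosen VA space layout are placeholders; &drm_gpuvm_ops.vm_free must be
 * provided for the final drm_gpuvm_put() to succeed::
 *
 *	struct drm_gpuvm *driver_vm_create(struct drm_device *drm)
 *	{
 *		struct drm_gpuvm *gpuvm;
 *		struct drm_gem_object *r_obj;
 *
 *		gpuvm = kzalloc(sizeof(*gpuvm), GFP_KERNEL);
 *		if (!gpuvm)
 *			return ERR_PTR(-ENOMEM);
 *
 *		// Dummy GEM object providing the common dma-resv.
 *		r_obj = drm_gpuvm_resv_object_alloc(drm);
 *		if (!r_obj) {
 *			kfree(gpuvm);
 *			return ERR_PTR(-ENOMEM);
 *		}
 *
 *		drm_gpuvm_init(gpuvm, "driver-vm", DRM_GPUVM_RESV_PROTECTED, drm,
 *			       r_obj, 0, 1ull << 48, 0, 0x1000, &driver_gpuvm_ops);
 *
 *		// drm_gpuvm_init() takes its own reference on the resv object.
 *		drm_gem_object_put(r_obj);
 *
 *		return gpuvm;
 *	}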
527  */
528 
529 /**
530  * DOC: Examples
531  *
532  * This section gives two examples of how to let the DRM GPUVA Manager generate
533  * &drm_gpuva_op in order to satisfy a given map or unmap request and how to
534  * make use of them.
535  *
536  * The code below is strictly limited to illustrating the generic usage pattern.
537  * To maintain simplicity, it doesn't make use of any abstractions for common
538  * code, different (asynchronous) stages with fence signalling critical paths,
539  * any other helpers or error handling in terms of freeing memory and dropping
540  * previously taken locks.
541  *
542  * 1) Obtain a list of &drm_gpuva_op to create a new mapping::
543  *
544  *	// Allocates a new &drm_gpuva.
545  *	struct drm_gpuva * driver_gpuva_alloc(void);
546  *
547  *	// Typically drivers would embed the &drm_gpuvm and &drm_gpuva
548  *	// structure in individual driver structures and lock the dma-resv with
549  *	// drm_exec or similar helpers.
550  *	int driver_mapping_create(struct drm_gpuvm *gpuvm,
551  *				  u64 addr, u64 range,
552  *				  struct drm_gem_object *obj, u64 offset)
553  *	{
554  *		struct drm_gpuvm_map_req map_req = {
555  *		        .map.va.addr = addr,
556  *	                .map.va.range = range,
557  *	                .map.gem.obj = obj,
558  *	                .map.gem.offset = offset,
559  *	           };
560  *		struct drm_gpuva_ops *ops;
561  *		struct drm_gpuva_op *op;
562  *		struct drm_gpuvm_bo *vm_bo;
563  *
564  *		driver_lock_va_space();
565  *		ops = drm_gpuvm_sm_map_ops_create(gpuvm, &map_req);
566  *		if (IS_ERR(ops))
567  *			return PTR_ERR(ops);
568  *
569  *		vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);
570  *		if (IS_ERR(vm_bo))
571  *			return PTR_ERR(vm_bo);
572  *
573  *		drm_gpuva_for_each_op(op, ops) {
574  *			struct drm_gpuva *va;
575  *
576  *			switch (op->op) {
577  *			case DRM_GPUVA_OP_MAP:
578  *				va = driver_gpuva_alloc();
579  *				if (!va)
580  *					; // unwind previous VA space updates,
581  *					  // free memory and unlock
582  *
583  *				driver_vm_map();
584  *				drm_gpuva_map(gpuvm, va, &op->map);
585  *				drm_gpuva_link(va, vm_bo);
586  *
587  *				break;
588  *			case DRM_GPUVA_OP_REMAP: {
589  *				struct drm_gpuva *prev = NULL, *next = NULL;
590  *
591  *				va = op->remap.unmap->va;
592  *
593  *				if (op->remap.prev) {
594  *					prev = driver_gpuva_alloc();
595  *					if (!prev)
596  *						; // unwind previous VA space
597  *						  // updates, free memory and
598  *						  // unlock
599  *				}
600  *
601  *				if (op->remap.next) {
602  *					next = driver_gpuva_alloc();
603  *					if (!next)
604  *						; // unwind previous VA space
605  *						  // updates, free memory and
606  *						  // unlock
607  *				}
608  *
609  *				driver_vm_remap();
610  *				drm_gpuva_remap(prev, next, &op->remap);
611  *
612  *				if (prev)
613  *					drm_gpuva_link(prev, va->vm_bo);
614  *				if (next)
615  *					drm_gpuva_link(next, va->vm_bo);
616  *				drm_gpuva_unlink(va);
617  *
618  *				break;
619  *			}
620  *			case DRM_GPUVA_OP_UNMAP:
621  *				va = op->unmap->va;
622  *
623  *				driver_vm_unmap();
624  *				drm_gpuva_unlink(va);
625  *				drm_gpuva_unmap(&op->unmap);
626  *
627  *				break;
628  *			default:
629  *				break;
630  *			}
631  *		}
632  *		drm_gpuvm_bo_put(vm_bo);
633  *		driver_unlock_va_space();
634  *
635  *		return 0;
636  *	}
637  *
638  * 2) Receive a callback for each &drm_gpuva_op to create a new mapping::
639  *
640  *	struct driver_context {
641  *		struct drm_gpuvm *gpuvm;
642  *		struct drm_gpuvm_bo *vm_bo;
643  *		struct drm_gpuva *new_va;
644  *		struct drm_gpuva *prev_va;
645  *		struct drm_gpuva *next_va;
646  *	};
647  *
648  *	// ops to pass to drm_gpuvm_init()
649  *	static const struct drm_gpuvm_ops driver_gpuvm_ops = {
650  *		.sm_step_map = driver_gpuva_map,
651  *		.sm_step_remap = driver_gpuva_remap,
652  *		.sm_step_unmap = driver_gpuva_unmap,
653  *	};
654  *
655  *	// Typically drivers would embed the &drm_gpuvm and &drm_gpuva
656  *	// structure in individual driver structures and lock the dma-resv with
657  *	// drm_exec or similar helpers.
658  *	int driver_mapping_create(struct drm_gpuvm *gpuvm,
659  *				  u64 addr, u64 range,
660  *				  struct drm_gem_object *obj, u64 offset)
661  *	{
662  *		struct driver_context ctx;
664  *		struct drm_gpuva_ops *ops;
665  *		struct drm_gpuva_op *op;
666  *		int ret = 0;
667  *
668  *		ctx.gpuvm = gpuvm;
669  *
670  *		ctx.new_va = kzalloc(sizeof(*ctx.new_va), GFP_KERNEL);
671  *		ctx.prev_va = kzalloc(sizeof(*ctx.prev_va), GFP_KERNEL);
672  *		ctx.next_va = kzalloc(sizeof(*ctx.next_va), GFP_KERNEL);
673  *		ctx.vm_bo = drm_gpuvm_bo_create(gpuvm, obj);
674  *		if (!ctx.new_va || !ctx.prev_va || !ctx.next_va || !ctx.vm_bo) {
675  *			ret = -ENOMEM;
676  *			goto out;
677  *		}
678  *
679  *		// Typically protected with a driver specific GEM gpuva lock
680  *		// used in the fence signaling path for drm_gpuva_link() and
681  *		// drm_gpuva_unlink(), hence pre-allocate.
682  *		ctx.vm_bo = drm_gpuvm_bo_obtain_prealloc(ctx.vm_bo);
683  *
684  *		driver_lock_va_space();
685  *		ret = drm_gpuvm_sm_map(gpuvm, &ctx, addr, range, obj, offset);
686  *		driver_unlock_va_space();
687  *
688  *	out:
689  *		drm_gpuvm_bo_put(ctx.vm_bo);
690  *		kfree(ctx.new_va);
691  *		kfree(ctx.prev_va);
692  *		kfree(ctx.next_va);
693  *		return ret;
694  *	}
695  *
696  *	int driver_gpuva_map(struct drm_gpuva_op *op, void *__ctx)
697  *	{
698  *		struct driver_context *ctx = __ctx;
699  *
700  *		drm_gpuva_map(ctx->gpuvm, ctx->new_va, &op->map);
701  *
702  *		drm_gpuva_link(ctx->new_va, ctx->vm_bo);
703  *
704  *		// prevent the new GPUVA from being freed in
705  *		// driver_mapping_create()
706  *		ctx->new_va = NULL;
707  *
708  *		return 0;
709  *	}
710  *
711  *	int driver_gpuva_remap(struct drm_gpuva_op *op, void *__ctx)
712  *	{
713  *		struct driver_context *ctx = __ctx;
714  *		struct drm_gpuva *va = op->remap.unmap->va;
715  *
716  *		drm_gpuva_remap(ctx->prev_va, ctx->next_va, &op->remap);
717  *
718  *		if (op->remap.prev) {
719  *			drm_gpuva_link(ctx->prev_va, va->vm_bo);
720  *			ctx->prev_va = NULL;
721  *		}
722  *
723  *		if (op->remap.next) {
724  *			drm_gpuva_link(ctx->next_va, va->vm_bo);
725  *			ctx->next_va = NULL;
726  *		}
727  *
728  *		drm_gpuva_unlink(va);
729  *		kfree(va);
730  *
731  *		return 0;
732  *	}
733  *
734  *	int driver_gpuva_unmap(struct drm_gpuva_op *op, void *__ctx)
735  *	{
736  *		drm_gpuva_unlink(op->unmap.va);
737  *		drm_gpuva_unmap(&op->unmap);
738  *		kfree(op->unmap.va);
739  *
740  *		return 0;
741  *	}
742  */
743 
744 /**
745  * get_next_vm_bo_from_list() - get the next vm_bo element
746  * @__gpuvm: the &drm_gpuvm
747  * @__list_name: the name of the list we're iterating on
748  * @__local_list: a pointer to the local list used to store already iterated items
749  * @__prev_vm_bo: the previous element we got from get_next_vm_bo_from_list()
750  *
751  * This helper is here to provide lockless list iteration. Lockless as in, the
752  * iterator releases the lock immediately after picking the first element from
753  * the list, so list insertion and deletion can happen concurrently.
754  *
755  * Elements popped from the original list are kept in a local list, so removal
756  * and is_empty checks can still happen while we're iterating the list.
757  */
758 #define get_next_vm_bo_from_list(__gpuvm, __list_name, __local_list, __prev_vm_bo)	\
759 	({										\
760 		struct drm_gpuvm_bo *__vm_bo = NULL;					\
761 											\
762 		drm_gpuvm_bo_put(__prev_vm_bo);						\
763 											\
764 		spin_lock(&(__gpuvm)->__list_name.lock);				\
765 		if (!(__gpuvm)->__list_name.local_list)					\
766 			(__gpuvm)->__list_name.local_list = __local_list;		\
767 		else									\
768 			drm_WARN_ON((__gpuvm)->drm,					\
769 				    (__gpuvm)->__list_name.local_list != __local_list);	\
770 											\
771 		while (!list_empty(&(__gpuvm)->__list_name.list)) {			\
772 			__vm_bo = list_first_entry(&(__gpuvm)->__list_name.list,	\
773 						   struct drm_gpuvm_bo,			\
774 						   list.entry.__list_name);		\
775 			if (kref_get_unless_zero(&__vm_bo->kref)) {			\
776 				list_move_tail(&(__vm_bo)->list.entry.__list_name,	\
777 					       __local_list);				\
778 				break;							\
779 			} else {							\
780 				list_del_init(&(__vm_bo)->list.entry.__list_name);	\
781 				__vm_bo = NULL;						\
782 			}								\
783 		}									\
784 		spin_unlock(&(__gpuvm)->__list_name.lock);				\
785 											\
786 		__vm_bo;								\
787 	})
788 
789 /**
790  * for_each_vm_bo_in_list() - internal vm_bo list iterator
791  * @__gpuvm: the &drm_gpuvm
792  * @__list_name: the name of the list we're iterating on
793  * @__local_list: a pointer to the local list used to store already iterated items
794  * @__vm_bo: the struct drm_gpuvm_bo to assign in each iteration step
795  *
796  * This helper is here to provide lockless list iteration. Lockless as in, the
797  * iterator releases the lock immediately after picking the first element from the
798  * list, hence list insertion and deletion can happen concurrently.
799  *
800  * It is not allowed to re-assign the vm_bo pointer from inside this loop.
801  *
802  * Typical use:
803  *
804  *	struct drm_gpuvm_bo *vm_bo;
805  *	LIST_HEAD(my_local_list);
806  *
807  *	ret = 0;
808  *	for_each_vm_bo_in_list(gpuvm, <list_name>, &my_local_list, vm_bo) {
809  *		ret = do_something_with_vm_bo(..., vm_bo);
810  *		if (ret)
811  *			break;
812  *	}
813  *	// Drop ref in case we break out of the loop.
814  *	drm_gpuvm_bo_put(vm_bo);
815  *	restore_vm_bo_list(gpuvm, <list_name>, &my_local_list);
816  *
817  *
818  * Only used for internal list iterations, not meant to be exposed to the outside
819  * world.
820  */
821 #define for_each_vm_bo_in_list(__gpuvm, __list_name, __local_list, __vm_bo)	\
822 	for (__vm_bo = get_next_vm_bo_from_list(__gpuvm, __list_name,		\
823 						__local_list, NULL);		\
824 	     __vm_bo;								\
825 	     __vm_bo = get_next_vm_bo_from_list(__gpuvm, __list_name,		\
826 						__local_list, __vm_bo))
827 
828 static void
829 __restore_vm_bo_list(struct drm_gpuvm *gpuvm, spinlock_t *lock,
830 		     struct list_head *list, struct list_head **local_list)
831 {
832 	/* Merge back the two lists, moving local list elements to the
833 	 * head to preserve previous ordering, in case it matters.
834 	 */
835 	spin_lock(lock);
836 	if (*local_list) {
837 		list_splice(*local_list, list);
838 		*local_list = NULL;
839 	}
840 	spin_unlock(lock);
841 }
842 
843 /**
844  * restore_vm_bo_list() - move vm_bo elements back to their original list
845  * @__gpuvm: the &drm_gpuvm
846  * @__list_name: the name of the list we're iterating on
847  *
848  * When we're done iterating a vm_bo list, we should call restore_vm_bo_list()
849  * to restore the original state and let new iterations take place.
850  */
851 #define restore_vm_bo_list(__gpuvm, __list_name)			\
852 	__restore_vm_bo_list((__gpuvm), &(__gpuvm)->__list_name.lock,	\
853 			     &(__gpuvm)->__list_name.list,		\
854 			     &(__gpuvm)->__list_name.local_list)
855 
856 static void
857 cond_spin_lock(spinlock_t *lock, bool cond)
858 {
859 	if (cond)
860 		spin_lock(lock);
861 }
862 
863 static void
864 cond_spin_unlock(spinlock_t *lock, bool cond)
865 {
866 	if (cond)
867 		spin_unlock(lock);
868 }
869 
870 static void
871 __drm_gpuvm_bo_list_add(struct drm_gpuvm *gpuvm, spinlock_t *lock,
872 			struct list_head *entry, struct list_head *list)
873 {
874 	cond_spin_lock(lock, !!lock);
875 	if (list_empty(entry))
876 		list_add_tail(entry, list);
877 	cond_spin_unlock(lock, !!lock);
878 }
879 
880 /**
881  * drm_gpuvm_bo_list_add() - insert a vm_bo into the given list
882  * @__vm_bo: the &drm_gpuvm_bo
883  * @__list_name: the name of the list to insert into
884  * @__lock: whether to lock with the internal spinlock
885  *
886  * Inserts the given @__vm_bo into the list specified by @__list_name.
887  */
888 #define drm_gpuvm_bo_list_add(__vm_bo, __list_name, __lock)			\
889 	__drm_gpuvm_bo_list_add((__vm_bo)->vm,					\
890 				__lock ? &(__vm_bo)->vm->__list_name.lock :	\
891 					 NULL,					\
892 				&(__vm_bo)->list.entry.__list_name,		\
893 				&(__vm_bo)->vm->__list_name.list)
894 
895 static void
896 __drm_gpuvm_bo_list_del(struct drm_gpuvm *gpuvm, spinlock_t *lock,
897 			struct list_head *entry, bool init)
898 {
899 	cond_spin_lock(lock, !!lock);
900 	if (init) {
901 		if (!list_empty(entry))
902 			list_del_init(entry);
903 	} else {
904 		list_del(entry);
905 	}
906 	cond_spin_unlock(lock, !!lock);
907 }
908 
909 /**
910  * drm_gpuvm_bo_list_del_init() - remove a vm_bo from the given list
911  * @__vm_bo: the &drm_gpuvm_bo
912  * @__list_name: the name of the list to remove from
913  * @__lock: whether to lock with the internal spinlock
914  *
915  * Removes the given @__vm_bo from the list specified by @__list_name.
916  */
917 #define drm_gpuvm_bo_list_del_init(__vm_bo, __list_name, __lock)		\
918 	__drm_gpuvm_bo_list_del((__vm_bo)->vm,					\
919 				__lock ? &(__vm_bo)->vm->__list_name.lock :	\
920 					 NULL,					\
921 				&(__vm_bo)->list.entry.__list_name,		\
922 				true)
923 
924 /**
925  * drm_gpuvm_bo_list_del() - remove a vm_bo from the given list
926  * @__vm_bo: the &drm_gpuvm_bo
927  * @__list_name: the name of the list to remove from
928  * @__lock: whether to lock with the internal spinlock
929  *
930  * Removes the given @__vm_bo from the list specified by @__list_name.
931  */
932 #define drm_gpuvm_bo_list_del(__vm_bo, __list_name, __lock)			\
933 	__drm_gpuvm_bo_list_del((__vm_bo)->vm,					\
934 				__lock ? &(__vm_bo)->vm->__list_name.lock :	\
935 					 NULL,					\
936 				&(__vm_bo)->list.entry.__list_name,		\
937 				false)
938 
939 #define to_drm_gpuva(__node)	container_of((__node), struct drm_gpuva, rb.node)
940 
941 #define GPUVA_START(node) ((node)->va.addr)
942 #define GPUVA_LAST(node) ((node)->va.addr + (node)->va.range - 1)
943 
944 /* We do not actually use drm_gpuva_it_next(); tell the compiler not to complain
945  * about this.
946  */
947 INTERVAL_TREE_DEFINE(struct drm_gpuva, rb.node, u64, rb.__subtree_last,
948 		     GPUVA_START, GPUVA_LAST, static __maybe_unused,
949 		     drm_gpuva_it)
950 
951 static int __drm_gpuva_insert(struct drm_gpuvm *gpuvm,
952 			      struct drm_gpuva *va);
953 static void __drm_gpuva_remove(struct drm_gpuva *va);
954 
955 static bool
956 drm_gpuvm_check_overflow(u64 addr, u64 range)
957 {
958 	u64 end;
959 
960 	return check_add_overflow(addr, range, &end);
961 }
962 
963 static bool
964 drm_gpuvm_warn_check_overflow(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
965 {
966 	return drm_WARN(gpuvm->drm, drm_gpuvm_check_overflow(addr, range),
967 			"GPUVA address limited to %zu bytes.\n", sizeof(addr));
968 }
969 
970 static bool
971 drm_gpuvm_in_mm_range(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
972 {
973 	u64 end = addr + range;
974 	u64 mm_start = gpuvm->mm_start;
975 	u64 mm_end = mm_start + gpuvm->mm_range;
976 
977 	return addr >= mm_start && end <= mm_end;
978 }
979 
980 static bool
981 drm_gpuvm_in_kernel_node(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
982 {
983 	u64 end = addr + range;
984 	u64 kstart = gpuvm->kernel_alloc_node.va.addr;
985 	u64 krange = gpuvm->kernel_alloc_node.va.range;
986 	u64 kend = kstart + krange;
987 
988 	return krange && addr < kend && kstart < end;
989 }
990 
991 /**
992  * drm_gpuvm_range_valid() - checks whether the given range is valid for the
993  * given &drm_gpuvm
994  * @gpuvm: the GPUVM to check the range for
995  * @addr: the base address
996  * @range: the range starting from the base address
997  *
998  * Checks whether the range is within the GPUVM's managed boundaries.
999  *
1000  * Returns: true for a valid range, false otherwise
1001  */
1002 bool
1003 drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm,
1004 		      u64 addr, u64 range)
1005 {
1006 	return !drm_gpuvm_check_overflow(addr, range) &&
1007 	       drm_gpuvm_in_mm_range(gpuvm, addr, range) &&
1008 	       !drm_gpuvm_in_kernel_node(gpuvm, addr, range);
1009 }
1010 EXPORT_SYMBOL_GPL(drm_gpuvm_range_valid);
1011 
1012 static void
1013 drm_gpuvm_gem_object_free(struct drm_gem_object *obj)
1014 {
1015 	drm_gem_object_release(obj);
1016 	kfree(obj);
1017 }
1018 
1019 static const struct drm_gem_object_funcs drm_gpuvm_object_funcs = {
1020 	.free = drm_gpuvm_gem_object_free,
1021 };
1022 
1023 /**
1024  * drm_gpuvm_resv_object_alloc() - allocate a dummy &drm_gem_object
1025  * @drm: the drivers &drm_device
1026  *
1027  * Allocates a dummy &drm_gem_object which can be passed to drm_gpuvm_init() in
1028  * order to serve as the root GEM object providing the &dma_resv shared across
1029  * &drm_gem_objects local to a single GPUVM.
1030  *
1031  * Returns: the &drm_gem_object on success, NULL on failure
1032  */
1033 struct drm_gem_object *
1034 drm_gpuvm_resv_object_alloc(struct drm_device *drm)
1035 {
1036 	struct drm_gem_object *obj;
1037 
1038 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
1039 	if (!obj)
1040 		return NULL;
1041 
1042 	obj->funcs = &drm_gpuvm_object_funcs;
1043 	drm_gem_private_object_init(drm, obj, 0);
1044 
1045 	return obj;
1046 }
1047 EXPORT_SYMBOL_GPL(drm_gpuvm_resv_object_alloc);
1048 
1049 /**
1050  * drm_gpuvm_init() - initialize a &drm_gpuvm
1051  * @gpuvm: pointer to the &drm_gpuvm to initialize
1052  * @name: the name of the GPU VA space
1053  * @flags: the &drm_gpuvm_flags for this GPUVM
1054  * @drm: the &drm_device this VM resides in
1055  * @r_obj: the resv &drm_gem_object providing the GPUVM's common &dma_resv
1056  * @start_offset: the start offset of the GPU VA space
1057  * @range: the size of the GPU VA space
1058  * @reserve_offset: the start of the kernel reserved GPU VA area
1059  * @reserve_range: the size of the kernel reserved GPU VA area
1060  * @ops: &drm_gpuvm_ops called on &drm_gpuvm_sm_map / &drm_gpuvm_sm_unmap
1061  *
1062  * The &drm_gpuvm must be initialized with this function before use.
1063  *
1064  * Note that @gpuvm must be cleared to 0 before calling this function. The given
1065  * @name is expected to be managed by the surrounding driver structures.
1066  */
1067 void
1068 drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
1069 	       enum drm_gpuvm_flags flags,
1070 	       struct drm_device *drm,
1071 	       struct drm_gem_object *r_obj,
1072 	       u64 start_offset, u64 range,
1073 	       u64 reserve_offset, u64 reserve_range,
1074 	       const struct drm_gpuvm_ops *ops)
1075 {
1076 	gpuvm->rb.tree = RB_ROOT_CACHED;
1077 	INIT_LIST_HEAD(&gpuvm->rb.list);
1078 
1079 	INIT_LIST_HEAD(&gpuvm->extobj.list);
1080 	spin_lock_init(&gpuvm->extobj.lock);
1081 
1082 	INIT_LIST_HEAD(&gpuvm->evict.list);
1083 	spin_lock_init(&gpuvm->evict.lock);
1084 
1085 	kref_init(&gpuvm->kref);
1086 
1087 	gpuvm->name = name ? name : "unknown";
1088 	gpuvm->flags = flags;
1089 	gpuvm->ops = ops;
1090 	gpuvm->drm = drm;
1091 	gpuvm->r_obj = r_obj;
1092 
1093 	drm_gem_object_get(r_obj);
1094 
1095 	drm_gpuvm_warn_check_overflow(gpuvm, start_offset, range);
1096 	gpuvm->mm_start = start_offset;
1097 	gpuvm->mm_range = range;
1098 
1099 	memset(&gpuvm->kernel_alloc_node, 0, sizeof(struct drm_gpuva));
1100 	if (reserve_range) {
1101 		gpuvm->kernel_alloc_node.va.addr = reserve_offset;
1102 		gpuvm->kernel_alloc_node.va.range = reserve_range;
1103 
1104 		if (likely(!drm_gpuvm_warn_check_overflow(gpuvm, reserve_offset,
1105 							  reserve_range)))
1106 			__drm_gpuva_insert(gpuvm, &gpuvm->kernel_alloc_node);
1107 	}
1108 }
1109 EXPORT_SYMBOL_GPL(drm_gpuvm_init);
1110 
1111 static void
1112 drm_gpuvm_fini(struct drm_gpuvm *gpuvm)
1113 {
1114 	gpuvm->name = NULL;
1115 
1116 	if (gpuvm->kernel_alloc_node.va.range)
1117 		__drm_gpuva_remove(&gpuvm->kernel_alloc_node);
1118 
1119 	drm_WARN(gpuvm->drm, !RB_EMPTY_ROOT(&gpuvm->rb.tree.rb_root),
1120 		 "GPUVA tree is not empty, potentially leaking memory.\n");
1121 
1122 	drm_WARN(gpuvm->drm, !list_empty(&gpuvm->extobj.list),
1123 		 "Extobj list should be empty.\n");
1124 	drm_WARN(gpuvm->drm, !list_empty(&gpuvm->evict.list),
1125 		 "Evict list should be empty.\n");
1126 
1127 	drm_gem_object_put(gpuvm->r_obj);
1128 }
1129 
1130 static void
1131 drm_gpuvm_free(struct kref *kref)
1132 {
1133 	struct drm_gpuvm *gpuvm = container_of(kref, struct drm_gpuvm, kref);
1134 
1135 	drm_gpuvm_fini(gpuvm);
1136 
1137 	if (drm_WARN_ON(gpuvm->drm, !gpuvm->ops->vm_free))
1138 		return;
1139 
1140 	gpuvm->ops->vm_free(gpuvm);
1141 }
1142 
1143 /**
1144  * drm_gpuvm_put() - drop a struct drm_gpuvm reference
1145  * @gpuvm: the &drm_gpuvm to release the reference of
1146  *
1147  * This releases a reference to @gpuvm.
1148  *
1149  * This function may be called from atomic context.
1150  */
1151 void
1152 drm_gpuvm_put(struct drm_gpuvm *gpuvm)
1153 {
1154 	if (gpuvm)
1155 		kref_put(&gpuvm->kref, drm_gpuvm_free);
1156 }
1157 EXPORT_SYMBOL_GPL(drm_gpuvm_put);
1158 
1159 static int
1160 exec_prepare_obj(struct drm_exec *exec, struct drm_gem_object *obj,
1161 		 unsigned int num_fences)
1162 {
1163 	return num_fences ? drm_exec_prepare_obj(exec, obj, num_fences) :
1164 			    drm_exec_lock_obj(exec, obj);
1165 }
1166 
1167 /**
1168  * drm_gpuvm_prepare_vm() - prepare the GPUVM's common dma-resv
1169  * @gpuvm: the &drm_gpuvm
1170  * @exec: the &drm_exec context
1171  * @num_fences: the amount of &dma_fences to reserve
1172  *
1173  * Calls drm_exec_prepare_obj() for the GPUVM's dummy &drm_gem_object; if
1174  * @num_fences is zero drm_exec_lock_obj() is called instead.
1175  *
1176  * Using this function directly, it is the driver's responsibility to call
1177  * drm_exec_init() and drm_exec_fini() accordingly.
1178  *
1179  * Returns: 0 on success, negative error code on failure.
1180  */
1181 int
1182 drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm,
1183 		     struct drm_exec *exec,
1184 		     unsigned int num_fences)
1185 {
1186 	return exec_prepare_obj(exec, gpuvm->r_obj, num_fences);
1187 }
1188 EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_vm);
1189 
1190 static int
1191 __drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
1192 			    struct drm_exec *exec,
1193 			    unsigned int num_fences)
1194 {
1195 	struct drm_gpuvm_bo *vm_bo;
1196 	LIST_HEAD(extobjs);
1197 	int ret = 0;
1198 
1199 	for_each_vm_bo_in_list(gpuvm, extobj, &extobjs, vm_bo) {
1200 		ret = exec_prepare_obj(exec, vm_bo->obj, num_fences);
1201 		if (ret)
1202 			break;
1203 	}
1204 	/* Drop ref in case we break out of the loop. */
1205 	drm_gpuvm_bo_put(vm_bo);
1206 	restore_vm_bo_list(gpuvm, extobj);
1207 
1208 	return ret;
1209 }
1210 
1211 static int
1212 drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm,
1213 				 struct drm_exec *exec,
1214 				 unsigned int num_fences)
1215 {
1216 	struct drm_gpuvm_bo *vm_bo;
1217 	int ret = 0;
1218 
1219 	drm_gpuvm_resv_assert_held(gpuvm);
1220 	list_for_each_entry(vm_bo, &gpuvm->extobj.list, list.entry.extobj) {
1221 		ret = exec_prepare_obj(exec, vm_bo->obj, num_fences);
1222 		if (ret)
1223 			break;
1224 
1225 		if (vm_bo->evicted)
1226 			drm_gpuvm_bo_list_add(vm_bo, evict, false);
1227 	}
1228 
1229 	return ret;
1230 }
1231 
1232 /**
1233  * drm_gpuvm_prepare_objects() - prepare all associated BOs
1234  * @gpuvm: the &drm_gpuvm
1235  * @exec: the &drm_exec locking context
1236  * @num_fences: the amount of &dma_fences to reserve
1237  *
1238  * Calls drm_exec_prepare_obj() for all &drm_gem_objects the given
1239  * &drm_gpuvm contains mappings of; if @num_fences is zero drm_exec_lock_obj()
1240  * is called instead.
1241  *
1242  * Using this function directly, it is the driver's responsibility to call
1243  * drm_exec_init() and drm_exec_fini() accordingly.
1244  *
1245  * Note: This function is safe against concurrent insertion and removal of
1246  * external objects, however it is not safe against concurrent usage itself.
1247  *
1248  * Drivers need to make sure to protect this case with either an outer VM lock
1249  * or by calling drm_gpuvm_prepare_vm() before this function within the
1250  * drm_exec_until_all_locked() loop, such that the GPUVM's dma-resv lock ensures
1251  * mutual exclusion.
1252  *
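 * A minimal sketch of the latter pattern; gpuvm, exec and num_fences are
 * assumed to come from the surrounding driver code::
 *
 *	drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *	drm_exec_until_all_locked(exec) {
 *		// Locking the GPUVM's common dma-resv first serializes
 *		// concurrent callers of drm_gpuvm_prepare_objects().
 *		ret = drm_gpuvm_prepare_vm(gpuvm, exec, num_fences);
 *		drm_exec_retry_on_contention(exec);
 *		if (ret)
 *			goto err;
 *
 *		ret = drm_gpuvm_prepare_objects(gpuvm, exec, num_fences);
 *		drm_exec_retry_on_contention(exec);
 *		if (ret)
 *			goto err;
 *	}
 *
 *	return 0;
 *
 *	err:
 *		drm_exec_fini(exec);
 *		return ret;
 *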
1253  * Returns: 0 on success, negative error code on failure.
1254  */
1255 int
1256 drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
1257 			  struct drm_exec *exec,
1258 			  unsigned int num_fences)
1259 {
1260 	if (drm_gpuvm_resv_protected(gpuvm))
1261 		return drm_gpuvm_prepare_objects_locked(gpuvm, exec,
1262 							num_fences);
1263 	else
1264 		return __drm_gpuvm_prepare_objects(gpuvm, exec, num_fences);
1265 }
1266 EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_objects);
1267 
1268 /**
1269  * drm_gpuvm_prepare_range() - prepare all BOs mapped within a given range
1270  * @gpuvm: the &drm_gpuvm
1271  * @exec: the &drm_exec locking context
1272  * @addr: the start address within the VA space
1273  * @range: the range to iterate within the VA space
1274  * @num_fences: the amount of &dma_fences to reserve
1275  *
1276  * Calls drm_exec_prepare_obj() for all &drm_gem_objects mapped between @addr
1277  * and @addr + @range; if @num_fences is zero drm_exec_lock_obj() is called
1278  * instead.
1279  *
1280  * Returns: 0 on success, negative error code on failure.
1281  */
1282 int
1283 drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
1284 			u64 addr, u64 range, unsigned int num_fences)
1285 {
1286 	struct drm_gpuva *va;
1287 	u64 end = addr + range;
1288 	int ret;
1289 
1290 	drm_gpuvm_for_each_va_range(va, gpuvm, addr, end) {
1291 		struct drm_gem_object *obj = va->gem.obj;
1292 
1293 		ret = exec_prepare_obj(exec, obj, num_fences);
1294 		if (ret)
1295 			return ret;
1296 	}
1297 
1298 	return 0;
1299 }
1300 EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_range);
1301 
1302 /**
1303  * drm_gpuvm_exec_lock() - lock all dma-resv of all associated BOs
1304  * @vm_exec: the &drm_gpuvm_exec wrapper
1305  *
1306  * Acquires all dma-resv locks of all &drm_gem_objects the given
1307  * &drm_gpuvm contains mappings of.
1308  *
1309  * Additionally, when calling this function with struct drm_gpuvm_exec::extra
1310  * being set the driver receives the given @fn callback to lock additional
1311  * dma-resv in the context of the &drm_gpuvm_exec instance. Typically, drivers
1312  * would call drm_exec_prepare_obj() from within this callback.
1313  *
1314  * Returns: 0 on success, negative error code on failure.
1315  */
1316 int
1317 drm_gpuvm_exec_lock(struct drm_gpuvm_exec *vm_exec)
1318 {
1319 	struct drm_gpuvm *gpuvm = vm_exec->vm;
1320 	struct drm_exec *exec = &vm_exec->exec;
1321 	unsigned int num_fences = vm_exec->num_fences;
1322 	int ret;
1323 
1324 	drm_exec_init(exec, vm_exec->flags, 0);
1325 
1326 	drm_exec_until_all_locked(exec) {
1327 		ret = drm_gpuvm_prepare_vm(gpuvm, exec, num_fences);
1328 		drm_exec_retry_on_contention(exec);
1329 		if (ret)
1330 			goto err;
1331 
1332 		ret = drm_gpuvm_prepare_objects(gpuvm, exec, num_fences);
1333 		drm_exec_retry_on_contention(exec);
1334 		if (ret)
1335 			goto err;
1336 
1337 		if (vm_exec->extra.fn) {
1338 			ret = vm_exec->extra.fn(vm_exec);
1339 			drm_exec_retry_on_contention(exec);
1340 			if (ret)
1341 				goto err;
1342 		}
1343 	}
1344 
1345 	return 0;
1346 
1347 err:
1348 	drm_exec_fini(exec);
1349 	return ret;
1350 }
1351 EXPORT_SYMBOL_GPL(drm_gpuvm_exec_lock);
1352 
1353 static int
1354 fn_lock_array(struct drm_gpuvm_exec *vm_exec)
1355 {
1356 	struct {
1357 		struct drm_gem_object **objs;
1358 		unsigned int num_objs;
1359 	} *args = vm_exec->extra.priv;
1360 
1361 	return drm_exec_prepare_array(&vm_exec->exec, args->objs,
1362 				      args->num_objs, vm_exec->num_fences);
1363 }
1364 
1365 /**
1366  * drm_gpuvm_exec_lock_array() - lock all dma-resv of all associated BOs
1367  * @vm_exec: the &drm_gpuvm_exec wrapper
1368  * @objs: additional &drm_gem_objects to lock
1369  * @num_objs: the number of additional &drm_gem_objects to lock
1370  *
1371  * Acquires all dma-resv locks of all &drm_gem_objects the given &drm_gpuvm
1372  * contains mappings of, plus the ones given through @objs.
1373  *
1374  * Returns: 0 on success, negative error code on failure.
1375  */
1376 int
1377 drm_gpuvm_exec_lock_array(struct drm_gpuvm_exec *vm_exec,
1378 			  struct drm_gem_object **objs,
1379 			  unsigned int num_objs)
1380 {
1381 	struct {
1382 		struct drm_gem_object **objs;
1383 		unsigned int num_objs;
1384 	} args;
1385 
1386 	args.objs = objs;
1387 	args.num_objs = num_objs;
1388 
1389 	vm_exec->extra.fn = fn_lock_array;
1390 	vm_exec->extra.priv = &args;
1391 
1392 	return drm_gpuvm_exec_lock(vm_exec);
1393 }
1394 EXPORT_SYMBOL_GPL(drm_gpuvm_exec_lock_array);
1395 
1396 /**
1397  * drm_gpuvm_exec_lock_range() - prepare all BOs mapped within a given range
1398  * @vm_exec: the &drm_gpuvm_exec wrapper
1399  * @addr: the start address within the VA space
1400  * @range: the range to iterate within the VA space
1401  *
1402  * Acquires all dma-resv locks of all &drm_gem_objects mapped between @addr and
1403  * @addr + @range.
1404  *
1405  * Returns: 0 on success, negative error code on failure.
1406  */
1407 int
1408 drm_gpuvm_exec_lock_range(struct drm_gpuvm_exec *vm_exec,
1409 			  u64 addr, u64 range)
1410 {
1411 	struct drm_gpuvm *gpuvm = vm_exec->vm;
1412 	struct drm_exec *exec = &vm_exec->exec;
1413 	int ret;
1414 
1415 	drm_exec_init(exec, vm_exec->flags, 0);
1416 
1417 	drm_exec_until_all_locked(exec) {
1418 		ret = drm_gpuvm_prepare_range(gpuvm, exec, addr, range,
1419 					      vm_exec->num_fences);
1420 		drm_exec_retry_on_contention(exec);
1421 		if (ret)
1422 			goto err;
1423 	}
1424 
1425 	return ret;
1426 
1427 err:
1428 	drm_exec_fini(exec);
1429 	return ret;
1430 }
1431 EXPORT_SYMBOL_GPL(drm_gpuvm_exec_lock_range);
1432 
1433 static int
1434 __drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec)
1435 {
1436 	const struct drm_gpuvm_ops *ops = gpuvm->ops;
1437 	struct drm_gpuvm_bo *vm_bo;
1438 	LIST_HEAD(evict);
1439 	int ret = 0;
1440 
1441 	for_each_vm_bo_in_list(gpuvm, evict, &evict, vm_bo) {
1442 		ret = ops->vm_bo_validate(vm_bo, exec);
1443 		if (ret)
1444 			break;
1445 	}
1446 	/* Drop ref in case we break out of the loop. */
1447 	drm_gpuvm_bo_put(vm_bo);
1448 	restore_vm_bo_list(gpuvm, evict);
1449 
1450 	return ret;
1451 }
1452 
1453 static int
1454 drm_gpuvm_validate_locked(struct drm_gpuvm *gpuvm, struct drm_exec *exec)
1455 {
1456 	const struct drm_gpuvm_ops *ops = gpuvm->ops;
1457 	struct drm_gpuvm_bo *vm_bo, *next;
1458 	int ret = 0;
1459 
1460 	drm_gpuvm_resv_assert_held(gpuvm);
1461 
1462 	list_for_each_entry_safe(vm_bo, next, &gpuvm->evict.list,
1463 				 list.entry.evict) {
1464 		ret = ops->vm_bo_validate(vm_bo, exec);
1465 		if (ret)
1466 			break;
1467 
1468 		dma_resv_assert_held(vm_bo->obj->resv);
1469 		if (!vm_bo->evicted)
1470 			drm_gpuvm_bo_list_del_init(vm_bo, evict, false);
1471 	}
1472 
1473 	return ret;
1474 }
1475 
1476 /**
1477  * drm_gpuvm_validate() - validate all BOs marked as evicted
1478  * @gpuvm: the &drm_gpuvm to validate evicted BOs
1479  * @exec: the &drm_exec instance used for locking the GPUVM
1480  *
1481  * Calls the &drm_gpuvm_ops::vm_bo_validate callback for all evicted buffer
1482  * objects being mapped in the given &drm_gpuvm.
1483  *
1484  * Returns: 0 on success, negative error code on failure.
1485  */
1486 int
1487 drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec)
1488 {
1489 	const struct drm_gpuvm_ops *ops = gpuvm->ops;
1490 
1491 	if (unlikely(!ops || !ops->vm_bo_validate))
1492 		return -EOPNOTSUPP;
1493 
1494 	if (drm_gpuvm_resv_protected(gpuvm))
1495 		return drm_gpuvm_validate_locked(gpuvm, exec);
1496 	else
1497 		return __drm_gpuvm_validate(gpuvm, exec);
1498 }
1499 EXPORT_SYMBOL_GPL(drm_gpuvm_validate);
1500 
1501 /**
1502  * drm_gpuvm_resv_add_fence() - add fence to private and all extobj
1503  * dma-resv
1504  * @gpuvm: the &drm_gpuvm to add a fence to
1505  * @exec: the &drm_exec locking context
1506  * @fence: fence to add
1507  * @private_usage: private dma-resv usage
1508  * @extobj_usage: extobj dma-resv usage
1509  */
1510 void
1511 drm_gpuvm_resv_add_fence(struct drm_gpuvm *gpuvm,
1512 			 struct drm_exec *exec,
1513 			 struct dma_fence *fence,
1514 			 enum dma_resv_usage private_usage,
1515 			 enum dma_resv_usage extobj_usage)
1516 {
1517 	struct drm_gem_object *obj;
1518 	unsigned long index;
1519 
1520 	drm_exec_for_each_locked_object(exec, index, obj) {
1521 		dma_resv_assert_held(obj->resv);
1522 		dma_resv_add_fence(obj->resv, fence,
1523 				   drm_gpuvm_is_extobj(gpuvm, obj) ?
1524 				   extobj_usage : private_usage);
1525 	}
1526 }
1527 EXPORT_SYMBOL_GPL(drm_gpuvm_resv_add_fence);
1528 
1529 /**
1530  * drm_gpuvm_bo_create() - create a new instance of struct drm_gpuvm_bo
1531  * @gpuvm: The &drm_gpuvm the @obj is mapped in.
1532  * @obj: The &drm_gem_object being mapped in the @gpuvm.
1533  *
1534  * If provided by the driver, this function uses the &drm_gpuvm_ops
1535  * vm_bo_alloc() callback to allocate.
1536  *
1537  * Returns: a pointer to the &drm_gpuvm_bo on success, NULL on failure
1538  */
1539 struct drm_gpuvm_bo *
1540 drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
1541 		    struct drm_gem_object *obj)
1542 {
1543 	const struct drm_gpuvm_ops *ops = gpuvm->ops;
1544 	struct drm_gpuvm_bo *vm_bo;
1545 
1546 	if (ops && ops->vm_bo_alloc)
1547 		vm_bo = ops->vm_bo_alloc();
1548 	else
1549 		vm_bo = kzalloc(sizeof(*vm_bo), GFP_KERNEL);
1550 
1551 	if (unlikely(!vm_bo))
1552 		return NULL;
1553 
1554 	vm_bo->vm = drm_gpuvm_get(gpuvm);
1555 	vm_bo->obj = obj;
1556 	drm_gem_object_get(obj);
1557 
1558 	kref_init(&vm_bo->kref);
1559 	INIT_LIST_HEAD(&vm_bo->list.gpuva);
1560 	INIT_LIST_HEAD(&vm_bo->list.entry.gem);
1561 
1562 	INIT_LIST_HEAD(&vm_bo->list.entry.extobj);
1563 	INIT_LIST_HEAD(&vm_bo->list.entry.evict);
1564 
1565 	return vm_bo;
1566 }
1567 EXPORT_SYMBOL_GPL(drm_gpuvm_bo_create);
1568 
1569 static void
1570 drm_gpuvm_bo_destroy(struct kref *kref)
1571 {
1572 	struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo,
1573 						  kref);
1574 	struct drm_gpuvm *gpuvm = vm_bo->vm;
1575 	const struct drm_gpuvm_ops *ops = gpuvm->ops;
1576 	struct drm_gem_object *obj = vm_bo->obj;
1577 	bool lock = !drm_gpuvm_resv_protected(gpuvm);
1578 
1579 	if (!lock)
1580 		drm_gpuvm_resv_assert_held(gpuvm);
1581 
1582 	drm_gpuvm_bo_list_del(vm_bo, extobj, lock);
1583 	drm_gpuvm_bo_list_del(vm_bo, evict, lock);
1584 
1585 	drm_gem_gpuva_assert_lock_held(obj);
1586 	list_del(&vm_bo->list.entry.gem);
1587 
1588 	if (ops && ops->vm_bo_free)
1589 		ops->vm_bo_free(vm_bo);
1590 	else
1591 		kfree(vm_bo);
1592 
1593 	drm_gpuvm_put(gpuvm);
1594 	drm_gem_object_put(obj);
1595 }
1596 
1597 /**
1598  * drm_gpuvm_bo_put() - drop a struct drm_gpuvm_bo reference
1599  * @vm_bo: the &drm_gpuvm_bo to release the reference of
1600  *
1601  * This releases a reference to @vm_bo.
1602  *
 * If the reference count drops to zero, the &drm_gpuvm_bo is destroyed, which
 * includes removing it from the GEM's gpuva list. Hence, if a call to this
 * function can potentially let the reference count drop to zero, the caller
 * must hold the GEM's dma-resv or driver specific gpuva lock.
1607  *
1608  * This function may only be called from non-atomic context.
1609  *
1610  * Returns: true if vm_bo was destroyed, false otherwise.
1611  */
1612 bool
1613 drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo)
1614 {
1615 	might_sleep();
1616 
1617 	if (vm_bo)
1618 		return !!kref_put(&vm_bo->kref, drm_gpuvm_bo_destroy);
1619 
1620 	return false;
1621 }
1622 EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put);
1623 
1624 static struct drm_gpuvm_bo *
1625 __drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
1626 		    struct drm_gem_object *obj)
1627 {
1628 	struct drm_gpuvm_bo *vm_bo;
1629 
1630 	drm_gem_gpuva_assert_lock_held(obj);
1631 	drm_gem_for_each_gpuvm_bo(vm_bo, obj)
1632 		if (vm_bo->vm == gpuvm)
1633 			return vm_bo;
1634 
1635 	return NULL;
1636 }
1637 
1638 /**
1639  * drm_gpuvm_bo_find() - find the &drm_gpuvm_bo for the given
1640  * &drm_gpuvm and &drm_gem_object
1641  * @gpuvm: The &drm_gpuvm the @obj is mapped in.
1642  * @obj: The &drm_gem_object being mapped in the @gpuvm.
1643  *
1644  * Find the &drm_gpuvm_bo representing the combination of the given
1645  * &drm_gpuvm and &drm_gem_object. If found, increases the reference
1646  * count of the &drm_gpuvm_bo accordingly.
1647  *
1648  * Returns: a pointer to the &drm_gpuvm_bo on success, NULL on failure
1649  */
1650 struct drm_gpuvm_bo *
1651 drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
1652 		  struct drm_gem_object *obj)
1653 {
1654 	struct drm_gpuvm_bo *vm_bo = __drm_gpuvm_bo_find(gpuvm, obj);
1655 
1656 	return vm_bo ? drm_gpuvm_bo_get(vm_bo) : NULL;
1657 }
1658 EXPORT_SYMBOL_GPL(drm_gpuvm_bo_find);
1659 
1660 /**
 * drm_gpuvm_bo_obtain() - obtains an instance of the &drm_gpuvm_bo for the
1662  * given &drm_gpuvm and &drm_gem_object
1663  * @gpuvm: The &drm_gpuvm the @obj is mapped in.
1664  * @obj: The &drm_gem_object being mapped in the @gpuvm.
1665  *
1666  * Find the &drm_gpuvm_bo representing the combination of the given
1667  * &drm_gpuvm and &drm_gem_object. If found, increases the reference
1668  * count of the &drm_gpuvm_bo accordingly. If not found, allocates a new
1669  * &drm_gpuvm_bo.
1670  *
 * A new &drm_gpuvm_bo is added to the GEM's gpuva list.
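 *
 * A minimal usage sketch, assuming the GEM's GPUVA lock is held and va is a
 * &drm_gpuva already set up and inserted by the driver:
 *
 *    vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);
 *    if (IS_ERR(vm_bo))
 *        return PTR_ERR(vm_bo);
 *
 *    drm_gpuva_link(va, vm_bo);
 *
 *    // drm_gpuva_link() takes its own reference, hence the obtained one can
 *    // be dropped again
 *    drm_gpuvm_bo_put(vm_bo);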
1672  *
1673  * Returns: a pointer to the &drm_gpuvm_bo on success, an ERR_PTR on failure
1674  */
1675 struct drm_gpuvm_bo *
1676 drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
1677 		    struct drm_gem_object *obj)
1678 {
1679 	struct drm_gpuvm_bo *vm_bo;
1680 
1681 	vm_bo = drm_gpuvm_bo_find(gpuvm, obj);
1682 	if (vm_bo)
1683 		return vm_bo;
1684 
1685 	vm_bo = drm_gpuvm_bo_create(gpuvm, obj);
1686 	if (!vm_bo)
1687 		return ERR_PTR(-ENOMEM);
1688 
1689 	drm_gem_gpuva_assert_lock_held(obj);
1690 	list_add_tail(&vm_bo->list.entry.gem, &obj->gpuva.list);
1691 
1692 	return vm_bo;
1693 }
1694 EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain);
1695 
1696 /**
 * drm_gpuvm_bo_obtain_prealloc() - obtains an instance of the &drm_gpuvm_bo
1698  * for the given &drm_gpuvm and &drm_gem_object
1699  * @__vm_bo: A pre-allocated struct drm_gpuvm_bo.
1700  *
1701  * Find the &drm_gpuvm_bo representing the combination of the given
1702  * &drm_gpuvm and &drm_gem_object. If found, increases the reference
1703  * count of the found &drm_gpuvm_bo accordingly, while the @__vm_bo reference
1704  * count is decreased. If not found @__vm_bo is returned without further
1705  * increase of the reference count.
1706  *
 * A new &drm_gpuvm_bo is added to the GEM's gpuva list.
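 *
 * A minimal usage sketch; the &drm_gpuvm_bo is pre-allocated outside of the
 * dma-fence signalling critical path and resolved from within it:
 *
 *    vm_bo = drm_gpuvm_bo_create(gpuvm, obj);
 *    if (!vm_bo)
 *        return -ENOMEM;
 *
 *    // ... later, with the GEM's GPUVA lock held ...
 *    vm_bo = drm_gpuvm_bo_obtain_prealloc(vm_bo);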
1708  *
1709  * Returns: a pointer to the found &drm_gpuvm_bo or @__vm_bo if no existing
1710  * &drm_gpuvm_bo was found
1711  */
1712 struct drm_gpuvm_bo *
1713 drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *__vm_bo)
1714 {
1715 	struct drm_gpuvm *gpuvm = __vm_bo->vm;
1716 	struct drm_gem_object *obj = __vm_bo->obj;
1717 	struct drm_gpuvm_bo *vm_bo;
1718 
1719 	vm_bo = drm_gpuvm_bo_find(gpuvm, obj);
1720 	if (vm_bo) {
1721 		drm_gpuvm_bo_put(__vm_bo);
1722 		return vm_bo;
1723 	}
1724 
1725 	drm_gem_gpuva_assert_lock_held(obj);
1726 	list_add_tail(&__vm_bo->list.entry.gem, &obj->gpuva.list);
1727 
1728 	return __vm_bo;
1729 }
1730 EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain_prealloc);
1731 
1732 /**
1733  * drm_gpuvm_bo_extobj_add() - adds the &drm_gpuvm_bo to its &drm_gpuvm's
1734  * extobj list
1735  * @vm_bo: The &drm_gpuvm_bo to add to its &drm_gpuvm's the extobj list.
1736  *
 * Adds the given @vm_bo to its &drm_gpuvm's extobj list if it is not on the
 * list already and the corresponding &drm_gem_object actually is an external
 * object.
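 *
 * A minimal usage sketch when setting up a new mapping; for resv protected
 * GPUVMs the VM's dma-resv lock must be held:
 *
 *    vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);
 *    if (IS_ERR(vm_bo))
 *        return PTR_ERR(vm_bo);
 *
 *    drm_gpuvm_bo_extobj_add(vm_bo);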
1740  */
1741 void
1742 drm_gpuvm_bo_extobj_add(struct drm_gpuvm_bo *vm_bo)
1743 {
1744 	struct drm_gpuvm *gpuvm = vm_bo->vm;
1745 	bool lock = !drm_gpuvm_resv_protected(gpuvm);
1746 
1747 	if (!lock)
1748 		drm_gpuvm_resv_assert_held(gpuvm);
1749 
1750 	if (drm_gpuvm_is_extobj(gpuvm, vm_bo->obj))
1751 		drm_gpuvm_bo_list_add(vm_bo, extobj, lock);
1752 }
1753 EXPORT_SYMBOL_GPL(drm_gpuvm_bo_extobj_add);
1754 
1755 /**
 * drm_gpuvm_bo_evict() - add / remove a &drm_gpuvm_bo to / from the
 * &drm_gpuvm's evicted list
1758  * @vm_bo: the &drm_gpuvm_bo to add or remove
1759  * @evict: indicates whether the object is evicted
1760  *
 * Adds a &drm_gpuvm_bo to or removes it from the &drm_gpuvm's evicted list.
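 *
 * A minimal sketch of the two typical call sites, both with the backing
 * &drm_gem_object's dma-resv held:
 *
 *    // in the driver's eviction / move path
 *    drm_gpuvm_bo_evict(vm_bo, true);
 *
 *    // in the driver's &drm_gpuvm_ops::vm_bo_validate implementation, once
 *    // the backing storage is resident again
 *    drm_gpuvm_bo_evict(vm_bo, false);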
1762  */
1763 void
1764 drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict)
1765 {
1766 	struct drm_gpuvm *gpuvm = vm_bo->vm;
1767 	struct drm_gem_object *obj = vm_bo->obj;
1768 	bool lock = !drm_gpuvm_resv_protected(gpuvm);
1769 
1770 	dma_resv_assert_held(obj->resv);
1771 	vm_bo->evicted = evict;
1772 
1773 	/* Can't add external objects to the evicted list directly if not using
1774 	 * internal spinlocks, since in this case the evicted list is protected
1775 	 * with the VM's common dma-resv lock.
1776 	 */
1777 	if (drm_gpuvm_is_extobj(gpuvm, obj) && !lock)
1778 		return;
1779 
1780 	if (evict)
1781 		drm_gpuvm_bo_list_add(vm_bo, evict, lock);
1782 	else
1783 		drm_gpuvm_bo_list_del_init(vm_bo, evict, lock);
1784 }
1785 EXPORT_SYMBOL_GPL(drm_gpuvm_bo_evict);
1786 
1787 static int
1788 __drm_gpuva_insert(struct drm_gpuvm *gpuvm,
1789 		   struct drm_gpuva *va)
1790 {
1791 	struct rb_node *node;
1792 	struct list_head *head;
1793 
1794 	if (drm_gpuva_it_iter_first(&gpuvm->rb.tree,
1795 				    GPUVA_START(va),
1796 				    GPUVA_LAST(va)))
1797 		return -EEXIST;
1798 
1799 	va->vm = gpuvm;
1800 
1801 	drm_gpuva_it_insert(va, &gpuvm->rb.tree);
1802 
1803 	node = rb_prev(&va->rb.node);
1804 	if (node)
1805 		head = &(to_drm_gpuva(node))->rb.entry;
1806 	else
1807 		head = &gpuvm->rb.list;
1808 
1809 	list_add(&va->rb.entry, head);
1810 
1811 	return 0;
1812 }
1813 
1814 /**
1815  * drm_gpuva_insert() - insert a &drm_gpuva
1816  * @gpuvm: the &drm_gpuvm to insert the &drm_gpuva in
1817  * @va: the &drm_gpuva to insert
1818  *
1819  * Insert a &drm_gpuva with a given address and range into a
1820  * &drm_gpuvm.
1821  *
 * It is safe to use this function while iterating the GPU VA space with the
 * safe iterator variants, such as drm_gpuvm_for_each_va_safe() and
 * drm_gpuvm_for_each_va_range_safe().
1825  *
1826  * Returns: 0 on success, negative error code on failure.
1827  */
1828 int
1829 drm_gpuva_insert(struct drm_gpuvm *gpuvm,
1830 		 struct drm_gpuva *va)
1831 {
1832 	u64 addr = va->va.addr;
1833 	u64 range = va->va.range;
1834 	int ret;
1835 
1836 	if (unlikely(!drm_gpuvm_range_valid(gpuvm, addr, range)))
1837 		return -EINVAL;
1838 
1839 	ret = __drm_gpuva_insert(gpuvm, va);
1840 	if (likely(!ret))
1841 		/* Take a reference of the GPUVM for the successfully inserted
1842 		 * drm_gpuva. We can't take the reference in
		 * __drm_gpuva_insert() itself, since we don't want to increase
1844 		 * the reference count for the GPUVM's kernel_alloc_node.
1845 		 */
1846 		drm_gpuvm_get(gpuvm);
1847 
1848 	return ret;
1849 }
1850 EXPORT_SYMBOL_GPL(drm_gpuva_insert);
1851 
1852 static void
1853 __drm_gpuva_remove(struct drm_gpuva *va)
1854 {
1855 	drm_gpuva_it_remove(va, &va->vm->rb.tree);
1856 	list_del_init(&va->rb.entry);
1857 }
1858 
1859 /**
1860  * drm_gpuva_remove() - remove a &drm_gpuva
1861  * @va: the &drm_gpuva to remove
1862  *
 * This removes the given &va from the underlying tree.
1864  *
 * It is safe to use this function while iterating the GPU VA space with the
 * safe iterator variants, such as drm_gpuvm_for_each_va_safe() and
 * drm_gpuvm_for_each_va_range_safe().
1868  */
1869 void
1870 drm_gpuva_remove(struct drm_gpuva *va)
1871 {
1872 	struct drm_gpuvm *gpuvm = va->vm;
1873 
1874 	if (unlikely(va == &gpuvm->kernel_alloc_node)) {
1875 		drm_WARN(gpuvm->drm, 1,
1876 			 "Can't destroy kernel reserved node.\n");
1877 		return;
1878 	}
1879 
1880 	__drm_gpuva_remove(va);
1881 	drm_gpuvm_put(va->vm);
1882 }
1883 EXPORT_SYMBOL_GPL(drm_gpuva_remove);
1884 
1885 /**
1886  * drm_gpuva_link() - link a &drm_gpuva
1887  * @va: the &drm_gpuva to link
1888  * @vm_bo: the &drm_gpuvm_bo to add the &drm_gpuva to
1889  *
1890  * This adds the given &va to the GPU VA list of the &drm_gpuvm_bo and the
1891  * &drm_gpuvm_bo to the &drm_gem_object it is associated with.
1892  *
1893  * For every &drm_gpuva entry added to the &drm_gpuvm_bo an additional
1894  * reference of the latter is taken.
1895  *
1896  * This function expects the caller to protect the GEM's GPUVA list against
 * concurrent access using either the GEM's dma_resv lock or a driver specific
1898  * lock set through drm_gem_gpuva_set_lock().
1899  */
1900 void
1901 drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo)
1902 {
1903 	struct drm_gem_object *obj = va->gem.obj;
1904 	struct drm_gpuvm *gpuvm = va->vm;
1905 
1906 	if (unlikely(!obj))
1907 		return;
1908 
1909 	drm_WARN_ON(gpuvm->drm, obj != vm_bo->obj);
1910 
1911 	va->vm_bo = drm_gpuvm_bo_get(vm_bo);
1912 
1913 	drm_gem_gpuva_assert_lock_held(obj);
1914 	list_add_tail(&va->gem.entry, &vm_bo->list.gpuva);
1915 }
1916 EXPORT_SYMBOL_GPL(drm_gpuva_link);
1917 
1918 /**
1919  * drm_gpuva_unlink() - unlink a &drm_gpuva
1920  * @va: the &drm_gpuva to unlink
1921  *
1922  * This removes the given &va from the GPU VA list of the &drm_gem_object it is
1923  * associated with.
1924  *
1925  * This removes the given &va from the GPU VA list of the &drm_gpuvm_bo and
1926  * the &drm_gpuvm_bo from the &drm_gem_object it is associated with in case
1927  * this call unlinks the last &drm_gpuva from the &drm_gpuvm_bo.
1928  *
1929  * For every &drm_gpuva entry removed from the &drm_gpuvm_bo a reference of
1930  * the latter is dropped.
1931  *
1932  * This function expects the caller to protect the GEM's GPUVA list against
 * concurrent access using either the GEM's dma_resv lock or a driver specific
1934  * lock set through drm_gem_gpuva_set_lock().
1935  */
1936 void
1937 drm_gpuva_unlink(struct drm_gpuva *va)
1938 {
1939 	struct drm_gem_object *obj = va->gem.obj;
1940 	struct drm_gpuvm_bo *vm_bo = va->vm_bo;
1941 
1942 	if (unlikely(!obj))
1943 		return;
1944 
1945 	drm_gem_gpuva_assert_lock_held(obj);
1946 	list_del_init(&va->gem.entry);
1947 
1948 	va->vm_bo = NULL;
1949 	drm_gpuvm_bo_put(vm_bo);
1950 }
1951 EXPORT_SYMBOL_GPL(drm_gpuva_unlink);
1952 
1953 /**
1954  * drm_gpuva_find_first() - find the first &drm_gpuva in the given range
1955  * @gpuvm: the &drm_gpuvm to search in
 * @addr: the &drm_gpuva's address
 * @range: the &drm_gpuva's range
 *
 * Returns: the first &drm_gpuva within the given range, or NULL if none is
 * found
1960  */
1961 struct drm_gpuva *
1962 drm_gpuva_find_first(struct drm_gpuvm *gpuvm,
1963 		     u64 addr, u64 range)
1964 {
1965 	u64 last = addr + range - 1;
1966 
1967 	return drm_gpuva_it_iter_first(&gpuvm->rb.tree, addr, last);
1968 }
1969 EXPORT_SYMBOL_GPL(drm_gpuva_find_first);
1970 
1971 /**
1972  * drm_gpuva_find() - find a &drm_gpuva
1973  * @gpuvm: the &drm_gpuvm to search in
 * @addr: the &drm_gpuva's address
 * @range: the &drm_gpuva's range
 *
 * Returns: the &drm_gpuva at the given @addr and with the given @range, or
 * NULL if none is found
1978  */
1979 struct drm_gpuva *
1980 drm_gpuva_find(struct drm_gpuvm *gpuvm,
1981 	       u64 addr, u64 range)
1982 {
1983 	struct drm_gpuva *va;
1984 
1985 	va = drm_gpuva_find_first(gpuvm, addr, range);
1986 	if (!va)
1987 		goto out;
1988 
1989 	if (va->va.addr != addr ||
1990 	    va->va.range != range)
1991 		goto out;
1992 
1993 	return va;
1994 
1995 out:
1996 	return NULL;
1997 }
1998 EXPORT_SYMBOL_GPL(drm_gpuva_find);
1999 
2000 /**
2001  * drm_gpuva_find_prev() - find the &drm_gpuva before the given address
2002  * @gpuvm: the &drm_gpuvm to search in
2003  * @start: the given GPU VA's start address
2004  *
2005  * Find the adjacent &drm_gpuva before the GPU VA with given &start address.
2006  *
2007  * Note that if there is any free space between the GPU VA mappings no mapping
2008  * is returned.
2009  *
2010  * Returns: a pointer to the found &drm_gpuva or NULL if none was found
2011  */
2012 struct drm_gpuva *
2013 drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start)
2014 {
2015 	if (!drm_gpuvm_range_valid(gpuvm, start - 1, 1))
2016 		return NULL;
2017 
2018 	return drm_gpuva_it_iter_first(&gpuvm->rb.tree, start - 1, start);
2019 }
2020 EXPORT_SYMBOL_GPL(drm_gpuva_find_prev);
2021 
2022 /**
2023  * drm_gpuva_find_next() - find the &drm_gpuva after the given address
2024  * @gpuvm: the &drm_gpuvm to search in
2025  * @end: the given GPU VA's end address
2026  *
2027  * Find the adjacent &drm_gpuva after the GPU VA with given &end address.
2028  *
2029  * Note that if there is any free space between the GPU VA mappings no mapping
2030  * is returned.
2031  *
2032  * Returns: a pointer to the found &drm_gpuva or NULL if none was found
2033  */
2034 struct drm_gpuva *
2035 drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end)
2036 {
2037 	if (!drm_gpuvm_range_valid(gpuvm, end, 1))
2038 		return NULL;
2039 
2040 	return drm_gpuva_it_iter_first(&gpuvm->rb.tree, end, end + 1);
2041 }
2042 EXPORT_SYMBOL_GPL(drm_gpuva_find_next);
2043 
2044 /**
2045  * drm_gpuvm_interval_empty() - indicate whether a given interval of the VA space
2046  * is empty
2047  * @gpuvm: the &drm_gpuvm to check the range for
2048  * @addr: the start address of the range
2049  * @range: the range of the interval
2050  *
2051  * Returns: true if the interval is empty, false otherwise
2052  */
2053 bool
2054 drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
2055 {
2056 	return !drm_gpuva_find_first(gpuvm, addr, range);
2057 }
2058 EXPORT_SYMBOL_GPL(drm_gpuvm_interval_empty);
2059 
2060 /**
2061  * drm_gpuva_map() - helper to insert a &drm_gpuva according to a
2062  * &drm_gpuva_op_map
2063  * @gpuvm: the &drm_gpuvm
2064  * @va: the &drm_gpuva to insert
2065  * @op: the &drm_gpuva_op_map to initialize @va with
2066  *
2067  * Initializes the @va from the @op and inserts it into the given @gpuvm.
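 *
 * A minimal sketch of a driver's &drm_gpuvm_ops::sm_step_map callback using
 * this helper; my_step_map(), struct my_bind_job and its members are
 * hypothetical driver code and the &drm_gpuva is assumed to be pre-allocated:
 *
 *    static int my_step_map(struct drm_gpuva_op *op, void *priv)
 *    {
 *        struct my_bind_job *job = priv;
 *
 *        drm_gpuva_map(job->vm, job->prealloc_va, &op->map);
 *        job->prealloc_va = NULL;
 *
 *        return 0;
 *    }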
2068  */
2069 void
2070 drm_gpuva_map(struct drm_gpuvm *gpuvm,
2071 	      struct drm_gpuva *va,
2072 	      struct drm_gpuva_op_map *op)
2073 {
2074 	drm_gpuva_init_from_op(va, op);
2075 	drm_gpuva_insert(gpuvm, va);
2076 }
2077 EXPORT_SYMBOL_GPL(drm_gpuva_map);
2078 
2079 /**
2080  * drm_gpuva_remap() - helper to remap a &drm_gpuva according to a
2081  * &drm_gpuva_op_remap
2082  * @prev: the &drm_gpuva to remap when keeping the start of a mapping
2083  * @next: the &drm_gpuva to remap when keeping the end of a mapping
2084  * @op: the &drm_gpuva_op_remap to initialize @prev and @next with
2085  *
2086  * Removes the currently mapped &drm_gpuva and remaps it using @prev and/or
2087  * @next.
2088  */
2089 void
2090 drm_gpuva_remap(struct drm_gpuva *prev,
2091 		struct drm_gpuva *next,
2092 		struct drm_gpuva_op_remap *op)
2093 {
2094 	struct drm_gpuva *va = op->unmap->va;
2095 	struct drm_gpuvm *gpuvm = va->vm;
2096 
2097 	drm_gpuva_remove(va);
2098 
2099 	if (op->prev) {
2100 		drm_gpuva_init_from_op(prev, op->prev);
2101 		drm_gpuva_insert(gpuvm, prev);
2102 	}
2103 
2104 	if (op->next) {
2105 		drm_gpuva_init_from_op(next, op->next);
2106 		drm_gpuva_insert(gpuvm, next);
2107 	}
2108 }
2109 EXPORT_SYMBOL_GPL(drm_gpuva_remap);
2110 
2111 /**
2112  * drm_gpuva_unmap() - helper to remove a &drm_gpuva according to a
2113  * &drm_gpuva_op_unmap
2114  * @op: the &drm_gpuva_op_unmap specifying the &drm_gpuva to remove
2115  *
2116  * Removes the &drm_gpuva associated with the &drm_gpuva_op_unmap.
2117  */
2118 void
2119 drm_gpuva_unmap(struct drm_gpuva_op_unmap *op)
2120 {
2121 	drm_gpuva_remove(op->va);
2122 }
2123 EXPORT_SYMBOL_GPL(drm_gpuva_unmap);
2124 
2125 static int
2126 op_map_cb(const struct drm_gpuvm_ops *fn, void *priv,
2127 	  const struct drm_gpuvm_map_req *req)
2128 {
2129 	struct drm_gpuva_op op = {};
2130 
2131 	if (!req)
2132 		return 0;
2133 
2134 	op.op = DRM_GPUVA_OP_MAP;
2135 	op.map.va.addr = req->map.va.addr;
2136 	op.map.va.range = req->map.va.range;
2137 	op.map.gem.obj = req->map.gem.obj;
2138 	op.map.gem.offset = req->map.gem.offset;
2139 
2140 	return fn->sm_step_map(&op, priv);
2141 }
2142 
2143 static int
2144 op_remap_cb(const struct drm_gpuvm_ops *fn, void *priv,
2145 	    struct drm_gpuva_op_map *prev,
2146 	    struct drm_gpuva_op_map *next,
2147 	    struct drm_gpuva_op_unmap *unmap)
2148 {
2149 	struct drm_gpuva_op op = {};
2150 	struct drm_gpuva_op_remap *r;
2151 
2152 	op.op = DRM_GPUVA_OP_REMAP;
2153 	r = &op.remap;
2154 	r->prev = prev;
2155 	r->next = next;
2156 	r->unmap = unmap;
2157 
2158 	return fn->sm_step_remap(&op, priv);
2159 }
2160 
2161 static int
2162 op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv,
2163 	    struct drm_gpuva *va, bool merge, bool madvise)
2164 {
2165 	struct drm_gpuva_op op = {};
2166 
2167 	if (madvise)
2168 		return 0;
2169 
2170 	op.op = DRM_GPUVA_OP_UNMAP;
2171 	op.unmap.va = va;
2172 	op.unmap.keep = merge;
2173 
2174 	return fn->sm_step_unmap(&op, priv);
2175 }
2176 
2177 static int
2178 __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
2179 		   const struct drm_gpuvm_ops *ops, void *priv,
2180 		   const struct drm_gpuvm_map_req *req,
2181 		   bool madvise)
2182 {
2183 	struct drm_gem_object *req_obj = req->map.gem.obj;
2184 	const struct drm_gpuvm_map_req *op_map = madvise ? NULL : req;
2185 	struct drm_gpuva *va, *next;
2186 	u64 req_offset = req->map.gem.offset;
2187 	u64 req_range = req->map.va.range;
2188 	u64 req_addr = req->map.va.addr;
2189 	u64 req_end = req_addr + req_range;
2190 	int ret;
2191 
2192 	if (unlikely(!drm_gpuvm_range_valid(gpuvm, req_addr, req_range)))
2193 		return -EINVAL;
2194 
2195 	drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req_addr, req_end) {
2196 		struct drm_gem_object *obj = va->gem.obj;
2197 		u64 offset = va->gem.offset;
2198 		u64 addr = va->va.addr;
2199 		u64 range = va->va.range;
2200 		u64 end = addr + range;
2201 		bool merge = !!va->gem.obj;
2202 
2203 		if (madvise && obj)
2204 			continue;
2205 
2206 		if (addr == req_addr) {
2207 			merge &= obj == req_obj &&
2208 				 offset == req_offset;
2209 
2210 			if (end == req_end) {
2211 				ret = op_unmap_cb(ops, priv, va, merge, madvise);
2212 				if (ret)
2213 					return ret;
2214 				break;
2215 			}
2216 
2217 			if (end < req_end) {
2218 				ret = op_unmap_cb(ops, priv, va, merge, madvise);
2219 				if (ret)
2220 					return ret;
2221 				continue;
2222 			}
2223 
2224 			if (end > req_end) {
2225 				struct drm_gpuva_op_map n = {
2226 					.va.addr = req_end,
2227 					.va.range = range - req_range,
2228 					.gem.obj = obj,
2229 					.gem.offset = offset + req_range,
2230 				};
2231 				struct drm_gpuva_op_unmap u = {
2232 					.va = va,
2233 					.keep = merge,
2234 				};
2235 
2236 				ret = op_remap_cb(ops, priv, NULL, &n, &u);
2237 				if (ret)
2238 					return ret;
2239 
2240 				if (madvise)
2241 					op_map = req;
2242 				break;
2243 			}
2244 		} else if (addr < req_addr) {
2245 			u64 ls_range = req_addr - addr;
2246 			struct drm_gpuva_op_map p = {
2247 				.va.addr = addr,
2248 				.va.range = ls_range,
2249 				.gem.obj = obj,
2250 				.gem.offset = offset,
2251 			};
2252 			struct drm_gpuva_op_unmap u = { .va = va };
2253 
2254 			merge &= obj == req_obj &&
2255 				 offset + ls_range == req_offset;
2256 			u.keep = merge;
2257 
2258 			if (end == req_end) {
2259 				ret = op_remap_cb(ops, priv, &p, NULL, &u);
2260 				if (ret)
2261 					return ret;
2262 
2263 				if (madvise)
2264 					op_map = req;
2265 				break;
2266 			}
2267 
2268 			if (end < req_end) {
2269 				ret = op_remap_cb(ops, priv, &p, NULL, &u);
2270 				if (ret)
2271 					return ret;
2272 
2273 				if (madvise) {
2274 					struct drm_gpuvm_map_req map_req = {
2275 						.map.va.addr =  req_addr,
2276 						.map.va.range = end - req_addr,
2277 					};
2278 
2279 					ret = op_map_cb(ops, priv, &map_req);
2280 					if (ret)
2281 						return ret;
2282 				}
2283 
2284 				continue;
2285 			}
2286 
2287 			if (end > req_end) {
2288 				struct drm_gpuva_op_map n = {
2289 					.va.addr = req_end,
2290 					.va.range = end - req_end,
2291 					.gem.obj = obj,
2292 					.gem.offset = offset + ls_range +
2293 						      req_range,
2294 				};
2295 
2296 				ret = op_remap_cb(ops, priv, &p, &n, &u);
2297 				if (ret)
2298 					return ret;
2299 
2300 				if (madvise)
2301 					op_map = req;
2302 				break;
2303 			}
2304 		} else if (addr > req_addr) {
2305 			merge &= obj == req_obj &&
2306 				 offset == req_offset +
2307 					   (addr - req_addr);
2308 
2309 			if (end == req_end) {
2310 				ret = op_unmap_cb(ops, priv, va, merge, madvise);
2311 				if (ret)
2312 					return ret;
2313 
2314 				break;
2315 			}
2316 
2317 			if (end < req_end) {
2318 				ret = op_unmap_cb(ops, priv, va, merge, madvise);
2319 				if (ret)
2320 					return ret;
2321 
2322 				continue;
2323 			}
2324 
2325 			if (end > req_end) {
2326 				struct drm_gpuva_op_map n = {
2327 					.va.addr = req_end,
2328 					.va.range = end - req_end,
2329 					.gem.obj = obj,
2330 					.gem.offset = offset + req_end - addr,
2331 				};
2332 				struct drm_gpuva_op_unmap u = {
2333 					.va = va,
2334 					.keep = merge,
2335 				};
2336 
2337 				ret = op_remap_cb(ops, priv, NULL, &n, &u);
2338 				if (ret)
2339 					return ret;
2340 
2341 				if (madvise) {
2342 					struct drm_gpuvm_map_req map_req = {
2343 						.map.va.addr =  addr,
2344 						.map.va.range = req_end - addr,
2345 					};
2346 
2347 					return op_map_cb(ops, priv, &map_req);
2348 				}
2349 				break;
2350 			}
2351 		}
2352 	}
2353 	return op_map_cb(ops, priv, op_map);
2354 }
2355 
2356 static int
2357 __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
2358 		     const struct drm_gpuvm_ops *ops, void *priv,
2359 		     u64 req_addr, u64 req_range)
2360 {
2361 	struct drm_gpuva *va, *next;
2362 	u64 req_end = req_addr + req_range;
2363 	int ret;
2364 
2365 	if (unlikely(!drm_gpuvm_range_valid(gpuvm, req_addr, req_range)))
2366 		return -EINVAL;
2367 
2368 	drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req_addr, req_end) {
2369 		struct drm_gpuva_op_map prev = {}, next = {};
2370 		bool prev_split = false, next_split = false;
2371 		struct drm_gem_object *obj = va->gem.obj;
2372 		u64 offset = va->gem.offset;
2373 		u64 addr = va->va.addr;
2374 		u64 range = va->va.range;
2375 		u64 end = addr + range;
2376 
2377 		if (addr < req_addr) {
2378 			prev.va.addr = addr;
2379 			prev.va.range = req_addr - addr;
2380 			prev.gem.obj = obj;
2381 			prev.gem.offset = offset;
2382 
2383 			prev_split = true;
2384 		}
2385 
2386 		if (end > req_end) {
2387 			next.va.addr = req_end;
2388 			next.va.range = end - req_end;
2389 			next.gem.obj = obj;
2390 			next.gem.offset = offset + (req_end - addr);
2391 
2392 			next_split = true;
2393 		}
2394 
2395 		if (prev_split || next_split) {
2396 			struct drm_gpuva_op_unmap unmap = { .va = va };
2397 
2398 			ret = op_remap_cb(ops, priv,
2399 					  prev_split ? &prev : NULL,
2400 					  next_split ? &next : NULL,
2401 					  &unmap);
2402 			if (ret)
2403 				return ret;
2404 		} else {
2405 			ret = op_unmap_cb(ops, priv, va, false, false);
2406 			if (ret)
2407 				return ret;
2408 		}
2409 	}
2410 
2411 	return 0;
2412 }
2413 
2414 /**
2415  * drm_gpuvm_sm_map() - calls the &drm_gpuva_op split/merge steps
2416  * @gpuvm: the &drm_gpuvm representing the GPU VA space
2417  * @priv: pointer to a driver private data structure
 * @req: ptr to the &struct drm_gpuvm_map_req describing the requested mapping
2419  *
2420  * This function iterates the given range of the GPU VA space. It utilizes the
2421  * &drm_gpuvm_ops to call back into the driver providing the split and merge
2422  * steps.
2423  *
2424  * Drivers may use these callbacks to update the GPU VA space right away within
2425  * the callback. In case the driver decides to copy and store the operations for
2426  * later processing neither this function nor &drm_gpuvm_sm_unmap is allowed to
2427  * be called before the &drm_gpuvm's view of the GPU VA space was
2428  * updated with the previous set of operations. To update the
2429  * &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
2430  * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
2431  * used.
2432  *
2433  * A sequence of callbacks can contain map, unmap and remap operations, but
2434  * the sequence of callbacks might also be empty if no operation is required,
2435  * e.g. if the requested mapping already exists in the exact same way.
2436  *
 * There can be an arbitrary number of unmap operations, a maximum of two remap
 * operations and a single map operation. The latter represents the original
2439  * map operation requested by the caller.
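 *
 * A minimal sketch of the callback based usage; my_step_map(), my_step_remap(),
 * my_step_unmap() and driver_priv are hypothetical driver code:
 *
 *    static const struct drm_gpuvm_ops my_gpuvm_ops = {
 *        .sm_step_map = my_step_map,
 *        .sm_step_remap = my_step_remap,
 *        .sm_step_unmap = my_step_unmap,
 *    };
 *
 *    // gpuvm must have been initialized with &my_gpuvm_ops
 *    ret = drm_gpuvm_sm_map(gpuvm, driver_priv, &req);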
2440  *
2441  * Returns: 0 on success or a negative error code
2442  */
2443 int
2444 drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
2445 		 const struct drm_gpuvm_map_req *req)
2446 {
2447 	const struct drm_gpuvm_ops *ops = gpuvm->ops;
2448 
2449 	if (unlikely(!(ops && ops->sm_step_map &&
2450 		       ops->sm_step_remap &&
2451 		       ops->sm_step_unmap)))
2452 		return -EINVAL;
2453 
2454 	return __drm_gpuvm_sm_map(gpuvm, ops, priv, req, false);
2455 }
2456 EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
2457 
2458 /**
2459  * drm_gpuvm_sm_unmap() - calls the &drm_gpuva_ops to split on unmap
2460  * @gpuvm: the &drm_gpuvm representing the GPU VA space
2461  * @priv: pointer to a driver private data structure
2462  * @req_addr: the start address of the range to unmap
2463  * @req_range: the range of the mappings to unmap
2464  *
2465  * This function iterates the given range of the GPU VA space. It utilizes the
2466  * &drm_gpuvm_ops to call back into the driver providing the operations to
2467  * unmap and, if required, split existent mappings.
2468  *
2469  * Drivers may use these callbacks to update the GPU VA space right away within
2470  * the callback. In case the driver decides to copy and store the operations for
2471  * later processing neither this function nor &drm_gpuvm_sm_map is allowed to be
2472  * called before the &drm_gpuvm's view of the GPU VA space was updated
2473  * with the previous set of operations. To update the &drm_gpuvm's view
2474  * of the GPU VA space drm_gpuva_insert(), drm_gpuva_destroy_locked() and/or
2475  * drm_gpuva_destroy_unlocked() should be used.
2476  *
2477  * A sequence of callbacks can contain unmap and remap operations, depending on
2478  * whether there are actual overlapping mappings to split.
2479  *
 * There can be an arbitrary number of unmap operations and a maximum of two
2481  * remap operations.
2482  *
2483  * Returns: 0 on success or a negative error code
2484  */
2485 int
2486 drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
2487 		   u64 req_addr, u64 req_range)
2488 {
2489 	const struct drm_gpuvm_ops *ops = gpuvm->ops;
2490 
2491 	if (unlikely(!(ops && ops->sm_step_remap &&
2492 		       ops->sm_step_unmap)))
2493 		return -EINVAL;
2494 
2495 	return __drm_gpuvm_sm_unmap(gpuvm, ops, priv,
2496 				    req_addr, req_range);
2497 }
2498 EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap);
2499 
2500 static int
2501 drm_gpuva_sm_step_lock(struct drm_gpuva_op *op, void *priv)
2502 {
2503 	struct drm_exec *exec = priv;
2504 
2505 	switch (op->op) {
2506 	case DRM_GPUVA_OP_REMAP:
2507 		if (op->remap.unmap->va->gem.obj)
2508 			return drm_exec_lock_obj(exec, op->remap.unmap->va->gem.obj);
2509 		return 0;
2510 	case DRM_GPUVA_OP_UNMAP:
2511 		if (op->unmap.va->gem.obj)
2512 			return drm_exec_lock_obj(exec, op->unmap.va->gem.obj);
2513 		return 0;
2514 	default:
2515 		return 0;
2516 	}
2517 }
2518 
2519 static const struct drm_gpuvm_ops lock_ops = {
2520 	.sm_step_map = drm_gpuva_sm_step_lock,
2521 	.sm_step_remap = drm_gpuva_sm_step_lock,
2522 	.sm_step_unmap = drm_gpuva_sm_step_lock,
2523 };
2524 
2525 /**
2526  * drm_gpuvm_sm_map_exec_lock() - locks the objects touched by a drm_gpuvm_sm_map()
2527  * @gpuvm: the &drm_gpuvm representing the GPU VA space
2528  * @exec: the &drm_exec locking context
2529  * @num_fences: for newly mapped objects, the # of fences to reserve
 * @req: ptr to the &struct drm_gpuvm_map_req describing the requested mapping
2531  *
2532  * This function locks (drm_exec_lock_obj()) objects that will be unmapped/
 * remapped, and locks+prepares (drm_exec_prepare_obj()) objects that
2534  * will be newly mapped.
2535  *
2536  * The expected usage is:
2537  *
2538  *    vm_bind {
2539  *        struct drm_exec exec;
2540  *
 *        // DRM_EXEC_IGNORE_DUPLICATES is required,
 *        // DRM_EXEC_INTERRUPTIBLE_WAIT is recommended:
 *        drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES |
 *                      DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
2543  *
2544  *        drm_exec_until_all_locked (&exec) {
2545  *            for_each_vm_bind_operation {
2546  *                switch (op->op) {
2547  *                case DRIVER_OP_UNMAP:
2548  *                    ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->addr, op->range);
2549  *                    break;
2550  *                case DRIVER_OP_MAP:
2551  *                    ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences, &req);
2552  *                    break;
2553  *                }
2554  *
2555  *                drm_exec_retry_on_contention(&exec);
2556  *                if (ret)
2557  *                    return ret;
2558  *            }
2559  *        }
2560  *    }
2561  *
2562  * This enables all locking to be performed before the driver begins modifying
2563  * the VM.  This is safe to do in the case of overlapping DRIVER_VM_BIND_OPs,
2564  * where an earlier op can alter the sequence of steps generated for a later
2565  * op, because the later altered step will involve the same GEM object(s)
2566  * already seen in the earlier locking step.  For example:
2567  *
2568  * 1) An earlier driver DRIVER_OP_UNMAP op removes the need for a
2569  *    DRM_GPUVA_OP_REMAP/UNMAP step.  This is safe because we've already
2570  *    locked the GEM object in the earlier DRIVER_OP_UNMAP op.
2571  *
2572  * 2) An earlier DRIVER_OP_MAP op overlaps with a later DRIVER_OP_MAP/UNMAP
2573  *    op, introducing a DRM_GPUVA_OP_REMAP/UNMAP that wouldn't have been
2574  *    required without the earlier DRIVER_OP_MAP.  This is safe because we've
2575  *    already locked the GEM object in the earlier DRIVER_OP_MAP step.
2576  *
 * Returns: 0 on success or a negative error code
2578  */
2579 int
2580 drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
2581 			   struct drm_exec *exec, unsigned int num_fences,
2582 			   struct drm_gpuvm_map_req *req)
2583 {
2584 	struct drm_gem_object *req_obj = req->map.gem.obj;
2585 
	if (req_obj) {
		int ret = drm_exec_prepare_obj(exec, req_obj, num_fences);

		if (ret)
			return ret;
	}

	return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec, req, false);
}
2595 EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_exec_lock);
2596 
2597 /**
2598  * drm_gpuvm_sm_unmap_exec_lock() - locks the objects touched by drm_gpuvm_sm_unmap()
2599  * @gpuvm: the &drm_gpuvm representing the GPU VA space
2600  * @exec: the &drm_exec locking context
2601  * @req_addr: the start address of the range to unmap
2602  * @req_range: the range of the mappings to unmap
2603  *
2604  * This function locks (drm_exec_lock_obj()) objects that will be unmapped/
2605  * remapped by drm_gpuvm_sm_unmap().
2606  *
2607  * See drm_gpuvm_sm_map_exec_lock() for expected usage.
2608  *
2609  * Returns: 0 on success or a negative error code
2610  */
2611 int
2612 drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
2613 			     u64 req_addr, u64 req_range)
2614 {
2615 	return __drm_gpuvm_sm_unmap(gpuvm, &lock_ops, exec,
2616 				    req_addr, req_range);
2617 }
2618 EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap_exec_lock);
2619 
2620 static struct drm_gpuva_op *
2621 gpuva_op_alloc(struct drm_gpuvm *gpuvm)
2622 {
2623 	const struct drm_gpuvm_ops *fn = gpuvm->ops;
2624 	struct drm_gpuva_op *op;
2625 
2626 	if (fn && fn->op_alloc)
2627 		op = fn->op_alloc();
2628 	else
2629 		op = kzalloc(sizeof(*op), GFP_KERNEL);
2630 
2631 	if (unlikely(!op))
2632 		return NULL;
2633 
2634 	return op;
2635 }
2636 
2637 static void
2638 gpuva_op_free(struct drm_gpuvm *gpuvm,
2639 	      struct drm_gpuva_op *op)
2640 {
2641 	const struct drm_gpuvm_ops *fn = gpuvm->ops;
2642 
2643 	if (fn && fn->op_free)
2644 		fn->op_free(op);
2645 	else
2646 		kfree(op);
2647 }
2648 
2649 static int
2650 drm_gpuva_sm_step(struct drm_gpuva_op *__op,
2651 		  void *priv)
2652 {
2653 	struct {
2654 		struct drm_gpuvm *vm;
2655 		struct drm_gpuva_ops *ops;
2656 	} *args = priv;
2657 	struct drm_gpuvm *gpuvm = args->vm;
2658 	struct drm_gpuva_ops *ops = args->ops;
2659 	struct drm_gpuva_op *op;
2660 
2661 	op = gpuva_op_alloc(gpuvm);
2662 	if (unlikely(!op))
2663 		goto err;
2664 
2665 	memcpy(op, __op, sizeof(*op));
2666 
2667 	if (op->op == DRM_GPUVA_OP_REMAP) {
2668 		struct drm_gpuva_op_remap *__r = &__op->remap;
2669 		struct drm_gpuva_op_remap *r = &op->remap;
2670 
2671 		r->unmap = kmemdup(__r->unmap, sizeof(*r->unmap),
2672 				   GFP_KERNEL);
2673 		if (unlikely(!r->unmap))
2674 			goto err_free_op;
2675 
2676 		if (__r->prev) {
2677 			r->prev = kmemdup(__r->prev, sizeof(*r->prev),
2678 					  GFP_KERNEL);
2679 			if (unlikely(!r->prev))
2680 				goto err_free_unmap;
2681 		}
2682 
2683 		if (__r->next) {
2684 			r->next = kmemdup(__r->next, sizeof(*r->next),
2685 					  GFP_KERNEL);
2686 			if (unlikely(!r->next))
2687 				goto err_free_prev;
2688 		}
2689 	}
2690 
2691 	list_add_tail(&op->entry, &ops->list);
2692 
2693 	return 0;
2694 
err_free_prev:
	kfree(op->remap.prev);
err_free_unmap:
	kfree(op->remap.unmap);
2699 err_free_op:
2700 	gpuva_op_free(gpuvm, op);
2701 err:
2702 	return -ENOMEM;
2703 }
2704 
2705 static const struct drm_gpuvm_ops gpuvm_list_ops = {
2706 	.sm_step_map = drm_gpuva_sm_step,
2707 	.sm_step_remap = drm_gpuva_sm_step,
2708 	.sm_step_unmap = drm_gpuva_sm_step,
2709 };
2710 
2711 static struct drm_gpuva_ops *
2712 __drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
2713 			      const struct drm_gpuvm_map_req *req,
2714 			      bool madvise)
2715 {
2716 	struct drm_gpuva_ops *ops;
2717 	struct {
2718 		struct drm_gpuvm *vm;
2719 		struct drm_gpuva_ops *ops;
2720 	} args;
2721 	int ret;
2722 
2723 	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
2724 	if (unlikely(!ops))
2725 		return ERR_PTR(-ENOMEM);
2726 
2727 	INIT_LIST_HEAD(&ops->list);
2728 
2729 	args.vm = gpuvm;
2730 	args.ops = ops;
2731 
2732 	ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args, req, madvise);
2733 	if (ret)
2734 		goto err_free_ops;
2735 
2736 	return ops;
2737 
2738 err_free_ops:
2739 	drm_gpuva_ops_free(gpuvm, ops);
2740 	return ERR_PTR(ret);
2741 }
2742 
2743 /**
2744  * drm_gpuvm_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
2745  * @gpuvm: the &drm_gpuvm representing the GPU VA space
2746  * @req: map request arguments
2747  *
2748  * This function creates a list of operations to perform splitting and merging
2749  * of existent mapping(s) with the newly requested one.
2750  *
2751  * The list can be iterated with &drm_gpuva_for_each_op and must be processed
2752  * in the given order. It can contain map, unmap and remap operations, but it
2753  * also can be empty if no operation is required, e.g. if the requested mapping
 * already exists in the exact same way.
2755  *
 * There can be an arbitrary number of unmap operations, a maximum of two remap
 * operations and a single map operation. The latter represents the original
2758  * map operation requested by the caller.
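 *
 * A minimal sketch of processing the returned operations; updating the
 * driver's page tables is only hinted at:
 *
 *    ops = drm_gpuvm_sm_map_ops_create(gpuvm, &req);
 *    if (IS_ERR(ops))
 *        return PTR_ERR(ops);
 *
 *    drm_gpuva_for_each_op(op, ops) {
 *        // apply the operation to the page tables and update the GPUVM's
 *        // view using drm_gpuva_map(), drm_gpuva_remap() or drm_gpuva_unmap()
 *    }
 *
 *    drm_gpuva_ops_free(gpuvm, ops);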
2759  *
2760  * Note that before calling this function again with another mapping request it
2761  * is necessary to update the &drm_gpuvm's view of the GPU VA space. The
2762  * previously obtained operations must be either processed or abandoned. To
2763  * update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
2764  * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
2765  * used.
2766  *
2767  * After the caller finished processing the returned &drm_gpuva_ops, they must
2768  * be freed with &drm_gpuva_ops_free.
2769  *
2770  * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
2771  */
2772 struct drm_gpuva_ops *
2773 drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
2774 			    const struct drm_gpuvm_map_req *req)
2775 {
2776 	return __drm_gpuvm_sm_map_ops_create(gpuvm, req, false);
2777 }
2778 EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_ops_create);
2779 
2780 /**
2781  * drm_gpuvm_madvise_ops_create() - creates the &drm_gpuva_ops to split
2782  * @gpuvm: the &drm_gpuvm representing the GPU VA space
2783  * @req: map request arguments
2784  *
2785  * This function creates a list of operations to perform splitting
 * of existent mapping(s) at the start or end of the requested mapping.
2787  *
2788  * The list can be iterated with &drm_gpuva_for_each_op and must be processed
2789  * in the given order. It can contain map and remap operations, but it
2790  * also can be empty if no operation is required, e.g. if the requested mapping
 * already exists in the exact same way.
2792  *
 * There will be no unmap operations, a maximum of two remap operations and up
 * to two map operations. The map operations correspond to: one from the
 * requested start address to the end of the first existing mapping it
 * overlaps, and another from the start of the last overlapping mapping to the
 * requested end address.
2796  *
2797  * Note that before calling this function again with another mapping request it
2798  * is necessary to update the &drm_gpuvm's view of the GPU VA space. The
2799  * previously obtained operations must be either processed or abandoned. To
2800  * update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
2801  * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
2802  * used.
2803  *
2804  * After the caller finished processing the returned &drm_gpuva_ops, they must
2805  * be freed with &drm_gpuva_ops_free.
2806  *
2807  * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
2808  */
2809 struct drm_gpuva_ops *
2810 drm_gpuvm_madvise_ops_create(struct drm_gpuvm *gpuvm,
2811 			     const struct drm_gpuvm_map_req *req)
2812 {
2813 	return __drm_gpuvm_sm_map_ops_create(gpuvm, req, true);
2814 }
2815 EXPORT_SYMBOL_GPL(drm_gpuvm_madvise_ops_create);
2816 
2817 /**
2818  * drm_gpuvm_sm_unmap_ops_create() - creates the &drm_gpuva_ops to split on
2819  * unmap
2820  * @gpuvm: the &drm_gpuvm representing the GPU VA space
2821  * @req_addr: the start address of the range to unmap
2822  * @req_range: the range of the mappings to unmap
2823  *
2824  * This function creates a list of operations to perform unmapping and, if
2825  * required, splitting of the mappings overlapping the unmap range.
2826  *
2827  * The list can be iterated with &drm_gpuva_for_each_op and must be processed
2828  * in the given order. It can contain unmap and remap operations, depending on
2829  * whether there are actual overlapping mappings to split.
2830  *
 * There can be an arbitrary number of unmap operations and a maximum of two
2832  * remap operations.
2833  *
2834  * Note that before calling this function again with another range to unmap it
2835  * is necessary to update the &drm_gpuvm's view of the GPU VA space. The
2836  * previously obtained operations must be processed or abandoned. To update the
2837  * &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
2838  * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
2839  * used.
2840  *
2841  * After the caller finished processing the returned &drm_gpuva_ops, they must
2842  * be freed with &drm_gpuva_ops_free.
2843  *
2844  * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
2845  */
2846 struct drm_gpuva_ops *
2847 drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
2848 			      u64 req_addr, u64 req_range)
2849 {
2850 	struct drm_gpuva_ops *ops;
2851 	struct {
2852 		struct drm_gpuvm *vm;
2853 		struct drm_gpuva_ops *ops;
2854 	} args;
2855 	int ret;
2856 
2857 	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
2858 	if (unlikely(!ops))
2859 		return ERR_PTR(-ENOMEM);
2860 
2861 	INIT_LIST_HEAD(&ops->list);
2862 
2863 	args.vm = gpuvm;
2864 	args.ops = ops;
2865 
2866 	ret = __drm_gpuvm_sm_unmap(gpuvm, &gpuvm_list_ops, &args,
2867 				   req_addr, req_range);
2868 	if (ret)
2869 		goto err_free_ops;
2870 
2871 	return ops;
2872 
2873 err_free_ops:
2874 	drm_gpuva_ops_free(gpuvm, ops);
2875 	return ERR_PTR(ret);
2876 }
2877 EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap_ops_create);
2878 
2879 /**
2880  * drm_gpuvm_prefetch_ops_create() - creates the &drm_gpuva_ops to prefetch
2881  * @gpuvm: the &drm_gpuvm representing the GPU VA space
2882  * @addr: the start address of the range to prefetch
2883  * @range: the range of the mappings to prefetch
2884  *
2885  * This function creates a list of operations to perform prefetching.
2886  *
2887  * The list can be iterated with &drm_gpuva_for_each_op and must be processed
2888  * in the given order. It can contain prefetch operations.
2889  *
 * There can be an arbitrary number of prefetch operations.
2891  *
2892  * After the caller finished processing the returned &drm_gpuva_ops, they must
2893  * be freed with &drm_gpuva_ops_free.
2894  *
2895  * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
2896  */
2897 struct drm_gpuva_ops *
2898 drm_gpuvm_prefetch_ops_create(struct drm_gpuvm *gpuvm,
2899 			      u64 addr, u64 range)
2900 {
2901 	struct drm_gpuva_ops *ops;
2902 	struct drm_gpuva_op *op;
2903 	struct drm_gpuva *va;
2904 	u64 end = addr + range;
2905 	int ret;
2906 
2907 	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
2908 	if (!ops)
2909 		return ERR_PTR(-ENOMEM);
2910 
2911 	INIT_LIST_HEAD(&ops->list);
2912 
2913 	drm_gpuvm_for_each_va_range(va, gpuvm, addr, end) {
2914 		op = gpuva_op_alloc(gpuvm);
2915 		if (!op) {
2916 			ret = -ENOMEM;
2917 			goto err_free_ops;
2918 		}
2919 
2920 		op->op = DRM_GPUVA_OP_PREFETCH;
2921 		op->prefetch.va = va;
2922 		list_add_tail(&op->entry, &ops->list);
2923 	}
2924 
2925 	return ops;
2926 
2927 err_free_ops:
2928 	drm_gpuva_ops_free(gpuvm, ops);
2929 	return ERR_PTR(ret);
2930 }
2931 EXPORT_SYMBOL_GPL(drm_gpuvm_prefetch_ops_create);
2932 
2933 /**
2934  * drm_gpuvm_bo_unmap_ops_create() - creates the &drm_gpuva_ops to unmap a GEM
2935  * @vm_bo: the &drm_gpuvm_bo abstraction
2936  *
2937  * This function creates a list of operations to perform unmapping for every
2938  * GPUVA attached to a GEM.
2939  *
 * The list can be iterated with &drm_gpuva_for_each_op and consists of an
 * arbitrary number of unmap operations.
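 *
 * A minimal usage sketch, e.g. from a driver's GEM close path; tearing down
 * the page tables is only hinted at and the caller is assumed to hold its own
 * reference on the &drm_gpuvm:
 *
 *    struct drm_gpuvm *gpuvm = vm_bo->vm;
 *
 *    ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
 *    if (IS_ERR(ops))
 *        return PTR_ERR(ops);
 *
 *    drm_gpuva_for_each_op(op, ops) {
 *        // tear down the mapping in the page tables, then
 *        drm_gpuva_unmap(&op->unmap);
 *        drm_gpuva_unlink(op->unmap.va);
 *    }
 *
 *    drm_gpuva_ops_free(gpuvm, ops);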
2942  *
2943  * After the caller finished processing the returned &drm_gpuva_ops, they must
2944  * be freed with &drm_gpuva_ops_free.
2945  *
 * It is the caller's responsibility to protect the GEM's GPUVA list against
 * concurrent access using the GEM's dma_resv lock.
2948  *
2949  * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
2950  */
2951 struct drm_gpuva_ops *
2952 drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo)
2953 {
2954 	struct drm_gpuva_ops *ops;
2955 	struct drm_gpuva_op *op;
2956 	struct drm_gpuva *va;
2957 	int ret;
2958 
2959 	drm_gem_gpuva_assert_lock_held(vm_bo->obj);
2960 
2961 	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
2962 	if (!ops)
2963 		return ERR_PTR(-ENOMEM);
2964 
2965 	INIT_LIST_HEAD(&ops->list);
2966 
2967 	drm_gpuvm_bo_for_each_va(va, vm_bo) {
2968 		op = gpuva_op_alloc(vm_bo->vm);
2969 		if (!op) {
2970 			ret = -ENOMEM;
2971 			goto err_free_ops;
2972 		}
2973 
2974 		op->op = DRM_GPUVA_OP_UNMAP;
2975 		op->unmap.va = va;
2976 		list_add_tail(&op->entry, &ops->list);
2977 	}
2978 
2979 	return ops;
2980 
2981 err_free_ops:
2982 	drm_gpuva_ops_free(vm_bo->vm, ops);
2983 	return ERR_PTR(ret);
2984 }
2985 EXPORT_SYMBOL_GPL(drm_gpuvm_bo_unmap_ops_create);
2986 
2987 /**
2988  * drm_gpuva_ops_free() - free the given &drm_gpuva_ops
2989  * @gpuvm: the &drm_gpuvm the ops were created for
2990  * @ops: the &drm_gpuva_ops to free
2991  *
2992  * Frees the given &drm_gpuva_ops structure including all the ops associated
2993  * with it.
2994  */
2995 void
2996 drm_gpuva_ops_free(struct drm_gpuvm *gpuvm,
2997 		   struct drm_gpuva_ops *ops)
2998 {
2999 	struct drm_gpuva_op *op, *next;
3000 
3001 	drm_gpuva_for_each_op_safe(op, next, ops) {
3002 		list_del(&op->entry);
3003 
3004 		if (op->op == DRM_GPUVA_OP_REMAP) {
3005 			kfree(op->remap.prev);
3006 			kfree(op->remap.next);
3007 			kfree(op->remap.unmap);
3008 		}
3009 
3010 		gpuva_op_free(gpuvm, op);
3011 	}
3012 
3013 	kfree(ops);
3014 }
3015 EXPORT_SYMBOL_GPL(drm_gpuva_ops_free);
3016 
3017 MODULE_DESCRIPTION("DRM GPUVM");
3018 MODULE_LICENSE("GPL");
3019