/* SPDX-License-Identifier: GPL-2.0-only OR MIT */

#ifndef __DRM_GPUVM_H__
#define __DRM_GPUVM_H__

/*
 * Copyright (c) 2022 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-resv.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/types.h>

#include <drm/drm_device.h>
#include <drm/drm_gem.h>
#include <drm/drm_exec.h>

struct drm_gpuvm;
struct drm_gpuvm_bo;
struct drm_gpuvm_ops;

/**
 * enum drm_gpuva_flags - flags for struct drm_gpuva
 */
enum drm_gpuva_flags {
	/**
	 * @DRM_GPUVA_INVALIDATED:
	 *
	 * Flag indicating that the &drm_gpuva's backing GEM is invalidated.
	 */
	DRM_GPUVA_INVALIDATED = (1 << 0),

	/**
	 * @DRM_GPUVA_SPARSE:
	 *
	 * Flag indicating that the &drm_gpuva is a sparse mapping.
	 */
	DRM_GPUVA_SPARSE = (1 << 1),

	/**
	 * @DRM_GPUVA_USERBITS: user defined bits
	 */
	DRM_GPUVA_USERBITS = (1 << 2),
};

/**
 * struct drm_gpuva - structure to track a GPU VA mapping
 *
 * This structure represents a GPU VA mapping and is associated with a
 * &drm_gpuvm.
 *
 * Typically, this structure is embedded in bigger driver structures.
 */
struct drm_gpuva {
	/**
	 * @vm: the &drm_gpuvm this object is associated with
	 */
	struct drm_gpuvm *vm;

	/**
	 * @vm_bo: the &drm_gpuvm_bo abstraction for the mapped
	 * &drm_gem_object
	 */
	struct drm_gpuvm_bo *vm_bo;

	/**
	 * @flags: the &drm_gpuva_flags for this mapping
	 */
	enum drm_gpuva_flags flags;

	/**
	 * @va: structure containing the address and range of the &drm_gpuva
	 */
	struct {
		/**
		 * @va.addr: the start address
		 */
		u64 addr;

		/**
		 * @va.range: the range
		 */
		u64 range;
	} va;

	/**
	 * @gem: structure containing the &drm_gem_object and its offset
	 */
	struct {
		/**
		 * @gem.offset: the offset within the &drm_gem_object
		 */
		u64 offset;

		/**
		 * @gem.obj: the mapped &drm_gem_object
		 */
		struct drm_gem_object *obj;

		/**
		 * @gem.entry: the &list_head to attach this object to a &drm_gpuvm_bo
		 */
		struct list_head entry;
	} gem;

	/**
	 * @rb: structure containing data to store &drm_gpuvas in a rb-tree
	 */
	struct {
		/**
		 * @rb.node: the rb-tree node
		 */
		struct rb_node node;

		/**
		 * @rb.entry: The &list_head to additionally connect &drm_gpuvas
		 * in the same order they appear in the interval tree. This is
		 * useful to keep iterating &drm_gpuvas from a start node found
		 * through the rb-tree while doing modifications on the rb-tree
		 * itself.
		 */
		struct list_head entry;

		/**
		 * @rb.__subtree_last: needed by the interval tree, holding last-in-subtree
		 */
		u64 __subtree_last;
	} rb;
};

int drm_gpuva_insert(struct drm_gpuvm *gpuvm, struct drm_gpuva *va);
void drm_gpuva_remove(struct drm_gpuva *va);

void drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo);
void drm_gpuva_unlink(struct drm_gpuva *va);

struct drm_gpuva *drm_gpuva_find(struct drm_gpuvm *gpuvm,
				 u64 addr, u64 range);
struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuvm *gpuvm,
				       u64 addr, u64 range);
struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start);
struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end);
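
/*
 * Example (illustrative sketch, not taken from any particular driver): a
 * driver typically embeds &struct drm_gpuva in a driver specific structure,
 * fills in the embedded structure and inserts it into its &drm_gpuvm. All
 * driver_* names below are hypothetical.
 *
 *	struct driver_va {
 *		struct drm_gpuva base;
 *	};
 *
 *	int driver_va_map(struct drm_gpuvm *gpuvm, struct driver_va *dva,
 *			  u64 addr, u64 range)
 *	{
 *		dva->base.va.addr = addr;
 *		dva->base.va.range = range;
 *
 *		return drm_gpuva_insert(gpuvm, &dva->base);
 *	}
 */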

/**
 * drm_gpuva_invalidate() - sets whether the backing GEM of this &drm_gpuva is
 * invalidated
 * @va: the &drm_gpuva to set the invalidate flag for
 * @invalidate: indicates whether the &drm_gpuva is invalidated
 */
static inline void drm_gpuva_invalidate(struct drm_gpuva *va, bool invalidate)
{
	if (invalidate)
		va->flags |= DRM_GPUVA_INVALIDATED;
	else
		va->flags &= ~DRM_GPUVA_INVALIDATED;
}

/**
 * drm_gpuva_invalidated() - indicates whether the backing BO of this &drm_gpuva
 * is invalidated
 * @va: the &drm_gpuva to check
 *
 * Returns: %true if the GPU VA is invalidated, %false otherwise
 */
static inline bool drm_gpuva_invalidated(struct drm_gpuva *va)
{
	return va->flags & DRM_GPUVA_INVALIDATED;
}

/**
 * enum drm_gpuvm_flags - flags for struct drm_gpuvm
 */
enum drm_gpuvm_flags {
	/**
	 * @DRM_GPUVM_RESV_PROTECTED: GPUVM is protected externally by the
	 * GPUVM's &dma_resv lock
	 */
	DRM_GPUVM_RESV_PROTECTED = BIT(0),

	/**
	 * @DRM_GPUVM_IMMEDIATE_MODE: use the locking scheme for GEMs designed
	 * for modifying the GPUVM during the fence signalling path
	 *
	 * When set, gpuva.lock is used to protect gpuva.list in all GEM
	 * objects associated with this GPUVM. Otherwise, the GEM's dma-resv is
	 * used.
	 */
	DRM_GPUVM_IMMEDIATE_MODE = BIT(1),

	/**
	 * @DRM_GPUVM_USERBITS: user defined bits
	 */
	DRM_GPUVM_USERBITS = BIT(2),
};

/**
 * struct drm_gpuvm - DRM GPU VA Manager
 *
 * The DRM GPU VA Manager keeps track of a GPU's virtual address space using
 * an interval tree of &drm_gpuva entries (see the @rb fields of &struct
 * drm_gpuva). Typically, this structure is embedded in bigger driver
 * structures.
 *
 * Drivers can pass addresses and ranges in an arbitrary unit, e.g. bytes or
 * pages.
 *
 * There should be one manager instance per GPU virtual address space.
 */
struct drm_gpuvm {
	/**
	 * @name: the name of the DRM GPU VA space
	 */
	const char *name;

	/**
	 * @flags: the &drm_gpuvm_flags of this GPUVM
	 */
	enum drm_gpuvm_flags flags;

	/**
	 * @drm: the &drm_device this VM lives in
	 */
	struct drm_device *drm;

	/**
	 * @mm_start: start of the VA space
	 */
	u64 mm_start;

	/**
	 * @mm_range: length of the VA space
	 */
	u64 mm_range;

	/**
	 * @rb: structures to track &drm_gpuva entries
	 */
	struct {
		/**
		 * @rb.tree: the rb-tree to track GPU VA mappings
		 */
		struct rb_root_cached tree;

		/**
		 * @rb.list: the &list_head to track GPU VA mappings
		 */
		struct list_head list;
	} rb;

	/**
	 * @kref: reference count of this object
	 */
	struct kref kref;

	/**
	 * @kernel_alloc_node:
	 *
	 * &drm_gpuva representing the address space cutout reserved for
	 * the kernel
	 */
	struct drm_gpuva kernel_alloc_node;

	/**
	 * @ops: &drm_gpuvm_ops providing the split/merge steps to drivers
	 */
	const struct drm_gpuvm_ops *ops;

	/**
	 * @r_obj: resv GEM object representing the GPUVM's common &dma_resv
	 */
	struct drm_gem_object *r_obj;

	/**
	 * @extobj: structure holding the extobj list
	 */
	struct {
		/**
		 * @extobj.list: &list_head storing &drm_gpuvm_bos serving as
		 * external objects
		 */
		struct list_head list;

		/**
		 * @extobj.local_list: pointer to the local list temporarily
		 * storing entries from the external object list
		 */
		struct list_head *local_list;

		/**
		 * @extobj.lock: spinlock to protect the extobj list
		 */
		spinlock_t lock;
	} extobj;

	/**
	 * @evict: structure holding the evict list and evict list lock
	 */
	struct {
		/**
		 * @evict.list: &list_head storing &drm_gpuvm_bos currently
		 * being evicted
		 */
		struct list_head list;

		/**
		 * @evict.local_list: pointer to the local list temporarily
		 * storing entries from the evicted object list
		 */
		struct list_head *local_list;

		/**
		 * @evict.lock: spinlock to protect the evict list
		 */
		spinlock_t lock;
	} evict;
};

void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
		    enum drm_gpuvm_flags flags,
		    struct drm_device *drm,
		    struct drm_gem_object *r_obj,
		    u64 start_offset, u64 range,
		    u64 reserve_offset, u64 reserve_range,
		    const struct drm_gpuvm_ops *ops);
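
/*
 * Example (illustrative sketch): setting up a GPUVM with a 48-bit VA space
 * and a 4K reserved node at address 0; the sizes and the driver_gpuvm_ops
 * name are made up. This assumes drm_gpuvm_init() takes its own reference
 * of @r_obj, as in-tree users rely on, hence the put afterwards.
 *
 *	struct drm_gem_object *r_obj = drm_gpuvm_resv_object_alloc(drm);
 *
 *	if (!r_obj)
 *		return -ENOMEM;
 *
 *	drm_gpuvm_init(gpuvm, "example-vm", 0, drm, r_obj,
 *		       0, 1ull << 48,
 *		       0, SZ_4K,
 *		       &driver_gpuvm_ops);
 *	drm_gem_object_put(r_obj);
 */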

/**
 * drm_gpuvm_get() - acquire a struct drm_gpuvm reference
 * @gpuvm: the &drm_gpuvm to acquire the reference of
 *
 * This function acquires an additional reference to @gpuvm. It is illegal to
 * call this without already holding a reference. No locks required.
 *
 * Returns: the &struct drm_gpuvm pointer
 */
static inline struct drm_gpuvm *
drm_gpuvm_get(struct drm_gpuvm *gpuvm)
{
	kref_get(&gpuvm->kref);

	return gpuvm;
}

void drm_gpuvm_put(struct drm_gpuvm *gpuvm);

bool drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
bool drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range);

struct drm_gem_object *
drm_gpuvm_resv_object_alloc(struct drm_device *drm);

/**
 * drm_gpuvm_resv_protected() - indicates whether &DRM_GPUVM_RESV_PROTECTED is
 * set
 * @gpuvm: the &drm_gpuvm
 *
 * Returns: true if &DRM_GPUVM_RESV_PROTECTED is set, false otherwise.
 */
static inline bool
drm_gpuvm_resv_protected(struct drm_gpuvm *gpuvm)
{
	return gpuvm->flags & DRM_GPUVM_RESV_PROTECTED;
}

/**
 * drm_gpuvm_immediate_mode() - indicates whether &DRM_GPUVM_IMMEDIATE_MODE is
 * set
 * @gpuvm: the &drm_gpuvm
 *
 * Returns: true if &DRM_GPUVM_IMMEDIATE_MODE is set, false otherwise.
 */
static inline bool
drm_gpuvm_immediate_mode(struct drm_gpuvm *gpuvm)
{
	return gpuvm->flags & DRM_GPUVM_IMMEDIATE_MODE;
}

/**
 * drm_gpuvm_resv() - returns the &drm_gpuvm's &dma_resv
 * @gpuvm__: the &drm_gpuvm
 *
 * Returns: a pointer to the &drm_gpuvm's shared &dma_resv
 */
#define drm_gpuvm_resv(gpuvm__) ((gpuvm__)->r_obj->resv)

/**
 * drm_gpuvm_resv_obj() - returns the &drm_gem_object holding the &drm_gpuvm's
 * &dma_resv
 * @gpuvm__: the &drm_gpuvm
 *
 * Returns: a pointer to the &drm_gem_object holding the &drm_gpuvm's shared
 * &dma_resv
 */
#define drm_gpuvm_resv_obj(gpuvm__) ((gpuvm__)->r_obj)

#define drm_gpuvm_resv_held(gpuvm__) \
	dma_resv_held(drm_gpuvm_resv(gpuvm__))

#define drm_gpuvm_resv_assert_held(gpuvm__) \
	dma_resv_assert_held(drm_gpuvm_resv(gpuvm__))

/**
 * drm_gpuvm_is_extobj() - indicates whether the given &drm_gem_object is an
 * external object
 * @gpuvm: the &drm_gpuvm to check
 * @obj: the &drm_gem_object to check
 *
 * Returns: true if the &drm_gem_object's &dma_resv differs from the
 * &drm_gpuvm's &dma_resv, false otherwise
 */
static inline bool
drm_gpuvm_is_extobj(struct drm_gpuvm *gpuvm,
		    struct drm_gem_object *obj)
{
	return obj && obj->resv != drm_gpuvm_resv(gpuvm);
}

static inline struct drm_gpuva *
__drm_gpuva_next(struct drm_gpuva *va)
{
	if (va && !list_is_last(&va->rb.entry, &va->vm->rb.list))
		return list_next_entry(va, rb.entry);

	return NULL;
}

/**
 * drm_gpuvm_for_each_va_range() - iterate over a range of &drm_gpuvas
 * @va__: &drm_gpuva structure to assign to in each iteration step
 * @gpuvm__: &drm_gpuvm to walk over
 * @start__: starting offset, the first gpuva will overlap this
 * @end__: ending offset, the last gpuva will start before this (but may
 * overlap)
 *
 * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie
 * between @start__ and @end__. It is implemented similarly to list_for_each(),
 * but is using the &drm_gpuvm's internal interval tree to accelerate
 * the search for the starting &drm_gpuva, and hence isn't safe against removal
 * of elements. It assumes that @end__ is within (or is the upper limit of) the
 * &drm_gpuvm. This iterator does not skip over the &drm_gpuvm's
 * @kernel_alloc_node.
 */
#define drm_gpuvm_for_each_va_range(va__, gpuvm__, start__, end__) \
	for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)); \
	     va__ && (va__->va.addr < (end__)); \
	     va__ = __drm_gpuva_next(va__))
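
/*
 * Example (illustrative): walk all mappings overlapping the interval
 * [0x1000, 0x3000); the addresses are made up.
 *
 *	struct drm_gpuva *va;
 *
 *	drm_gpuvm_for_each_va_range(va, gpuvm, 0x1000, 0x3000)
 *		pr_debug("mapping: addr=0x%llx range=0x%llx\n",
 *			 va->va.addr, va->va.range);
 */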

/**
 * drm_gpuvm_for_each_va_range_safe() - safely iterate over a range of
 * &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @next__: another &drm_gpuva to use as temporary storage
 * @gpuvm__: &drm_gpuvm to walk over
 * @start__: starting offset, the first gpuva will overlap this
 * @end__: ending offset, the last gpuva will start before this (but may
 * overlap)
 *
 * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie
 * between @start__ and @end__. It is implemented similarly to
 * list_for_each_safe(), but is using the &drm_gpuvm's internal interval
 * tree to accelerate the search for the starting &drm_gpuva, and hence is safe
 * against removal of elements. It assumes that @end__ is within (or is the
 * upper limit of) the &drm_gpuvm. This iterator does not skip over the
 * &drm_gpuvm's @kernel_alloc_node.
 */
#define drm_gpuvm_for_each_va_range_safe(va__, next__, gpuvm__, start__, end__) \
	for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)), \
	     next__ = __drm_gpuva_next(va__); \
	     va__ && (va__->va.addr < (end__)); \
	     va__ = next__, next__ = __drm_gpuva_next(va__))

/**
 * drm_gpuvm_for_each_va() - iterate over all &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @gpuvm__: &drm_gpuvm to walk over
 *
 * This iterator walks over all &drm_gpuva structures associated with the given
 * &drm_gpuvm.
 */
#define drm_gpuvm_for_each_va(va__, gpuvm__) \
	list_for_each_entry(va__, &(gpuvm__)->rb.list, rb.entry)

/**
 * drm_gpuvm_for_each_va_safe() - safely iterate over all &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @next__: another &drm_gpuva to use as temporary storage
 * @gpuvm__: &drm_gpuvm to walk over
 *
 * This iterator walks over all &drm_gpuva structures associated with the given
 * &drm_gpuvm. It is implemented with list_for_each_entry_safe(), and
 * hence safe against the removal of elements.
 */
#define drm_gpuvm_for_each_va_safe(va__, next__, gpuvm__) \
	list_for_each_entry_safe(va__, next__, &(gpuvm__)->rb.list, rb.entry)

/**
 * struct drm_gpuvm_exec - &drm_gpuvm abstraction of &drm_exec
 *
 * This structure should be created on the stack as &drm_exec should be.
 *
 * Optionally, @extra can be set in order to lock additional &drm_gem_objects.
 */
struct drm_gpuvm_exec {
	/**
	 * @exec: the &drm_exec structure
	 */
	struct drm_exec exec;

	/**
	 * @flags: the flags for the struct drm_exec
	 */
	u32 flags;

	/**
	 * @vm: the &drm_gpuvm whose DMA reservations are locked
	 */
	struct drm_gpuvm *vm;

	/**
	 * @num_fences: the number of fences to reserve for the &dma_resv of the
	 * locked &drm_gem_objects
	 */
	unsigned int num_fences;

	/**
	 * @extra: Callback and corresponding private data for the driver to
	 * lock arbitrary additional &drm_gem_objects.
	 */
	struct {
		/**
		 * @extra.fn: The driver callback to lock additional
		 * &drm_gem_objects.
		 */
		int (*fn)(struct drm_gpuvm_exec *vm_exec);

		/**
		 * @extra.priv: driver private data for the @fn callback
		 */
		void *priv;
	} extra;
};

int drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm,
			 struct drm_exec *exec,
			 unsigned int num_fences);

int drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
			      struct drm_exec *exec,
			      unsigned int num_fences);

int drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm,
			    struct drm_exec *exec,
			    u64 addr, u64 range,
			    unsigned int num_fences);

int drm_gpuvm_exec_lock(struct drm_gpuvm_exec *vm_exec);

int drm_gpuvm_exec_lock_array(struct drm_gpuvm_exec *vm_exec,
			      struct drm_gem_object **objs,
			      unsigned int num_objs);

int drm_gpuvm_exec_lock_range(struct drm_gpuvm_exec *vm_exec,
			      u64 addr, u64 range);

/**
 * drm_gpuvm_exec_unlock() - unlock all dma-resv of all associated BOs
 * @vm_exec: the &drm_gpuvm_exec wrapper
 *
 * Releases all dma-resv locks of all &drm_gem_objects previously acquired
 * through drm_gpuvm_exec_lock() or its variants.
 */
static inline void
drm_gpuvm_exec_unlock(struct drm_gpuvm_exec *vm_exec)
{
	drm_exec_fini(&vm_exec->exec);
}

int drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec);
void drm_gpuvm_resv_add_fence(struct drm_gpuvm *gpuvm,
			      struct drm_exec *exec,
			      struct dma_fence *fence,
			      enum dma_resv_usage private_usage,
			      enum dma_resv_usage extobj_usage);

/**
 * drm_gpuvm_exec_resv_add_fence() - add fence to private and all extobj
 * @vm_exec: the &drm_gpuvm_exec wrapper
 * @fence: fence to add
 * @private_usage: private dma-resv usage
 * @extobj_usage: extobj dma-resv usage
 *
 * See drm_gpuvm_resv_add_fence().
 */
static inline void
drm_gpuvm_exec_resv_add_fence(struct drm_gpuvm_exec *vm_exec,
			      struct dma_fence *fence,
			      enum dma_resv_usage private_usage,
			      enum dma_resv_usage extobj_usage)
{
	drm_gpuvm_resv_add_fence(vm_exec->vm, &vm_exec->exec, fence,
				 private_usage, extobj_usage);
}

/**
 * drm_gpuvm_exec_validate() - validate all BOs marked as evicted
 * @vm_exec: the &drm_gpuvm_exec wrapper
 *
 * See drm_gpuvm_validate().
 *
 * Returns: 0 on success, negative error code on failure.
 */
static inline int
drm_gpuvm_exec_validate(struct drm_gpuvm_exec *vm_exec)
{
	return drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec);
}
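
/*
 * Example (illustrative sketch of the typical lock/validate/fence flow;
 * error handling is abbreviated and driver_submit_job() is hypothetical):
 *
 *	struct drm_gpuvm_exec vm_exec = {
 *		.vm = gpuvm,
 *		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
 *		.num_fences = 1,
 *	};
 *	struct dma_fence *fence;
 *	int ret;
 *
 *	ret = drm_gpuvm_exec_lock(&vm_exec);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_gpuvm_exec_validate(&vm_exec);
 *	if (ret)
 *		goto err_unlock;
 *
 *	fence = driver_submit_job(job);
 *	drm_gpuvm_exec_resv_add_fence(&vm_exec, fence,
 *				      DMA_RESV_USAGE_BOOKKEEP,
 *				      DMA_RESV_USAGE_BOOKKEEP);
 *
 * err_unlock:
 *	drm_gpuvm_exec_unlock(&vm_exec);
 *	return ret;
 */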

/**
 * struct drm_gpuvm_bo - structure representing a &drm_gpuvm and
 * &drm_gem_object combination
 *
 * This structure is an abstraction representing a &drm_gpuvm and
 * &drm_gem_object combination. It serves as an indirection to accelerate
 * iterating all &drm_gpuvas within a &drm_gpuvm backed by the same
 * &drm_gem_object.
 *
 * Furthermore it is used to cache evicted GEM objects for a certain GPU-VM to
 * accelerate validation.
 *
 * Typically, drivers want to create an instance of a struct drm_gpuvm_bo the
 * first time a GEM object is mapped in a GPU-VM and release the instance once
 * the last mapping of the GEM object in this GPU-VM is unmapped.
 */
struct drm_gpuvm_bo {
	/**
	 * @vm: The &drm_gpuvm the @obj is mapped in. This is a reference
	 * counted pointer.
	 */
	struct drm_gpuvm *vm;

	/**
	 * @obj: The &drm_gem_object being mapped in @vm. This is a reference
	 * counted pointer.
	 */
	struct drm_gem_object *obj;

	/**
	 * @evicted: Indicates whether the &drm_gem_object is evicted; field
	 * protected by the &drm_gem_object's dma-resv lock.
	 */
	bool evicted;

	/**
	 * @kref: The reference count for this &drm_gpuvm_bo.
	 */
	struct kref kref;

	/**
	 * @list: Structure containing all &list_heads.
	 */
	struct {
		/**
		 * @list.gpuva: The list of linked &drm_gpuvas.
		 *
		 * It is safe to access entries from this list as long as the
		 * GEM's gpuva lock is held. See also struct drm_gem_object.
		 */
		struct list_head gpuva;

		/**
		 * @list.entry: Structure containing all &list_heads serving as
		 * entry.
		 */
		struct {
			/**
			 * @list.entry.gem: List entry to attach to the
			 * &drm_gem_object's gpuva list.
			 */
			struct list_head gem;

			/**
			 * @list.entry.extobj: List entry to attach to the
			 * &drm_gpuvm's extobj list.
			 */
			struct list_head extobj;

			/**
			 * @list.entry.evict: List entry to attach to the
			 * &drm_gpuvm's evict list.
			 */
			struct list_head evict;
		} entry;
	} list;
};

struct drm_gpuvm_bo *
drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
		    struct drm_gem_object *obj);

struct drm_gpuvm_bo *
drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
		    struct drm_gem_object *obj);
struct drm_gpuvm_bo *
drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *vm_bo);
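
/*
 * Example (illustrative sketch): obtaining a &drm_gpuvm_bo when mapping a
 * GEM object, assuming drm_gpuva_link() takes its own &drm_gpuvm_bo
 * reference so that the local reference obtained here can be dropped again
 * right away.
 *
 *	struct drm_gpuvm_bo *vm_bo;
 *
 *	vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);
 *	if (IS_ERR(vm_bo))
 *		return PTR_ERR(vm_bo);
 *
 *	drm_gpuva_link(va, vm_bo);
 *	drm_gpuvm_bo_put(vm_bo);
 */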

/**
 * drm_gpuvm_bo_get() - acquire a struct drm_gpuvm_bo reference
 * @vm_bo: the &drm_gpuvm_bo to acquire the reference of
 *
 * This function acquires an additional reference to @vm_bo. It is illegal to
 * call this without already holding a reference. No locks required.
 *
 * Returns: the &struct drm_gpuvm_bo pointer
 */
static inline struct drm_gpuvm_bo *
drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo)
{
	kref_get(&vm_bo->kref);
	return vm_bo;
}

bool drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo);

struct drm_gpuvm_bo *
drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
		  struct drm_gem_object *obj);

void drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict);

/**
 * drm_gpuvm_bo_gem_evict() - add/remove all &drm_gpuvm_bo's in the list
 * to/from the &drm_gpuvm's evict list
 * @obj: the &drm_gem_object
 * @evict: indicates whether @obj is evicted
 *
 * See drm_gpuvm_bo_evict().
 */
static inline void
drm_gpuvm_bo_gem_evict(struct drm_gem_object *obj, bool evict)
{
	struct drm_gpuvm_bo *vm_bo;

	drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
		drm_gem_gpuva_assert_lock_held(vm_bo->vm, obj);
		drm_gpuvm_bo_evict(vm_bo, evict);
	}
}

void drm_gpuvm_bo_extobj_add(struct drm_gpuvm_bo *vm_bo);

/**
 * drm_gpuvm_bo_for_each_va() - iterator to walk over a list of &drm_gpuva
 * @va__: &drm_gpuva structure to assign to in each iteration step
 * @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with
 *
 * This iterator walks over all &drm_gpuva structures associated with the
 * &drm_gpuvm_bo.
 *
 * The caller must hold the GEM's gpuva lock.
 */
#define drm_gpuvm_bo_for_each_va(va__, vm_bo__) \
	list_for_each_entry(va__, &(vm_bo__)->list.gpuva, gem.entry)

/**
 * drm_gpuvm_bo_for_each_va_safe() - iterator to safely walk over a list of
 * &drm_gpuva
 * @va__: &drm_gpuva structure to assign to in each iteration step
 * @next__: another &drm_gpuva to use as temporary storage
 * @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with
 *
 * This iterator walks over all &drm_gpuva structures associated with the
 * &drm_gpuvm_bo. It is implemented with list_for_each_entry_safe(), hence
 * it is safe against removal of elements.
 *
 * The caller must hold the GEM's gpuva lock.
 */
#define drm_gpuvm_bo_for_each_va_safe(va__, next__, vm_bo__) \
	list_for_each_entry_safe(va__, next__, &(vm_bo__)->list.gpuva, gem.entry)
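
/*
 * Example (illustrative): walking all mappings of a GEM object within a
 * single GPUVM; the caller is assumed to hold the GEM's gpuva lock.
 *
 *	struct drm_gpuva *va;
 *
 *	drm_gpuvm_bo_for_each_va(va, vm_bo)
 *		pr_debug("va: addr=0x%llx\n", va->va.addr);
 */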

/**
 * enum drm_gpuva_op_type - GPU VA operation type
 *
 * Operations to alter the GPU VA mappings tracked by the &drm_gpuvm.
 */
enum drm_gpuva_op_type {
	/**
	 * @DRM_GPUVA_OP_MAP: the map op type
	 */
	DRM_GPUVA_OP_MAP,

	/**
	 * @DRM_GPUVA_OP_REMAP: the remap op type
	 */
	DRM_GPUVA_OP_REMAP,

	/**
	 * @DRM_GPUVA_OP_UNMAP: the unmap op type
	 */
	DRM_GPUVA_OP_UNMAP,

	/**
	 * @DRM_GPUVA_OP_PREFETCH: the prefetch op type
	 */
	DRM_GPUVA_OP_PREFETCH,

	/**
	 * @DRM_GPUVA_OP_DRIVER: the driver defined op type
	 */
	DRM_GPUVA_OP_DRIVER,
};

/**
 * struct drm_gpuva_op_map - GPU VA map operation
 *
 * This structure represents a single map operation generated by the
 * DRM GPU VA manager.
 */
struct drm_gpuva_op_map {
	/**
	 * @va: structure containing address and range of a map
	 * operation
	 */
	struct {
		/**
		 * @va.addr: the base address of the new mapping
		 */
		u64 addr;

		/**
		 * @va.range: the range of the new mapping
		 */
		u64 range;
	} va;

	/**
	 * @gem: structure containing the &drm_gem_object and its offset
	 */
	struct {
		/**
		 * @gem.offset: the offset within the &drm_gem_object
		 */
		u64 offset;

		/**
		 * @gem.obj: the &drm_gem_object to map
		 */
		struct drm_gem_object *obj;
	} gem;
};

/**
 * struct drm_gpuva_op_unmap - GPU VA unmap operation
 *
 * This structure represents a single unmap operation generated by the
 * DRM GPU VA manager.
 */
struct drm_gpuva_op_unmap {
	/**
	 * @va: the &drm_gpuva to unmap
	 */
	struct drm_gpuva *va;

	/**
	 * @keep:
	 *
	 * Indicates whether this &drm_gpuva is physically contiguous with the
	 * original mapping request.
	 *
	 * Optionally, if &keep is set, drivers may keep the actual page table
	 * mappings for this &drm_gpuva, adding only the missing page table
	 * entries and updating the &drm_gpuvm accordingly.
	 */
	bool keep;
};

/**
 * struct drm_gpuva_op_remap - GPU VA remap operation
 *
 * This represents a single remap operation generated by the DRM GPU VA manager.
 *
 * A remap operation is generated when an existing GPU VA mapping is split up
 * by inserting a new GPU VA mapping or by partially unmapping existing
 * mapping(s), hence it consists of at most two map operations and one unmap
 * operation.
 *
 * The @unmap operation takes care of removing the original existing mapping.
 * @prev is used to remap the preceding part, @next the subsequent part.
 *
 * If the new mapping's start address aligns with the start address of the old
 * mapping, @prev is NULL; likewise, if the new mapping's end address aligns
 * with the end address of the old mapping, @next is NULL.
 *
 * Note, the reason for a dedicated remap operation, rather than arbitrary
 * unmap and map operations, is to give drivers the chance of extracting driver
 * specific data for creating the new mappings from the unmap operation's
 * &drm_gpuva structure which typically is embedded in larger driver specific
 * structures.
 */
struct drm_gpuva_op_remap {
	/**
	 * @prev: the preceding part of a split mapping
	 */
	struct drm_gpuva_op_map *prev;

	/**
	 * @next: the subsequent part of a split mapping
	 */
	struct drm_gpuva_op_map *next;

	/**
	 * @unmap: the unmap operation for the original existing mapping
	 */
	struct drm_gpuva_op_unmap *unmap;
};

/**
 * struct drm_gpuva_op_prefetch - GPU VA prefetch operation
 *
 * This structure represents a single prefetch operation generated by the
 * DRM GPU VA manager.
 */
struct drm_gpuva_op_prefetch {
	/**
	 * @va: the &drm_gpuva to prefetch
	 */
	struct drm_gpuva *va;
};

/**
 * struct drm_gpuva_op - GPU VA operation
 *
 * This structure represents a single generic operation.
 *
 * The particular type of the operation is defined by @op.
 */
struct drm_gpuva_op {
	/**
	 * @entry:
	 *
	 * The &list_head used to distribute instances of this struct within
	 * &drm_gpuva_ops.
	 */
	struct list_head entry;

	/**
	 * @op: the type of the operation
	 */
	enum drm_gpuva_op_type op;

	union {
		/**
		 * @map: the map operation
		 */
		struct drm_gpuva_op_map map;

		/**
		 * @remap: the remap operation
		 */
		struct drm_gpuva_op_remap remap;

		/**
		 * @unmap: the unmap operation
		 */
		struct drm_gpuva_op_unmap unmap;

		/**
		 * @prefetch: the prefetch operation
		 */
		struct drm_gpuva_op_prefetch prefetch;
	};
};

/**
 * struct drm_gpuva_ops - wraps a list of &drm_gpuva_op
 */
struct drm_gpuva_ops {
	/**
	 * @list: the &list_head
	 */
	struct list_head list;
};

/**
 * drm_gpuva_for_each_op() - iterator to walk over &drm_gpuva_ops
 * @op: &drm_gpuva_op to assign in each iteration step
 * @ops: &drm_gpuva_ops to walk
 *
 * This iterator walks over all ops within a given list of operations.
 */
#define drm_gpuva_for_each_op(op, ops) list_for_each_entry(op, &(ops)->list, entry)

/**
 * drm_gpuva_for_each_op_safe() - iterator to safely walk over &drm_gpuva_ops
 * @op: &drm_gpuva_op to assign in each iteration step
 * @next: the next &drm_gpuva_op to use as temporary storage
 * @ops: &drm_gpuva_ops to walk
 *
 * This iterator walks over all ops within a given list of operations. It is
 * implemented with list_for_each_entry_safe(), so it is safe against removal
 * of elements.
 */
#define drm_gpuva_for_each_op_safe(op, next, ops) \
	list_for_each_entry_safe(op, next, &(ops)->list, entry)

/**
 * drm_gpuva_for_each_op_from_reverse() - iterate backwards from the given point
 * @op: &drm_gpuva_op to assign in each iteration step
 * @ops: &drm_gpuva_ops to walk
 *
 * This iterator walks over all ops within a given list of operations beginning
 * from the given operation in reverse order.
 */
#define drm_gpuva_for_each_op_from_reverse(op, ops) \
	list_for_each_entry_from_reverse(op, &(ops)->list, entry)

/**
 * drm_gpuva_for_each_op_reverse() - iterator to walk over &drm_gpuva_ops in reverse
 * @op: &drm_gpuva_op to assign in each iteration step
 * @ops: &drm_gpuva_ops to walk
 *
 * This iterator walks over all ops within a given list of operations in
 * reverse order.
 */
#define drm_gpuva_for_each_op_reverse(op, ops) \
	list_for_each_entry_reverse(op, &(ops)->list, entry)

/**
 * drm_gpuva_first_op() - returns the first &drm_gpuva_op from &drm_gpuva_ops
 * @ops: the &drm_gpuva_ops to get the first &drm_gpuva_op from
 */
#define drm_gpuva_first_op(ops) \
	list_first_entry(&(ops)->list, struct drm_gpuva_op, entry)

/**
 * drm_gpuva_last_op() - returns the last &drm_gpuva_op from &drm_gpuva_ops
 * @ops: the &drm_gpuva_ops to get the last &drm_gpuva_op from
 */
#define drm_gpuva_last_op(ops) \
	list_last_entry(&(ops)->list, struct drm_gpuva_op, entry)

/**
 * drm_gpuva_prev_op() - previous &drm_gpuva_op in the list
 * @op: the current &drm_gpuva_op
 */
#define drm_gpuva_prev_op(op) list_prev_entry(op, entry)

/**
 * drm_gpuva_next_op() - next &drm_gpuva_op in the list
 * @op: the current &drm_gpuva_op
 */
#define drm_gpuva_next_op(op) list_next_entry(op, entry)

/**
 * struct drm_gpuvm_map_req - arguments passed to drm_gpuvm_sm_map[_ops_create]()
 */
struct drm_gpuvm_map_req {
	/**
	 * @map: the &drm_gpuva_op_map describing the requested mapping
	 */
	struct drm_gpuva_op_map map;
};

struct drm_gpuva_ops *
drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
			    const struct drm_gpuvm_map_req *req);
struct drm_gpuva_ops *
drm_gpuvm_madvise_ops_create(struct drm_gpuvm *gpuvm,
			     const struct drm_gpuvm_map_req *req);

struct drm_gpuva_ops *
drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
			      u64 addr, u64 range);

struct drm_gpuva_ops *
drm_gpuvm_prefetch_ops_create(struct drm_gpuvm *gpuvm,
			      u64 addr, u64 range);

struct drm_gpuva_ops *
drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo);

void drm_gpuva_ops_free(struct drm_gpuvm *gpuvm,
			struct drm_gpuva_ops *ops);
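
/*
 * Example (illustrative): creating the split/merge ops for a map request,
 * processing them and freeing the list again; driver_process_op() is
 * hypothetical.
 *
 *	struct drm_gpuvm_map_req req = {
 *		.map.va.addr = addr,
 *		.map.va.range = range,
 *		.map.gem.obj = obj,
 *		.map.gem.offset = offset,
 *	};
 *	struct drm_gpuva_ops *ops;
 *	struct drm_gpuva_op *op;
 *	int ret = 0;
 *
 *	ops = drm_gpuvm_sm_map_ops_create(gpuvm, &req);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 *	drm_gpuva_for_each_op(op, ops) {
 *		ret = driver_process_op(op);
 *		if (ret)
 *			break;
 *	}
 *
 *	drm_gpuva_ops_free(gpuvm, ops);
 *	return ret;
 */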

/**
 * drm_gpuva_init_from_op() - initialize a &drm_gpuva from a &drm_gpuva_op_map
 * @va: the &drm_gpuva to initialize
 * @op: the &drm_gpuva_op_map to take the address, range, GEM object and
 * offset from
 */
static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
					  struct drm_gpuva_op_map *op)
{
	va->va.addr = op->va.addr;
	va->va.range = op->va.range;
	va->gem.obj = op->gem.obj;
	va->gem.offset = op->gem.offset;
}

/**
 * struct drm_gpuvm_ops - callbacks for split/merge steps
 *
 * This structure defines the callbacks used by &drm_gpuvm_sm_map and
 * &drm_gpuvm_sm_unmap to provide the split/merge steps for map and unmap
 * operations to drivers.
 */
struct drm_gpuvm_ops {
	/**
	 * @vm_free: called when the last reference of a struct drm_gpuvm is
	 * dropped
	 *
	 * This callback is mandatory.
	 */
	void (*vm_free)(struct drm_gpuvm *gpuvm);

	/**
	 * @op_alloc: called when the &drm_gpuvm allocates
	 * a struct drm_gpuva_op
	 *
	 * Some drivers may want to embed struct drm_gpuva_op into driver
	 * specific structures. By implementing this callback drivers can
	 * allocate memory accordingly.
	 *
	 * This callback is optional.
	 */
	struct drm_gpuva_op *(*op_alloc)(void);

	/**
	 * @op_free: called when the &drm_gpuvm frees a
	 * struct drm_gpuva_op
	 *
	 * Some drivers may want to embed struct drm_gpuva_op into driver
	 * specific structures. By implementing this callback drivers can
	 * free the previously allocated memory accordingly.
	 *
	 * This callback is optional.
	 */
	void (*op_free)(struct drm_gpuva_op *op);

	/**
	 * @vm_bo_alloc: called when the &drm_gpuvm allocates
	 * a struct drm_gpuvm_bo
	 *
	 * Some drivers may want to embed struct drm_gpuvm_bo into driver
	 * specific structures. By implementing this callback drivers can
	 * allocate memory accordingly.
	 *
	 * This callback is optional.
	 */
	struct drm_gpuvm_bo *(*vm_bo_alloc)(void);

	/**
	 * @vm_bo_free: called when the &drm_gpuvm frees a
	 * struct drm_gpuvm_bo
	 *
	 * Some drivers may want to embed struct drm_gpuvm_bo into driver
	 * specific structures. By implementing this callback drivers can
	 * free the previously allocated memory accordingly.
	 *
	 * This callback is optional.
	 */
	void (*vm_bo_free)(struct drm_gpuvm_bo *vm_bo);

	/**
	 * @vm_bo_validate: called from drm_gpuvm_validate()
	 *
	 * Drivers receive this callback for every evicted &drm_gem_object being
	 * mapped in the corresponding &drm_gpuvm.
	 *
	 * Typically, drivers would call their driver specific variant of
	 * ttm_bo_validate() from within this callback.
	 */
	int (*vm_bo_validate)(struct drm_gpuvm_bo *vm_bo,
			      struct drm_exec *exec);

	/**
	 * @sm_step_map: called from &drm_gpuvm_sm_map to finally insert the
	 * mapping once all previous steps were completed
	 *
	 * The &priv pointer matches the one the driver passed to
	 * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
	 *
	 * Can be NULL if &drm_gpuvm_sm_map is not used.
	 */
	int (*sm_step_map)(struct drm_gpuva_op *op, void *priv);

	/**
	 * @sm_step_remap: called from &drm_gpuvm_sm_map and
	 * &drm_gpuvm_sm_unmap to split up an existing mapping
	 *
	 * This callback is called when an existing mapping needs to be split
	 * up. This is the case when either a newly requested mapping overlaps
	 * or is enclosed by an existing mapping, or a partial unmap of an
	 * existing mapping is requested.
	 *
	 * The &priv pointer matches the one the driver passed to
	 * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
	 *
	 * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is
	 * used.
	 */
	int (*sm_step_remap)(struct drm_gpuva_op *op, void *priv);

	/**
	 * @sm_step_unmap: called from &drm_gpuvm_sm_map and
	 * &drm_gpuvm_sm_unmap to unmap an existing mapping
	 *
	 * This callback is called when an existing mapping needs to be
	 * unmapped. This is the case when either a newly requested mapping
	 * encloses an existing mapping or an unmap of an existing mapping is
	 * requested.
	 *
	 * The &priv pointer matches the one the driver passed to
	 * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
	 *
	 * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is
	 * used.
	 */
	int (*sm_step_unmap)(struct drm_gpuva_op *op, void *priv);
};
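
/*
 * Example (illustrative sketch of a minimal &drm_gpuvm_ops; all driver_*
 * callbacks are hypothetical):
 *
 *	static const struct drm_gpuvm_ops driver_gpuvm_ops = {
 *		.vm_free = driver_gpuvm_free,
 *		.sm_step_map = driver_gpuva_sm_step_map,
 *		.sm_step_remap = driver_gpuva_sm_step_remap,
 *		.sm_step_unmap = driver_gpuva_sm_step_unmap,
 *	};
 */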

int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
		     const struct drm_gpuvm_map_req *req);

int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
		       u64 addr, u64 range);

int drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
			       struct drm_exec *exec, unsigned int num_fences,
			       struct drm_gpuvm_map_req *req);

int drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
				 u64 req_addr, u64 req_range);

void drm_gpuva_map(struct drm_gpuvm *gpuvm,
		   struct drm_gpuva *va,
		   struct drm_gpuva_op_map *op);

void drm_gpuva_remap(struct drm_gpuva *prev,
		     struct drm_gpuva *next,
		     struct drm_gpuva_op_remap *op);

void drm_gpuva_unmap(struct drm_gpuva_op_unmap *op);
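
/*
 * Example (illustrative): a &drm_gpuvm_ops.sm_step_unmap callback tearing
 * down the page tables before updating the VA space; driver_pt_unmap() and
 * struct driver_ctx are hypothetical.
 *
 *	static int driver_gpuva_sm_step_unmap(struct drm_gpuva_op *op, void *priv)
 *	{
 *		struct driver_ctx *ctx = priv;
 *
 *		driver_pt_unmap(ctx, op->unmap.va->va.addr,
 *				op->unmap.va->va.range);
 *
 *		drm_gpuva_unmap(&op->unmap);
 *		return 0;
 *	}
 */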

/**
 * drm_gpuva_op_remap_to_unmap_range() - Helper to get the start and range of
 * the unmap stage of a remap op.
 * @op: Remap op.
 * @start_addr: Output pointer for the start of the required unmap.
 * @range: Output pointer for the length of the required unmap.
 *
 * The given start address and range will be set such that they represent the
 * range of the address space that was previously covered by the mapping being
 * re-mapped, but is now empty.
 */
static inline void
drm_gpuva_op_remap_to_unmap_range(const struct drm_gpuva_op_remap *op,
				  u64 *start_addr, u64 *range)
{
	const u64 va_start = op->prev ?
			     op->prev->va.addr + op->prev->va.range :
			     op->unmap->va->va.addr;
	const u64 va_end = op->next ?
			   op->next->va.addr :
			   op->unmap->va->va.addr + op->unmap->va->va.range;

	if (start_addr)
		*start_addr = va_start;
	if (range)
		*range = va_end - va_start;
}
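
/*
 * Example (illustrative): a remap step may use this helper to zap exactly
 * the hole left between @prev and @next; driver_pt_unmap() and ctx are
 * hypothetical, as in the sm_step_unmap sketch above.
 *
 *	u64 addr, range;
 *
 *	drm_gpuva_op_remap_to_unmap_range(&op->remap, &addr, &range);
 *	driver_pt_unmap(ctx, addr, range);
 */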

#endif /* __DRM_GPUVM_H__ */