/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/cgroup_dmem.h>
#include <linux/dma-resv.h>

#include "ttm_module.h"

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
{
	struct drm_printer p = drm_dbg_printer(NULL, DRM_UT_CORE, TTM_PFX);
	struct ttm_resource_manager *man;
	int i, mem_type;

	for (i = 0; i < placement->num_placement; i++) {
		mem_type = placement->placement[i].mem_type;
		drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
			   i, placement->placement[i].flags, mem_type);
		man = ttm_manager_type(bo->bdev, mem_type);
		ttm_resource_manager_debug(man, &p);
	}
}

/**
 * ttm_bo_move_to_lru_tail
 *
 * @bo: The buffer object.
 *
 * Move this BO to the tail of all lru lists used to lookup and reserve an
 * object. This function must be called with struct ttm_device::lru_lock
 * held, and is used to make a BO less likely to be considered for eviction.
 */
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);

	if (bo->resource)
		ttm_resource_move_to_lru_tail(bo->resource);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
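
/*
 * Example (illustrative sketch, not part of the driver API): bumping a BO
 * on the LRU after it has been used, e.g. at command submission time. The
 * caller needs both the BO's reservation lock and the device's LRU lock;
 * "bo" is an assumed, already-initialized buffer object.
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	spin_lock(&bo->bdev->lru_lock);
 *	ttm_bo_move_to_lru_tail(bo);
 *	spin_unlock(&bo->bdev->lru_lock);
 *	dma_resv_unlock(bo->base.resv);
 */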

/**
 * ttm_bo_set_bulk_move - update the BO's bulk move object
 *
 * @bo: The buffer object.
 * @bulk: bulk move structure
 *
 * Update the BO's bulk move object, making sure that resources are added and
 * removed as well. A bulk move allows moving many resources on the LRU at
 * once, resulting in much less overhead when maintaining the LRU.
 * The only requirement is that the resources stay together on the LRU and are
 * never separated. This is enforced by setting the bulk_move structure on a
 * BO. ttm_lru_bulk_move_tail() should be used to move all resources to the
 * tail of their LRU list.
 */
void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
			  struct ttm_lru_bulk_move *bulk)
{
	dma_resv_assert_held(bo->base.resv);

	if (bo->bulk_move == bulk)
		return;

	spin_lock(&bo->bdev->lru_lock);
	if (bo->resource)
		ttm_resource_del_bulk_move(bo->resource, bo);
	bo->bulk_move = bulk;
	if (bo->resource)
		ttm_resource_add_bulk_move(bo->resource, bo);
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_set_bulk_move);
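
/*
 * Example (sketch under assumptions): a driver grouping the BOs of one VM
 * into a single bulk move, so the whole group can later be bumped on the
 * LRU in one step. "bdev" and "bo" are assumed to exist; error handling
 * is omitted.
 *
 *	struct ttm_lru_bulk_move bulk;
 *
 *	ttm_lru_bulk_move_init(&bulk);
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ttm_bo_set_bulk_move(bo, &bulk);
 *	dma_resv_unlock(bo->base.resv);
 *
 *	... later, to bump all tracked resources at once: ...
 *
 *	spin_lock(&bdev->lru_lock);
 *	ttm_lru_bulk_move_tail(&bulk);
 *	spin_unlock(&bdev->lru_lock);
 */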

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_resource *mem, bool evict,
				  struct ttm_operation_ctx *ctx,
				  struct ttm_place *hop)
{
	struct ttm_device *bdev = bo->bdev;
	bool old_use_tt, new_use_tt;
	int ret;

	old_use_tt = !bo->resource || ttm_manager_type(bdev, bo->resource->mem_type)->use_tt;
	new_use_tt = ttm_manager_type(bdev, mem->mem_type)->use_tt;

	ttm_bo_unmap_virtual(bo);

	/*
	 * Create and bind a ttm if required.
	 */

	if (new_use_tt) {
		/* Zero init the new TTM structure if the old location should
		 * have used one as well.
		 */
		ret = ttm_tt_create(bo, old_use_tt);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_bo_populate(bo, ctx);
			if (ret)
				goto out_err;
		}
	}

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (ret)
		goto out_err;

	ret = bdev->funcs->move(bo, evict, ctx, mem, hop);
	if (ret) {
		if (ret == -EMULTIHOP)
			return ret;
		goto out_err;
	}

	ctx->bytes_moved += bo->base.size;
	return 0;

out_err:
	if (!old_use_tt)
		ttm_bo_tt_destroy(bo);

	return ret;
}

/*
 * Called while the BO is reserved (bo::resv held).
 * Releases the GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources. The caller remains responsible for
 * releasing the bo::resv lock afterwards.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->funcs->delete_mem_notify)
		bo->bdev->funcs->delete_mem_notify(bo);

	ttm_bo_tt_destroy(bo);
	ttm_resource_free(bo, &bo->resource);
}

static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
	int r;

	if (bo->base.resv == &bo->base._resv)
		return 0;

	BUG_ON(!dma_resv_trylock(&bo->base._resv));

	r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
	dma_resv_unlock(&bo->base._resv);
	if (r)
		return r;

	if (bo->type != ttm_bo_type_sg) {
		/* This works because the BO is about to be destroyed and nobody
		 * references it anymore. The only tricky case is the trylock on
		 * the resv object while holding the lru_lock.
		 */
		spin_lock(&bo->bdev->lru_lock);
		bo->base.resv = &bo->base._resv;
		spin_unlock(&bo->bdev->lru_lock);
	}

	return r;
}

static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
	struct dma_resv *resv = &bo->base._resv;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (!fence->ops->signaled)
			dma_fence_enable_sw_signaling(fence);
	}
	dma_resv_iter_end(&cursor);
}

/*
 * Block for the dma_resv object to become idle, lock the buffer and clean up
 * the resource and tt object.
 */
static void ttm_bo_delayed_delete(struct work_struct *work)
{
	struct ttm_buffer_object *bo;

	bo = container_of(work, typeof(*bo), delayed_delete);

	dma_resv_wait_timeout(&bo->base._resv, DMA_RESV_USAGE_BOOKKEEP, false,
			      MAX_SCHEDULE_TIMEOUT);
	dma_resv_lock(bo->base.resv, NULL);
	ttm_bo_cleanup_memtype_use(bo);
	dma_resv_unlock(bo->base.resv);
	ttm_bo_put(bo);
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
		container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_device *bdev = bo->bdev;
	int ret;

	WARN_ON_ONCE(bo->pin_count);
	WARN_ON_ONCE(bo->bulk_move);

	if (!bo->deleted) {
		ret = ttm_bo_individualize_resv(bo);
		if (ret) {
			/* Last resort: if we fail to allocate memory for the
			 * fences, block for the BO to become idle.
			 */
			dma_resv_wait_timeout(bo->base.resv,
					      DMA_RESV_USAGE_BOOKKEEP, false,
					      30 * HZ);
		}

		if (bo->bdev->funcs->release_notify)
			bo->bdev->funcs->release_notify(bo);

		drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
		ttm_mem_io_free(bdev, bo->resource);

		if (!dma_resv_test_signaled(&bo->base._resv,
					    DMA_RESV_USAGE_BOOKKEEP) ||
		    (want_init_on_free() && (bo->ttm != NULL)) ||
		    bo->type == ttm_bo_type_sg ||
		    !dma_resv_trylock(bo->base.resv)) {
			/* The BO is not idle, resurrect it for delayed destroy */
			ttm_bo_flush_all_fences(bo);
			bo->deleted = true;

			spin_lock(&bo->bdev->lru_lock);

			/*
			 * Make pinned bos immediately available to
			 * shrinkers, now that they are queued for
			 * destruction.
			 *
			 * FIXME: QXL is triggering this. Can be removed when the
			 * driver is fixed.
			 */
			if (bo->pin_count) {
				bo->pin_count = 0;
				ttm_resource_move_to_lru_tail(bo->resource);
			}

			kref_init(&bo->kref);
			spin_unlock(&bo->bdev->lru_lock);

			INIT_WORK(&bo->delayed_delete, ttm_bo_delayed_delete);

			/* Schedule the worker on the closest NUMA node. This
			 * improves performance since system memory might be
			 * cleared on free and that is best done on a CPU core
			 * close to it.
			 */
			queue_work_node(bdev->pool.nid, bdev->wq, &bo->delayed_delete);
			return;
		}

		ttm_bo_cleanup_memtype_use(bo);
		dma_resv_unlock(bo->base.resv);
	}

	atomic_dec(&ttm_glob.bo_count);
	bo->destroy(bo);
}

/**
 * ttm_bo_put
 *
 * @bo: The buffer object.
 *
 * Unreference a buffer object.
 */
void ttm_bo_put(struct ttm_buffer_object *bo)
{
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_put);

static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
				     struct ttm_operation_ctx *ctx,
				     struct ttm_place *hop)
{
	struct ttm_placement hop_placement;
	struct ttm_resource *hop_mem;
	int ret;

	hop_placement.num_placement = 1;
	hop_placement.placement = hop;

	/* find space in the bounce domain */
	ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
	if (ret)
		return ret;
	/* move to the bounce domain */
	ret = ttm_bo_handle_move_mem(bo, hop_mem, false, ctx, NULL);
	if (ret) {
		ttm_resource_free(bo, &hop_mem);
		return ret;
	}
	return 0;
}

static int ttm_bo_evict(struct ttm_buffer_object *bo,
			struct ttm_operation_ctx *ctx)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource *evict_mem;
	struct ttm_placement placement;
	struct ttm_place hop;
	int ret = 0;

	memset(&hop, 0, sizeof(hop));

	dma_resv_assert_held(bo->base.resv);

	placement.num_placement = 0;
	bdev->funcs->evict_flags(bo, &placement);

	if (!placement.num_placement) {
		ret = ttm_bo_wait_ctx(bo, ctx);
		if (ret)
			return ret;

		/*
		 * Since we've already synced, this frees backing store
		 * immediately.
		 */
		return ttm_bo_pipeline_gutting(bo);
	}

	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	do {
		ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
		if (ret != -EMULTIHOP)
			break;

		ret = ttm_bo_bounce_temp_buffer(bo, ctx, &hop);
	} while (!ret);

	if (ret) {
		ttm_resource_free(bo, &evict_mem);
		if (ret != -ERESTARTSYS && ret != -EINTR)
			pr_err("Buffer eviction failed\n");
	}
out:
	return ret;
}

/**
 * ttm_bo_eviction_valuable
 *
 * @bo: The buffer object to evict
 * @place: the placement we need to make room for
 *
 * Check if it is valuable to evict the BO to make room for the given placement.
 */
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
			      const struct ttm_place *place)
{
	struct ttm_resource *res = bo->resource;
	struct ttm_device *bdev = bo->bdev;

	dma_resv_assert_held(bo->base.resv);
	if (bo->resource->mem_type == TTM_PL_SYSTEM)
		return true;

	/* Don't evict this BO if it's outside of the
	 * requested placement range
	 */
	return ttm_resource_intersects(bdev, res, place, bo->base.size);
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);
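
/*
 * Example (sketch): a driver typically wires this default implementation
 * into its struct ttm_device_funcs, overriding it only when some BOs
 * (e.g. ones currently busy in hardware) must never be picked for
 * eviction. The surrounding initializer is an assumption for illustration.
 *
 *	static struct ttm_device_funcs my_driver_ttm_funcs = {
 *		...
 *		.eviction_valuable = ttm_bo_eviction_valuable,
 *		...
 *	};
 */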

/**
 * ttm_bo_evict_first() - Evict the first bo on the manager's LRU list.
 * @bdev: The ttm device.
 * @man: The manager whose bo to evict.
 * @ctx: The TTM operation ctx governing the eviction.
 *
 * Return: 0 if successful or the resource disappeared. Negative error code on error.
 */
int ttm_bo_evict_first(struct ttm_device *bdev, struct ttm_resource_manager *man,
		       struct ttm_operation_ctx *ctx)
{
	struct ttm_resource_cursor cursor;
	struct ttm_buffer_object *bo;
	struct ttm_resource *res;
	unsigned int mem_type;
	int ret = 0;

	spin_lock(&bdev->lru_lock);
	ttm_resource_cursor_init(&cursor, man);
	res = ttm_resource_manager_first(&cursor);
	ttm_resource_cursor_fini(&cursor);
	if (!res) {
		ret = -ENOENT;
		goto out_no_ref;
	}
	bo = res->bo;
	if (!ttm_bo_get_unless_zero(bo))
		goto out_no_ref;
	mem_type = res->mem_type;
	spin_unlock(&bdev->lru_lock);
	ret = ttm_bo_reserve(bo, ctx->interruptible, ctx->no_wait_gpu, NULL);
	if (ret)
		goto out_no_lock;
	if (!bo->resource || bo->resource->mem_type != mem_type)
		goto out_bo_moved;

	if (bo->deleted) {
		ret = ttm_bo_wait_ctx(bo, ctx);
		if (!ret)
			ttm_bo_cleanup_memtype_use(bo);
	} else {
		ret = ttm_bo_evict(bo, ctx);
	}
out_bo_moved:
	dma_resv_unlock(bo->base.resv);
out_no_lock:
	ttm_bo_put(bo);
	return ret;

out_no_ref:
	spin_unlock(&bdev->lru_lock);
	return ret;
}

/**
 * struct ttm_bo_evict_walk - Parameters for the evict walk.
 */
struct ttm_bo_evict_walk {
	/** @walk: The walk base parameters. */
	struct ttm_lru_walk walk;
	/** @place: The place passed to the resource allocation. */
	const struct ttm_place *place;
	/** @evictor: The buffer object we're trying to make room for. */
	struct ttm_buffer_object *evictor;
	/** @res: The allocated resource if any. */
	struct ttm_resource **res;
	/** @evicted: Number of successful evictions. */
	unsigned long evicted;

	/** @limit_pool: Which pool limit we should test against. */
	struct dmem_cgroup_pool_state *limit_pool;
	/** @try_low: Whether we should attempt to evict BOs with a low watermark threshold. */
	bool try_low;
	/** @hit_low: Set if we cannot evict a BO because @try_low is false (first pass). */
	bool hit_low;
};

static s64 ttm_bo_evict_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
{
	struct ttm_bo_evict_walk *evict_walk =
		container_of(walk, typeof(*evict_walk), walk);
	s64 lret;

	if (!dmem_cgroup_state_evict_valuable(evict_walk->limit_pool, bo->resource->css,
					      evict_walk->try_low, &evict_walk->hit_low))
		return 0;

	if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, evict_walk->place))
		return 0;

	if (bo->deleted) {
		lret = ttm_bo_wait_ctx(bo, walk->ctx);
		if (!lret)
			ttm_bo_cleanup_memtype_use(bo);
	} else {
		lret = ttm_bo_evict(bo, walk->ctx);
	}

	if (lret)
		goto out;

	evict_walk->evicted++;
	if (evict_walk->res)
		lret = ttm_resource_alloc(evict_walk->evictor, evict_walk->place,
					  evict_walk->res, NULL);
	if (lret == 0)
		return 1;
out:
	/* Errors that should terminate the walk. */
	if (lret == -ENOSPC)
		return -EBUSY;

	return lret;
}

static const struct ttm_lru_walk_ops ttm_evict_walk_ops = {
	.process_bo = ttm_bo_evict_cb,
};

static int ttm_bo_evict_alloc(struct ttm_device *bdev,
			      struct ttm_resource_manager *man,
			      const struct ttm_place *place,
			      struct ttm_buffer_object *evictor,
			      struct ttm_operation_ctx *ctx,
			      struct ww_acquire_ctx *ticket,
			      struct ttm_resource **res,
			      struct dmem_cgroup_pool_state *limit_pool)
{
	struct ttm_bo_evict_walk evict_walk = {
		.walk = {
			.ops = &ttm_evict_walk_ops,
			.ctx = ctx,
			.ticket = ticket,
		},
		.place = place,
		.evictor = evictor,
		.res = res,
		.limit_pool = limit_pool,
	};
	s64 lret;

	evict_walk.walk.trylock_only = true;
	lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);

	/* One more attempt if we hit the low limit. */
	if (!lret && evict_walk.hit_low) {
		evict_walk.try_low = true;
		lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
	}
	if (lret || !ticket)
		goto out;

	/* Reset the low limit. */
	evict_walk.try_low = evict_walk.hit_low = false;
	/* If ticket-locking, repeat while making progress. */
	evict_walk.walk.trylock_only = false;

retry:
	do {
		/* The walk may clear the evict_walk.walk.ticket field */
		evict_walk.walk.ticket = ticket;
		evict_walk.evicted = 0;
		lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
	} while (!lret && evict_walk.evicted);

	/* Did we hit the low limit? Then try once more. */
	if (!lret && evict_walk.hit_low && !evict_walk.try_low) {
		evict_walk.try_low = true;
		goto retry;
	}
out:
	if (lret < 0)
		return lret;
	if (lret == 0)
		return -EBUSY;
	return 0;
}

/**
 * ttm_bo_pin - Pin the buffer object.
 * @bo: The buffer object to pin
 *
 * Make sure the buffer is not evicted any more during memory pressure.
 * @bo must be unpinned again by calling ttm_bo_unpin().
 */
void ttm_bo_pin(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);
	WARN_ON_ONCE(!kref_read(&bo->kref));
	spin_lock(&bo->bdev->lru_lock);
	if (bo->resource)
		ttm_resource_del_bulk_move(bo->resource, bo);
	if (!bo->pin_count++ && bo->resource)
		ttm_resource_move_to_lru_tail(bo->resource);
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_pin);

/**
 * ttm_bo_unpin - Unpin the buffer object.
 * @bo: The buffer object to unpin
 *
 * Allows the buffer object to be evicted again during memory pressure.
 */
void ttm_bo_unpin(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);
	WARN_ON_ONCE(!kref_read(&bo->kref));
	if (WARN_ON_ONCE(!bo->pin_count))
		return;

	spin_lock(&bo->bdev->lru_lock);
	if (!--bo->pin_count && bo->resource) {
		ttm_resource_add_bulk_move(bo->resource, bo);
		ttm_resource_move_to_lru_tail(bo->resource);
	}
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unpin);
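
/*
 * Example (illustrative sketch): pinning a BO for the duration of a
 * hardware access and unpinning it afterwards. Both calls require the
 * reservation lock to be held; "bo" is an assumed buffer object.
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ttm_bo_pin(bo);
 *	dma_resv_unlock(bo->base.resv);
 *
 *	... hardware uses the buffer at a fixed location ...
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ttm_bo_unpin(bo);
 *	dma_resv_unlock(bo->base.resv);
 */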

/*
 * Add the last move fence to the BO as kernel dependency and reserve a new
 * fence slot.
 */
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
				 struct ttm_resource_manager *man,
				 bool no_wait_gpu)
{
	struct dma_fence *fence;
	int ret;

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (!fence)
		return 0;

	if (no_wait_gpu) {
		ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
		dma_fence_put(fence);
		return ret;
	}

	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	dma_fence_put(fence);
	return ret;
}

/**
 * ttm_bo_alloc_resource - Allocate backing store for a BO
 *
 * @bo: Pointer to a struct ttm_buffer_object of which we want a resource for
 * @placement: Proposed new placement for the buffer object
 * @ctx: if and how to sleep, lock buffers and alloc memory
 * @force_space: If we should evict buffers to force space
 * @res: The resulting struct ttm_resource.
 *
 * Allocates a resource for the buffer object pointed to by @bo, using the
 * placement flags in @placement, potentially evicting other buffer objects when
 * @force_space is true.
 * This function may sleep while waiting for resources to become available.
 * Returns:
 * -EBUSY: No space available (only if @ctx::no_wait_gpu is true).
 * -ENOSPC: Could not allocate space for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
				 struct ttm_placement *placement,
				 struct ttm_operation_ctx *ctx,
				 bool force_space,
				 struct ttm_resource **res)
{
	struct ttm_device *bdev = bo->bdev;
	struct ww_acquire_ctx *ticket;
	int i, ret;

	ticket = dma_resv_locking_ctx(bo->base.resv);
	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (unlikely(ret))
		return ret;

	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];
		struct dmem_cgroup_pool_state *limit_pool = NULL;
		struct ttm_resource_manager *man;
		bool may_evict;

		man = ttm_manager_type(bdev, place->mem_type);
		if (!man || !ttm_resource_manager_used(man))
			continue;

		if (place->flags & (force_space ? TTM_PL_FLAG_DESIRED :
				    TTM_PL_FLAG_FALLBACK))
			continue;

		may_evict = (force_space && place->mem_type != TTM_PL_SYSTEM);
		ret = ttm_resource_alloc(bo, place, res, force_space ? &limit_pool : NULL);
		if (ret) {
			if (ret != -ENOSPC && ret != -EAGAIN) {
				dmem_cgroup_pool_state_put(limit_pool);
				return ret;
			}
			if (!may_evict) {
				dmem_cgroup_pool_state_put(limit_pool);
				continue;
			}

			ret = ttm_bo_evict_alloc(bdev, man, place, bo, ctx,
						 ticket, res, limit_pool);
			dmem_cgroup_pool_state_put(limit_pool);
			if (ret == -EBUSY)
				continue;
			if (ret)
				return ret;
		}

		ret = ttm_bo_add_move_fence(bo, man, ctx->no_wait_gpu);
		if (unlikely(ret)) {
			ttm_resource_free(bo, res);
			if (ret == -EBUSY)
				continue;

			return ret;
		}
		return 0;
	}

	return -ENOSPC;
}

/*
 * ttm_bo_mem_space - Wrapper around ttm_bo_alloc_resource
 *
 * @bo: Pointer to a struct ttm_buffer_object of which we want a resource for
 * @placement: Proposed new placement for the buffer object
 * @res: The resulting struct ttm_resource.
 * @ctx: if and how to sleep, lock buffers and alloc memory
 *
 * Tries both idle allocation and forceful eviction of buffers. See
 * ttm_bo_alloc_resource for details.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_resource **res,
		     struct ttm_operation_ctx *ctx)
{
	bool force_space = false;
	int ret;

	do {
		ret = ttm_bo_alloc_resource(bo, placement, ctx,
					    force_space, res);
		force_space = !force_space;
	} while (ret == -ENOSPC && force_space);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
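
/*
 * Example (sketch under assumptions): asking for a VRAM resource for a
 * reserved BO. TTM_PL_VRAM is the conventional device-memory type; the
 * single-place placement and error handling are illustrative only.
 *
 *	struct ttm_place place = { .mem_type = TTM_PL_VRAM };
 *	struct ttm_placement placement = {
 *		.num_placement = 1,
 *		.placement = &place,
 *	};
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *	struct ttm_resource *res;
 *	int ret;
 *
 *	ret = ttm_bo_mem_space(bo, &placement, &res, &ctx);
 *	if (ret)
 *		return ret;
 */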

/**
 * ttm_bo_validate
 *
 * @bo: The buffer object.
 * @placement: Proposed placement for the buffer object.
 * @ctx: validation parameters.
 *
 * Changes placement and caching policy of the buffer object
 * according to the proposed placement.
 * Returns
 * -EINVAL on invalid proposed placement.
 * -ENOMEM on out-of-memory condition.
 * -EBUSY if @ctx::no_wait_gpu is true and the buffer is busy.
 * -ERESTARTSYS if interrupted by a signal.
 */
int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    struct ttm_operation_ctx *ctx)
{
	struct ttm_resource *res;
	struct ttm_place hop;
	bool force_space;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	/*
	 * Remove the backing store if no placement is given.
	 */
	if (!placement->num_placement)
		return ttm_bo_pipeline_gutting(bo);

	force_space = false;
	do {
		/* Check whether we need to move buffer. */
		if (bo->resource &&
		    ttm_resource_compatible(bo->resource, placement,
					    force_space))
			return 0;

		/* Moving of pinned BOs is forbidden */
		if (bo->pin_count)
			return -EINVAL;

		/*
		 * Determine where to move the buffer.
		 *
		 * If the driver determines that the move needs an extra
		 * step, it returns -EMULTIHOP: the buffer is then moved
		 * to the temporary stop and the driver is called again
		 * to make the second hop.
		 */
		ret = ttm_bo_alloc_resource(bo, placement, ctx, force_space,
					    &res);
		force_space = !force_space;
		if (ret == -ENOSPC)
			continue;
		if (ret)
			return ret;

bounce:
		ret = ttm_bo_handle_move_mem(bo, res, false, ctx, &hop);
		if (ret == -EMULTIHOP) {
			ret = ttm_bo_bounce_temp_buffer(bo, ctx, &hop);
			/* try and move to final place now. */
			if (!ret)
				goto bounce;
		}
		if (ret) {
			ttm_resource_free(bo, &res);
			return ret;
		}

	} while (ret && force_space);

	/* For backward compatibility with userspace */
	if (ret == -ENOSPC)
		return -ENOMEM;

	/*
	 * We might need to add a TTM.
	 */
	if (!bo->resource || bo->resource->mem_type == TTM_PL_SYSTEM) {
		ret = ttm_tt_create(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);
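
/*
 * Example (illustrative sketch): moving an already-initialized BO into a
 * new placement. The BO must be reserved; the single-place placement and
 * the use of TTM_PL_SYSTEM here are assumptions for illustration.
 *
 *	struct ttm_place place = { .mem_type = TTM_PL_SYSTEM };
 *	struct ttm_placement placement = {
 *		.num_placement = 1,
 *		.placement = &place,
 *	};
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *	int ret;
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ret = ttm_bo_validate(bo, &placement, &ctx);
 *	dma_resv_unlock(bo->base.resv);
 */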

/**
 * ttm_bo_init_reserved
 *
 * @bdev: Pointer to a ttm_device struct.
 * @bo: Pointer to a ttm_buffer_object to be initialized.
 * @type: Requested type of buffer object.
 * @placement: Initial placement for buffer object.
 * @alignment: Data alignment in pages.
 * @ctx: TTM operation context for memory allocation.
 * @sg: Scatter-gather table.
 * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
 * @destroy: Destroy function. Use NULL for kfree().
 *
 * This function initializes a pre-allocated struct ttm_buffer_object.
 * As this object may be part of a larger structure, this function,
 * together with the @destroy function, enables driver-specific objects
 * derived from a ttm_buffer_object.
 *
 * On successful return, the caller owns an object kref to @bo. The kref is
 * usually set to 1, but note that in some situations, other tasks may
 * already be holding references to @bo as well.
 * Furthermore, if resv == NULL, the buffer's reservation lock will be held,
 * and it is the caller's responsibility to call ttm_bo_unreserve.
 *
 * If a failure occurs, the function will call the @destroy function. Thus,
 * after a failure, dereferencing @bo is illegal and will likely cause memory
 * corruption.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
 */
int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
			 enum ttm_bo_type type, struct ttm_placement *placement,
			 uint32_t alignment, struct ttm_operation_ctx *ctx,
			 struct sg_table *sg, struct dma_resv *resv,
			 void (*destroy) (struct ttm_buffer_object *))
{
	int ret;

	kref_init(&bo->kref);
	bo->bdev = bdev;
	bo->type = type;
	bo->page_alignment = alignment;
	bo->destroy = destroy;
	bo->pin_count = 0;
	bo->sg = sg;
	bo->bulk_move = NULL;
	if (resv)
		bo->base.resv = resv;
	else
		bo->base.resv = &bo->base._resv;
	atomic_inc(&ttm_glob.bo_count);

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device || bo->type == ttm_bo_type_sg) {
		ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
					 PFN_UP(bo->base.size));
		if (ret)
			goto err_put;
	}

	/* passed reservation objects should already be locked,
	 * since otherwise lockdep will be angered in radeon.
	 */
	if (!resv)
		WARN_ON(!dma_resv_trylock(bo->base.resv));
	else
		dma_resv_assert_held(resv);

	ret = ttm_bo_validate(bo, placement, ctx);
	if (unlikely(ret))
		goto err_unlock;

	return 0;

err_unlock:
	if (!resv)
		dma_resv_unlock(bo->base.resv);

err_put:
	ttm_bo_put(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_init_reserved);
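
/*
 * Example (sketch under assumptions): a driver BO embedding a
 * ttm_buffer_object. "my_bo", "my_bo_destroy" and the prior initialization
 * of bo->base (so that bo->base.size is valid, e.g. via
 * drm_gem_object_init()) are assumptions for illustration.
 *
 *	struct my_bo {
 *		struct ttm_buffer_object base;
 *		// driver-private fields
 *	};
 *
 *	static void my_bo_destroy(struct ttm_buffer_object *tbo)
 *	{
 *		kfree(container_of(tbo, struct my_bo, base));
 *	}
 *
 *	ret = ttm_bo_init_reserved(bdev, &bo->base, ttm_bo_type_device,
 *				   &placement, 0, &ctx, NULL, NULL,
 *				   my_bo_destroy);
 *	if (ret)
 *		return ret;
 *	ttm_bo_unreserve(&bo->base);
 */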

/**
 * ttm_bo_init_validate
 *
 * @bdev: Pointer to a ttm_device struct.
 * @bo: Pointer to a ttm_buffer_object to be initialized.
 * @type: Requested type of buffer object.
 * @placement: Initial placement for buffer object.
 * @alignment: Data alignment in pages.
 * @interruptible: If needing to sleep to wait for GPU resources,
 * sleep interruptible.
 * @sg: Scatter-gather table.
 * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
 * @destroy: Destroy function. Use NULL for kfree().
 *
 * This function initializes a pre-allocated struct ttm_buffer_object.
 * As this object may be part of a larger structure, this function,
 * together with the @destroy function,
 * enables driver-specific objects derived from a ttm_buffer_object.
 *
 * On successful return, the caller owns an object kref to @bo. The kref is
 * usually set to 1, but note that in some situations, other tasks may
 * already be holding references to @bo as well.
 *
 * If a failure occurs, the function will call the @destroy function. Thus,
 * after a failure, dereferencing @bo is illegal and will likely cause memory
 * corruption.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
 */
int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
			 enum ttm_bo_type type, struct ttm_placement *placement,
			 uint32_t alignment, bool interruptible,
			 struct sg_table *sg, struct dma_resv *resv,
			 void (*destroy) (struct ttm_buffer_object *))
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	int ret;

	ret = ttm_bo_init_reserved(bdev, bo, type, placement, alignment, &ctx,
				   sg, resv, destroy);
	if (ret)
		return ret;

	if (!resv)
		ttm_bo_unreserve(bo);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_validate);

/*
 * buffer object vm functions.
 */

/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
	ttm_mem_io_free(bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

/**
 * ttm_bo_wait_ctx - wait for buffer idle.
 *
 * @bo: The buffer object.
 * @ctx: defines how to wait
 *
 * Waits for the buffer to be idle. The timeout used depends on the context.
 * Returns -EBUSY if the wait timed out, -ERESTARTSYS if interrupted by a
 * signal or zero on success.
 */
int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
{
	long ret;

	if (ctx->no_wait_gpu) {
		if (dma_resv_test_signaled(bo->base.resv,
					   DMA_RESV_USAGE_BOOKKEEP))
			return 0;
		else
			return -EBUSY;
	}

	ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
				    ctx->interruptible, 15 * HZ);
	if (unlikely(ret < 0))
		return ret;
	if (unlikely(ret == 0))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait_ctx);
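
/*
 * Example (sketch): how the two ttm_operation_ctx fields steer the wait.
 * With no_wait_gpu set, the call never blocks and reports -EBUSY instead;
 * otherwise it sleeps (interruptibly here) for up to 15 seconds.
 *
 *	struct ttm_operation_ctx ctx = {
 *		.interruptible = true,
 *		.no_wait_gpu = false,
 *	};
 *	int ret = ttm_bo_wait_ctx(bo, &ctx);
 *	if (ret)
 *		return ret;
 */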

/**
 * struct ttm_bo_swapout_walk - Parameters for the swapout walk
 */
struct ttm_bo_swapout_walk {
	/** @walk: The walk base parameters. */
	struct ttm_lru_walk walk;
	/** @gfp_flags: The gfp flags to use for ttm_tt_swapout() */
	gfp_t gfp_flags;
	/** @hit_low: Set if we cannot swap out a BO because @evict_low is false (first pass). */
	/** @evict_low: Whether we should attempt to swap out BOs with a low watermark threshold. */
	bool hit_low, evict_low;
};

static s64
ttm_bo_swapout_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
{
	struct ttm_place place = {.mem_type = bo->resource->mem_type};
	struct ttm_bo_swapout_walk *swapout_walk =
		container_of(walk, typeof(*swapout_walk), walk);
	struct ttm_operation_ctx *ctx = walk->ctx;
	s64 ret;

	/*
	 * While the bo may already reside in SYSTEM placement, set
	 * SYSTEM as new placement to cover also the move further below.
	 * The driver may use the fact that we're moving from SYSTEM
	 * as an indication that we're about to swap out.
	 */
	if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, &place)) {
		ret = -EBUSY;
		goto out;
	}

	if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
	    bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL ||
	    bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED) {
		ret = -EBUSY;
		goto out;
	}

	if (bo->deleted) {
		pgoff_t num_pages = bo->ttm->num_pages;

		ret = ttm_bo_wait_ctx(bo, ctx);
		if (ret)
			goto out;

		ttm_bo_cleanup_memtype_use(bo);
		ret = num_pages;
		goto out;
	}

	/*
	 * Move to system cached
	 */
	if (bo->resource->mem_type != TTM_PL_SYSTEM) {
		struct ttm_resource *evict_mem;
		struct ttm_place hop;

		memset(&hop, 0, sizeof(hop));
		place.mem_type = TTM_PL_SYSTEM;
		ret = ttm_resource_alloc(bo, &place, &evict_mem, NULL);
		if (ret)
			goto out;

		ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
		if (ret) {
			WARN(ret == -EMULTIHOP,
			     "Unexpected multihop in swapout - likely driver bug.\n");
			ttm_resource_free(bo, &evict_mem);
			goto out;
		}
	}

	/*
	 * Make sure BO is idle.
	 */
	ret = ttm_bo_wait_ctx(bo, ctx);
	if (ret)
		goto out;

	ttm_bo_unmap_virtual(bo);
	if (bo->bdev->funcs->swap_notify)
		bo->bdev->funcs->swap_notify(bo);

	if (ttm_tt_is_populated(bo->ttm)) {
		spin_lock(&bo->bdev->lru_lock);
		ttm_resource_del_bulk_move(bo->resource, bo);
		spin_unlock(&bo->bdev->lru_lock);

		ret = ttm_tt_swapout(bo->bdev, bo->ttm, swapout_walk->gfp_flags);

		spin_lock(&bo->bdev->lru_lock);
		if (ret)
			ttm_resource_add_bulk_move(bo->resource, bo);
		ttm_resource_move_to_lru_tail(bo->resource);
		spin_unlock(&bo->bdev->lru_lock);
	}

out:
	/* Consider -ENOMEM and -ENOSPC non-fatal. */
	if (ret == -ENOMEM || ret == -ENOSPC)
		ret = -EBUSY;

	return ret;
}

const struct ttm_lru_walk_ops ttm_swap_ops = {
	.process_bo = ttm_bo_swapout_cb,
};

/**
 * ttm_bo_swapout() - Swap out buffer objects on the LRU list to shmem.
 * @bdev: The ttm device.
 * @ctx: The ttm_operation_ctx governing the swapout operation.
 * @man: The resource manager whose resources / buffer objects are
 * going to be swapped out.
 * @gfp_flags: The gfp flags used for shmem page allocations.
 * @target: The desired number of bytes to swap out.
 *
 * Return: The number of bytes actually swapped out, or negative error code
 * on error.
 */
s64 ttm_bo_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
		   struct ttm_resource_manager *man, gfp_t gfp_flags,
		   s64 target)
{
	struct ttm_bo_swapout_walk swapout_walk = {
		.walk = {
			.ops = &ttm_swap_ops,
			.ctx = ctx,
			.trylock_only = true,
		},
		.gfp_flags = gfp_flags,
	};

	return ttm_lru_walk_for_evict(&swapout_walk.walk, bdev, man, target);
}

void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
{
	if (bo->ttm == NULL)
		return;

	ttm_tt_unpopulate(bo->bdev, bo->ttm);
	ttm_tt_destroy(bo->bdev, bo->ttm);
	bo->ttm = NULL;
}

/**
 * ttm_bo_populate() - Ensure that a buffer object has backing pages
 * @bo: The buffer object
 * @ctx: The ttm_operation_ctx governing the operation.
 *
 * For buffer objects in a memory type whose manager uses
 * struct ttm_tt for backing pages, ensure those backing pages
 * are present and with valid content. The bo's resource is also
 * placed on the correct LRU list if it was previously swapped
 * out.
 *
 * Return: 0 if successful, negative error code on failure.
 * Note: May return -EINTR or -ERESTARTSYS if @ctx::interruptible
 * is set to true.
 */
int ttm_bo_populate(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx)
{
	struct ttm_tt *tt = bo->ttm;
	bool swapped;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	if (!tt)
		return 0;

	swapped = ttm_tt_is_swapped(tt);
	ret = ttm_tt_populate(bo->bdev, tt, ctx);
	if (ret)
		return ret;

	if (swapped && !ttm_tt_is_swapped(tt) && !bo->pin_count &&
	    bo->resource) {
		spin_lock(&bo->bdev->lru_lock);
		ttm_resource_add_bulk_move(bo->resource, bo);
		ttm_resource_move_to_lru_tail(bo->resource);
		spin_unlock(&bo->bdev->lru_lock);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_populate);