1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
2 /**************************************************************************
3 *
4 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 **************************************************************************/
28 /*
29 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
30 */
31
32 #define pr_fmt(fmt) "[TTM] " fmt
33
34 #include <drm/ttm/ttm_bo.h>
35 #include <drm/ttm/ttm_placement.h>
36 #include <drm/ttm/ttm_tt.h>
37
38 #include <linux/jiffies.h>
39 #include <linux/slab.h>
40 #include <linux/sched.h>
41 #include <linux/mm.h>
42 #include <linux/file.h>
43 #include <linux/module.h>
44 #include <linux/atomic.h>
45 #include <linux/cgroup_dmem.h>
46 #include <linux/dma-resv.h>
47
48 #include "ttm_module.h"
49
50 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
51 struct ttm_placement *placement)
52 {
53 struct drm_printer p = drm_dbg_printer(NULL, DRM_UT_CORE, TTM_PFX);
54 struct ttm_resource_manager *man;
55 int i, mem_type;
56
57 for (i = 0; i < placement->num_placement; i++) {
58 mem_type = placement->placement[i].mem_type;
59 drm_printf(&p, " placement[%d]=0x%08X (%d)\n",
60 i, placement->placement[i].flags, mem_type);
61 man = ttm_manager_type(bo->bdev, mem_type);
62 ttm_resource_manager_debug(man, &p);
63 }
64 }
65
66 /**
67 * ttm_bo_move_to_lru_tail
68 *
69 * @bo: The buffer object.
70 *
71 * Move this BO to the tail of all lru lists used to lookup and reserve an
72 * object. This function must be called with struct ttm_device::lru_lock
73 * held, and is used to make a BO less likely to be considered for eviction.
74 */
75 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
76 {
77 dma_resv_assert_held(bo->base.resv);
78
79 if (bo->resource)
80 ttm_resource_move_to_lru_tail(bo->resource);
81 }
82 EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
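
/*
 * Illustrative sketch (not part of this file): a driver that has just used a
 * BO and wants it to be considered last for eviction can bump it on the LRU.
 * "bo" is assumed to be a reserved struct ttm_buffer_object.
 *
 *      spin_lock(&bo->bdev->lru_lock);
 *      ttm_bo_move_to_lru_tail(bo);
 *      spin_unlock(&bo->bdev->lru_lock);
 */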
83
84 /**
85 * ttm_bo_set_bulk_move - update the BO's bulk move object
86 *
87 * @bo: The buffer object.
88 * @bulk: bulk move structure
89 *
90 * Update the BO's bulk move object, making sure that resources are added/removed
91 * as well. A bulk move allows many resources to be moved on the LRU at once,
92 * greatly reducing the overhead of maintaining the LRU.
93 * The only requirement is that the resources stay together on the LRU and are
94 * never separated. This is enforced by setting the bulk_move structure on a BO.
95 * ttm_lru_bulk_move_tail() should be used to move all resources to the tail of
96 * their LRU list.
97 */
98 void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
99 struct ttm_lru_bulk_move *bulk)
100 {
101 dma_resv_assert_held(bo->base.resv);
102
103 if (bo->bulk_move == bulk)
104 return;
105
106 spin_lock(&bo->bdev->lru_lock);
107 if (bo->resource)
108 ttm_resource_del_bulk_move(bo->resource, bo);
109 bo->bulk_move = bulk;
110 if (bo->resource)
111 ttm_resource_add_bulk_move(bo->resource, bo);
112 spin_unlock(&bo->bdev->lru_lock);
113 }
114 EXPORT_SYMBOL(ttm_bo_set_bulk_move);
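
/*
 * Illustrative sketch (not part of this file): grouping all BOs of one VM
 * behind a single bulk move so their LRU positions can be updated in one go.
 * The "vm" structure embedding a struct ttm_lru_bulk_move is hypothetical
 * driver code, and the BOs are assumed to be reserved when attached.
 *
 *      ttm_lru_bulk_move_init(&vm->lru_bulk_move);
 *
 *      ttm_bo_set_bulk_move(bo, &vm->lru_bulk_move);
 *
 *      After command submission, bump all resources of the VM at once:
 *
 *      spin_lock(&bo->bdev->lru_lock);
 *      ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
 *      spin_unlock(&bo->bdev->lru_lock);
 */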
115
116 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
117 struct ttm_resource *mem, bool evict,
118 struct ttm_operation_ctx *ctx,
119 struct ttm_place *hop)
120 {
121 struct ttm_device *bdev = bo->bdev;
122 bool old_use_tt, new_use_tt;
123 int ret;
124
125 old_use_tt = !bo->resource || ttm_manager_type(bdev, bo->resource->mem_type)->use_tt;
126 new_use_tt = ttm_manager_type(bdev, mem->mem_type)->use_tt;
127
128 ttm_bo_unmap_virtual(bo);
129
130 /*
131 * Create and bind a ttm if required.
132 */
133
134 if (new_use_tt) {
135 /* Zero init the new TTM structure if the old location should
136 * have used one as well.
137 */
138 ret = ttm_tt_create(bo, old_use_tt);
139 if (ret)
140 goto out_err;
141
142 if (mem->mem_type != TTM_PL_SYSTEM) {
143 ret = ttm_bo_populate(bo, ctx);
144 if (ret)
145 goto out_err;
146 }
147 }
148
149 ret = dma_resv_reserve_fences(bo->base.resv, 1);
150 if (ret)
151 goto out_err;
152
153 ret = bdev->funcs->move(bo, evict, ctx, mem, hop);
154 if (ret) {
155 if (ret == -EMULTIHOP)
156 return ret;
157 goto out_err;
158 }
159
160 ctx->bytes_moved += bo->base.size;
161 return 0;
162
163 out_err:
164 if (!old_use_tt)
165 ttm_bo_tt_destroy(bo);
166
167 return ret;
168 }
169
170 /*
171 * Called with bo::reserved held.
172 * Releases the GPU memory type usage of the BO.
173 * This is the place to put in driver specific hooks to release
174 * driver private resources.
175 * Note that the caller, not this function, releases the bo::reserved lock.
176 */
177
178 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
179 {
180 if (bo->bdev->funcs->delete_mem_notify)
181 bo->bdev->funcs->delete_mem_notify(bo);
182
183 ttm_bo_tt_destroy(bo);
184 ttm_resource_free(bo, &bo->resource);
185 }
186
187 static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
188 {
189 int r;
190
191 if (bo->base.resv == &bo->base._resv)
192 return 0;
193
194 BUG_ON(!dma_resv_trylock(&bo->base._resv));
195
196 r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
197 dma_resv_unlock(&bo->base._resv);
198 if (r)
199 return r;
200
201 if (bo->type != ttm_bo_type_sg) {
202 /* This works because the BO is about to be destroyed and nobody
203 * references it any more. The only tricky case is the trylock on
204 * the resv object while holding the lru_lock.
205 */
206 spin_lock(&bo->bdev->lru_lock);
207 bo->base.resv = &bo->base._resv;
208 spin_unlock(&bo->bdev->lru_lock);
209 }
210
211 return r;
212 }
213
214 static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
215 {
216 struct dma_resv *resv = &bo->base._resv;
217 struct dma_resv_iter cursor;
218 struct dma_fence *fence;
219
220 dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
221 dma_resv_for_each_fence_unlocked(&cursor, fence) {
222 if (!fence->ops->signaled)
223 dma_fence_enable_sw_signaling(fence);
224 }
225 dma_resv_iter_end(&cursor);
226 }
227
228 /*
229 * Block for the dma_resv object to become idle, lock the buffer and clean up
230 * the resource and tt object.
231 */
232 static void ttm_bo_delayed_delete(struct work_struct *work)
233 {
234 struct ttm_buffer_object *bo;
235
236 bo = container_of(work, typeof(*bo), delayed_delete);
237
238 dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP, false,
239 MAX_SCHEDULE_TIMEOUT);
240 dma_resv_lock(bo->base.resv, NULL);
241 ttm_bo_cleanup_memtype_use(bo);
242 dma_resv_unlock(bo->base.resv);
243 ttm_bo_put(bo);
244 }
245
246 static void ttm_bo_release(struct kref *kref)
247 {
248 struct ttm_buffer_object *bo =
249 container_of(kref, struct ttm_buffer_object, kref);
250 struct ttm_device *bdev = bo->bdev;
251 int ret;
252
253 WARN_ON_ONCE(bo->pin_count);
254 WARN_ON_ONCE(bo->bulk_move);
255
256 if (!bo->deleted) {
257 ret = ttm_bo_individualize_resv(bo);
258 if (ret) {
259 /* Last resort, if we fail to allocate memory for the
260 * fences, block for the BO to become idle
261 */
262 dma_resv_wait_timeout(bo->base.resv,
263 DMA_RESV_USAGE_BOOKKEEP, false,
264 30 * HZ);
265 }
266
267 if (bo->bdev->funcs->release_notify)
268 bo->bdev->funcs->release_notify(bo);
269
270 drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
271 ttm_mem_io_free(bdev, bo->resource);
272
273 if (!dma_resv_test_signaled(bo->base.resv,
274 DMA_RESV_USAGE_BOOKKEEP) ||
275 (want_init_on_free() && (bo->ttm != NULL)) ||
276 bo->type == ttm_bo_type_sg ||
277 !dma_resv_trylock(bo->base.resv)) {
278 /* The BO is not idle, resurrect it for delayed destroy */
279 ttm_bo_flush_all_fences(bo);
280 bo->deleted = true;
281
282 spin_lock(&bo->bdev->lru_lock);
283
284 /*
285 * Make pinned bos immediately available to
286 * shrinkers, now that they are queued for
287 * destruction.
288 *
289 * FIXME: QXL is triggering this. Can be removed when the
290 * driver is fixed.
291 */
292 if (bo->pin_count) {
293 bo->pin_count = 0;
294 ttm_resource_move_to_lru_tail(bo->resource);
295 }
296
297 kref_init(&bo->kref);
298 spin_unlock(&bo->bdev->lru_lock);
299
300 INIT_WORK(&bo->delayed_delete, ttm_bo_delayed_delete);
301
302 /* Schedule the worker on the closest NUMA node. This
303 * improves performance since system memory might be
304 * cleared on free and that is best done on a CPU core
305 * close to it.
306 */
307 queue_work_node(bdev->pool.nid, bdev->wq, &bo->delayed_delete);
308 return;
309 }
310
311 ttm_bo_cleanup_memtype_use(bo);
312 dma_resv_unlock(bo->base.resv);
313 }
314
315 atomic_dec(&ttm_glob.bo_count);
316 bo->destroy(bo);
317 }
318
319 /**
320 * ttm_bo_put
321 *
322 * @bo: The buffer object.
323 *
324 * Unreference a buffer object.
325 */
326 void ttm_bo_put(struct ttm_buffer_object *bo)
327 {
328 kref_put(&bo->kref, ttm_bo_release);
329 }
330 EXPORT_SYMBOL(ttm_bo_put);
331
332 static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
333 struct ttm_operation_ctx *ctx,
334 struct ttm_place *hop)
335 {
336 struct ttm_placement hop_placement;
337 struct ttm_resource *hop_mem;
338 int ret;
339
340 hop_placement.num_placement = 1;
341 hop_placement.placement = hop;
342
343 /* find space in the bounce domain */
344 ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
345 if (ret)
346 return ret;
347 /* move to the bounce domain */
348 ret = ttm_bo_handle_move_mem(bo, hop_mem, false, ctx, NULL);
349 if (ret) {
350 ttm_resource_free(bo, &hop_mem);
351 return ret;
352 }
353 return 0;
354 }
355
356 static int ttm_bo_evict(struct ttm_buffer_object *bo,
357 struct ttm_operation_ctx *ctx)
358 {
359 struct ttm_device *bdev = bo->bdev;
360 struct ttm_resource *evict_mem;
361 struct ttm_placement placement;
362 struct ttm_place hop;
363 int ret = 0;
364
365 memset(&hop, 0, sizeof(hop));
366
367 dma_resv_assert_held(bo->base.resv);
368
369 placement.num_placement = 0;
370 bdev->funcs->evict_flags(bo, &placement);
371
372 if (!placement.num_placement) {
373 ret = ttm_bo_wait_ctx(bo, ctx);
374 if (ret)
375 return ret;
376
377 /*
378 * Since we've already synced, this frees backing store
379 * immediately.
380 */
381 return ttm_bo_pipeline_gutting(bo);
382 }
383
384 ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
385 if (ret) {
386 if (ret != -ERESTARTSYS) {
387 pr_err("Failed to find memory space for buffer 0x%p eviction\n",
388 bo);
389 ttm_bo_mem_space_debug(bo, &placement);
390 }
391 goto out;
392 }
393
394 do {
395 ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
396 if (ret != -EMULTIHOP)
397 break;
398
399 ret = ttm_bo_bounce_temp_buffer(bo, ctx, &hop);
400 } while (!ret);
401
402 if (ret) {
403 ttm_resource_free(bo, &evict_mem);
404 if (ret != -ERESTARTSYS && ret != -EINTR)
405 pr_err("Buffer eviction failed\n");
406 }
407 out:
408 return ret;
409 }
410
411 /**
412 * ttm_bo_eviction_valuable
413 *
414 * @bo: The buffer object to evict
415 * @place: the placement we need to make room for
416 *
417 * Check if it is valuable to evict the BO to make room for the given placement.
418 */
419 bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
420 const struct ttm_place *place)
421 {
422 struct ttm_resource *res = bo->resource;
423 struct ttm_device *bdev = bo->bdev;
424
425 dma_resv_assert_held(bo->base.resv);
426 if (bo->resource->mem_type == TTM_PL_SYSTEM)
427 return true;
428
429 /* Don't evict this BO if it's outside of the
430 * requested placement range
431 */
432 return ttm_resource_intersects(bdev, res, place, bo->base.size);
433 }
434 EXPORT_SYMBOL(ttm_bo_eviction_valuable);
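
/*
 * Illustrative sketch (not part of this file): drivers can either point
 * ttm_device_funcs::eviction_valuable directly at this helper or wrap it to
 * veto eviction of BOs the hardware still needs. my_bo_busy_for_hw() is a
 * hypothetical driver-side predicate.
 *
 *      static bool my_eviction_valuable(struct ttm_buffer_object *bo,
 *                                       const struct ttm_place *place)
 *      {
 *              if (my_bo_busy_for_hw(bo))
 *                      return false;
 *
 *              return ttm_bo_eviction_valuable(bo, place);
 *      }
 */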
435
436 /**
437 * ttm_bo_evict_first() - Evict the first bo on the manager's LRU list.
438 * @bdev: The ttm device.
439 * @man: The manager whose bo to evict.
440 * @ctx: The TTM operation ctx governing the eviction.
441 *
442 * Return: 0 if successful or the resource disappeared. Negative error code on error.
443 */
444 int ttm_bo_evict_first(struct ttm_device *bdev, struct ttm_resource_manager *man,
445 struct ttm_operation_ctx *ctx)
446 {
447 struct ttm_resource_cursor cursor;
448 struct ttm_buffer_object *bo;
449 struct ttm_resource *res;
450 unsigned int mem_type;
451 int ret = 0;
452
453 spin_lock(&bdev->lru_lock);
454 res = ttm_resource_manager_first(man, &cursor);
455 ttm_resource_cursor_fini(&cursor);
456 if (!res) {
457 ret = -ENOENT;
458 goto out_no_ref;
459 }
460 bo = res->bo;
461 if (!ttm_bo_get_unless_zero(bo))
462 goto out_no_ref;
463 mem_type = res->mem_type;
464 spin_unlock(&bdev->lru_lock);
465 ret = ttm_bo_reserve(bo, ctx->interruptible, ctx->no_wait_gpu, NULL);
466 if (ret)
467 goto out_no_lock;
468 if (!bo->resource || bo->resource->mem_type != mem_type)
469 goto out_bo_moved;
470
471 if (bo->deleted) {
472 ret = ttm_bo_wait_ctx(bo, ctx);
473 if (!ret)
474 ttm_bo_cleanup_memtype_use(bo);
475 } else {
476 ret = ttm_bo_evict(bo, ctx);
477 }
478 out_bo_moved:
479 dma_resv_unlock(bo->base.resv);
480 out_no_lock:
481 ttm_bo_put(bo);
482 return ret;
483
484 out_no_ref:
485 spin_unlock(&bdev->lru_lock);
486 return ret;
487 }
488
489 /**
490 * struct ttm_bo_evict_walk - Parameters for the evict walk.
491 */
492 struct ttm_bo_evict_walk {
493 /** @walk: The walk base parameters. */
494 struct ttm_lru_walk walk;
495 /** @place: The place passed to the resource allocation. */
496 const struct ttm_place *place;
497 /** @evictor: The buffer object we're trying to make room for. */
498 struct ttm_buffer_object *evictor;
499 /** @res: The allocated resource if any. */
500 struct ttm_resource **res;
501 /** @evicted: Number of successful evictions. */
502 unsigned long evicted;
503
504 /** @limit_pool: Which pool limit we should test against */
505 struct dmem_cgroup_pool_state *limit_pool;
506 /** @try_low: Whether we should attempt to evict BO's with low watermark threshold */
507 bool try_low;
508 /** @hit_low: If we cannot evict a bo when @try_low is false (first pass) */
509 bool hit_low;
510 };
511
512 static s64 ttm_bo_evict_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
513 {
514 struct ttm_bo_evict_walk *evict_walk =
515 container_of(walk, typeof(*evict_walk), walk);
516 s64 lret;
517
518 if (!dmem_cgroup_state_evict_valuable(evict_walk->limit_pool, bo->resource->css,
519 evict_walk->try_low, &evict_walk->hit_low))
520 return 0;
521
522 if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, evict_walk->place))
523 return 0;
524
525 if (bo->deleted) {
526 lret = ttm_bo_wait_ctx(bo, walk->ctx);
527 if (!lret)
528 ttm_bo_cleanup_memtype_use(bo);
529 } else {
530 lret = ttm_bo_evict(bo, walk->ctx);
531 }
532
533 if (lret)
534 goto out;
535
536 evict_walk->evicted++;
537 if (evict_walk->res)
538 lret = ttm_resource_alloc(evict_walk->evictor, evict_walk->place,
539 evict_walk->res, NULL);
540 if (lret == 0)
541 return 1;
542 out:
543 /* Errors that should terminate the walk. */
544 if (lret == -ENOSPC)
545 return -EBUSY;
546
547 return lret;
548 }
549
550 static const struct ttm_lru_walk_ops ttm_evict_walk_ops = {
551 .process_bo = ttm_bo_evict_cb,
552 };
553
554 static int ttm_bo_evict_alloc(struct ttm_device *bdev,
555 struct ttm_resource_manager *man,
556 const struct ttm_place *place,
557 struct ttm_buffer_object *evictor,
558 struct ttm_operation_ctx *ctx,
559 struct ww_acquire_ctx *ticket,
560 struct ttm_resource **res,
561 struct dmem_cgroup_pool_state *limit_pool)
562 {
563 struct ttm_bo_evict_walk evict_walk = {
564 .walk = {
565 .ops = &ttm_evict_walk_ops,
566 .ctx = ctx,
567 .ticket = ticket,
568 },
569 .place = place,
570 .evictor = evictor,
571 .res = res,
572 .limit_pool = limit_pool,
573 };
574 s64 lret;
575
576 evict_walk.walk.trylock_only = true;
577 lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
578
579 /* One more attempt if we hit low limit? */
580 if (!lret && evict_walk.hit_low) {
581 evict_walk.try_low = true;
582 lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
583 }
584 if (lret || !ticket)
585 goto out;
586
587 /* Reset low limit */
588 evict_walk.try_low = evict_walk.hit_low = false;
589 /* If ticket-locking, repeat while making progress. */
590 evict_walk.walk.trylock_only = false;
591
592 retry:
593 do {
594 /* The walk may clear the evict_walk.walk.ticket field */
595 evict_walk.walk.ticket = ticket;
596 evict_walk.evicted = 0;
597 lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
598 } while (!lret && evict_walk.evicted);
599
600 /* We hit the low limit? Try once more */
601 if (!lret && evict_walk.hit_low && !evict_walk.try_low) {
602 evict_walk.try_low = true;
603 goto retry;
604 }
605 out:
606 if (lret < 0)
607 return lret;
608 if (lret == 0)
609 return -EBUSY;
610 return 0;
611 }
612
613 /**
614 * ttm_bo_pin - Pin the buffer object.
615 * @bo: The buffer object to pin
616 *
617 * Make sure the buffer is not evicted any more during memory pressure.
618 * @bo must be unpinned again by calling ttm_bo_unpin().
619 */
620 void ttm_bo_pin(struct ttm_buffer_object *bo)
621 {
622 dma_resv_assert_held(bo->base.resv);
623 WARN_ON_ONCE(!kref_read(&bo->kref));
624 spin_lock(&bo->bdev->lru_lock);
625 if (bo->resource)
626 ttm_resource_del_bulk_move(bo->resource, bo);
627 if (!bo->pin_count++ && bo->resource)
628 ttm_resource_move_to_lru_tail(bo->resource);
629 spin_unlock(&bo->bdev->lru_lock);
630 }
631 EXPORT_SYMBOL(ttm_bo_pin);
632
633 /**
634 * ttm_bo_unpin - Unpin the buffer object.
635 * @bo: The buffer object to unpin
636 *
637 * Allows the buffer object to be evicted again during memory pressure.
638 */
639 void ttm_bo_unpin(struct ttm_buffer_object *bo)
640 {
641 dma_resv_assert_held(bo->base.resv);
642 WARN_ON_ONCE(!kref_read(&bo->kref));
643 if (WARN_ON_ONCE(!bo->pin_count))
644 return;
645
646 spin_lock(&bo->bdev->lru_lock);
647 if (!--bo->pin_count && bo->resource) {
648 ttm_resource_add_bulk_move(bo->resource, bo);
649 ttm_resource_move_to_lru_tail(bo->resource);
650 }
651 spin_unlock(&bo->bdev->lru_lock);
652 }
653 EXPORT_SYMBOL(ttm_bo_unpin);
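
/*
 * Illustrative sketch (not part of this file): pinning a BO around a scanout
 * or DMA engine use so it cannot be evicted in between, assuming "bo" is
 * valid and currently unreserved.
 *
 *      ret = ttm_bo_reserve(bo, true, false, NULL);
 *      if (ret)
 *              return ret;
 *      ttm_bo_pin(bo);
 *      ttm_bo_unreserve(bo);
 *
 *      Program the hardware with the BO's location, and later:
 *
 *      ret = ttm_bo_reserve(bo, true, false, NULL);
 *      if (ret)
 *              return ret;
 *      ttm_bo_unpin(bo);
 *      ttm_bo_unreserve(bo);
 */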
654
655 /*
656 * Add the last move fence to the BO as a kernel dependency and reserve a new
657 * fence slot.
658 */
659 static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
660 struct ttm_resource_manager *man,
661 bool no_wait_gpu)
662 {
663 struct dma_fence *fence;
664 int ret;
665
666 spin_lock(&man->move_lock);
667 fence = dma_fence_get(man->move);
668 spin_unlock(&man->move_lock);
669
670 if (!fence)
671 return 0;
672
673 if (no_wait_gpu) {
674 ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
675 dma_fence_put(fence);
676 return ret;
677 }
678
679 dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
680
681 ret = dma_resv_reserve_fences(bo->base.resv, 1);
682 dma_fence_put(fence);
683 return ret;
684 }
685
686 /**
687 * ttm_bo_alloc_resource - Allocate backing store for a BO
688 *
689 * @bo: Pointer to a struct ttm_buffer_object of which we want a resource for
690 * @placement: Proposed new placement for the buffer object
691 * @ctx: if and how to sleep, lock buffers and alloc memory
692 * @force_space: If we should evict buffers to force space
693 * @res: The resulting struct ttm_resource.
694 *
695 * Allocates a resource for the buffer object pointed to by @bo, using the
696 * placement flags in @placement, potentially evicting other buffer objects when
697 * @force_space is true.
698 * This function may sleep while waiting for resources to become available.
699 * Returns:
700 * -EBUSY: No space available (only if no_wait == true).
701 * -ENOSPC: Could not allocate space for the buffer object, either due to
702 * fragmentation or concurrent allocators.
703 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
704 */
705 static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
706 struct ttm_placement *placement,
707 struct ttm_operation_ctx *ctx,
708 bool force_space,
709 struct ttm_resource **res)
710 {
711 struct ttm_device *bdev = bo->bdev;
712 struct ww_acquire_ctx *ticket;
713 int i, ret;
714
715 ticket = dma_resv_locking_ctx(bo->base.resv);
716 ret = dma_resv_reserve_fences(bo->base.resv, 1);
717 if (unlikely(ret))
718 return ret;
719
720 for (i = 0; i < placement->num_placement; ++i) {
721 const struct ttm_place *place = &placement->placement[i];
722 struct dmem_cgroup_pool_state *limit_pool = NULL;
723 struct ttm_resource_manager *man;
724 bool may_evict;
725
726 man = ttm_manager_type(bdev, place->mem_type);
727 if (!man || !ttm_resource_manager_used(man))
728 continue;
729
730 if (place->flags & (force_space ? TTM_PL_FLAG_DESIRED :
731 TTM_PL_FLAG_FALLBACK))
732 continue;
733
734 may_evict = (force_space && place->mem_type != TTM_PL_SYSTEM);
735 ret = ttm_resource_alloc(bo, place, res, force_space ? &limit_pool : NULL);
736 if (ret) {
737 if (ret != -ENOSPC && ret != -EAGAIN) {
738 dmem_cgroup_pool_state_put(limit_pool);
739 return ret;
740 }
741 if (!may_evict) {
742 dmem_cgroup_pool_state_put(limit_pool);
743 continue;
744 }
745
746 ret = ttm_bo_evict_alloc(bdev, man, place, bo, ctx,
747 ticket, res, limit_pool);
748 dmem_cgroup_pool_state_put(limit_pool);
749 if (ret == -EBUSY)
750 continue;
751 if (ret)
752 return ret;
753 }
754
755 ret = ttm_bo_add_move_fence(bo, man, ctx->no_wait_gpu);
756 if (unlikely(ret)) {
757 ttm_resource_free(bo, res);
758 if (ret == -EBUSY)
759 continue;
760
761 return ret;
762 }
763 return 0;
764 }
765
766 return -ENOSPC;
767 }
768
769 /*
770 * ttm_bo_mem_space - Wrapper around ttm_bo_alloc_resource
771 *
772 * @bo: Pointer to a struct ttm_buffer_object of which we want a resource for
773 * @placement: Proposed new placement for the buffer object
774 * @res: The resulting struct ttm_resource.
775 * @ctx: if and how to sleep, lock buffers and alloc memory
776 *
777 * Tries both idle allocation and forceful eviction of buffers. See
778 * ttm_bo_alloc_resource for details.
779 */
780 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
781 struct ttm_placement *placement,
782 struct ttm_resource **res,
783 struct ttm_operation_ctx *ctx)
784 {
785 bool force_space = false;
786 int ret;
787
788 do {
789 ret = ttm_bo_alloc_resource(bo, placement, ctx,
790 force_space, res);
791 force_space = !force_space;
792 } while (ret == -ENOSPC && force_space);
793
794 return ret;
795 }
796 EXPORT_SYMBOL(ttm_bo_mem_space);
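
/*
 * Illustrative sketch (not part of this file): a driver move handler that
 * needs an intermediate GTT resource could allocate it with a single
 * TTM_PL_TT place; "ctx" is the struct ttm_operation_ctx of the operation.
 *
 *      struct ttm_place place = { .mem_type = TTM_PL_TT };
 *      struct ttm_placement placement = {
 *              .num_placement = 1,
 *              .placement = &place,
 *      };
 *      struct ttm_resource *tmp;
 *      int ret;
 *
 *      ret = ttm_bo_mem_space(bo, &placement, &tmp, ctx);
 *      if (ret)
 *              return ret;
 */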
797
798 /**
799 * ttm_bo_validate
800 *
801 * @bo: The buffer object.
802 * @placement: Proposed placement for the buffer object.
803 * @ctx: validation parameters.
804 *
805 * Changes placement and caching policy of the buffer object
806 * according to the proposed placement.
807 * Returns
808 * -EINVAL on invalid proposed placement.
809 * -ENOMEM on out-of-memory condition.
810 * -EBUSY if no_wait is true and the buffer is busy.
811 * -ERESTARTSYS if interrupted by a signal.
812 */
813 int ttm_bo_validate(struct ttm_buffer_object *bo,
814 struct ttm_placement *placement,
815 struct ttm_operation_ctx *ctx)
816 {
817 struct ttm_resource *res;
818 struct ttm_place hop;
819 bool force_space;
820 int ret;
821
822 dma_resv_assert_held(bo->base.resv);
823
824 /*
825 * Remove the backing store if no placement is given.
826 */
827 if (!placement->num_placement)
828 return ttm_bo_pipeline_gutting(bo);
829
830 force_space = false;
831 do {
832 /* Check whether we need to move buffer. */
833 if (bo->resource &&
834 ttm_resource_compatible(bo->resource, placement,
835 force_space))
836 return 0;
837
838 /* Moving of pinned BOs is forbidden */
839 if (bo->pin_count)
840 return -EINVAL;
841
842 /*
843 * Determine where to move the buffer.
844 *
845 * If the driver determines the move is going to need
846 * an extra step, then it will return -EMULTIHOP,
847 * the buffer will be moved to the temporary
848 * stop, and the driver will be called again to make
849 * the second hop.
850 */
851 ret = ttm_bo_alloc_resource(bo, placement, ctx, force_space,
852 &res);
853 force_space = !force_space;
854 if (ret == -ENOSPC)
855 continue;
856 if (ret)
857 return ret;
858
859 bounce:
860 ret = ttm_bo_handle_move_mem(bo, res, false, ctx, &hop);
861 if (ret == -EMULTIHOP) {
862 ret = ttm_bo_bounce_temp_buffer(bo, ctx, &hop);
863 /* try and move to final place now. */
864 if (!ret)
865 goto bounce;
866 }
867 if (ret) {
868 ttm_resource_free(bo, &res);
869 return ret;
870 }
871
872 } while (ret && force_space);
873
874 /* For backward compatibility with userspace */
875 if (ret == -ENOSPC)
876 return -ENOMEM;
877
878 /*
879 * We might need to add a TTM.
880 */
881 if (!bo->resource || bo->resource->mem_type == TTM_PL_SYSTEM) {
882 ret = ttm_tt_create(bo, true);
883 if (ret)
884 return ret;
885 }
886 return 0;
887 }
888 EXPORT_SYMBOL(ttm_bo_validate);
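
/*
 * Illustrative sketch (not part of this file): validating a reserved BO into
 * VRAM when free space is available, and otherwise falling back to GTT rather
 * than evicting other VRAM buffers. A real driver typically builds the
 * placement list from its own domain masks.
 *
 *      struct ttm_place places[] = {
 *              { .mem_type = TTM_PL_VRAM, .flags = TTM_PL_FLAG_DESIRED },
 *              { .mem_type = TTM_PL_TT, .flags = TTM_PL_FLAG_FALLBACK },
 *      };
 *      struct ttm_placement placement = {
 *              .num_placement = ARRAY_SIZE(places),
 *              .placement = places,
 *      };
 *      struct ttm_operation_ctx ctx = { .interruptible = true };
 *      int ret;
 *
 *      ret = ttm_bo_validate(bo, &placement, &ctx);
 */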
889
890 /**
891 * ttm_bo_init_reserved
892 *
893 * @bdev: Pointer to a ttm_device struct.
894 * @bo: Pointer to a ttm_buffer_object to be initialized.
895 * @type: Requested type of buffer object.
896 * @placement: Initial placement for buffer object.
897 * @alignment: Data alignment in pages.
898 * @ctx: TTM operation context for memory allocation.
899 * @sg: Scatter-gather table.
900 * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
901 * @destroy: Destroy function. Use NULL for kfree().
902 *
903 * This function initializes a pre-allocated struct ttm_buffer_object.
904 * As this object may be part of a larger structure, this function,
905 * together with the @destroy function, enables driver-specific objects
906 * derived from a ttm_buffer_object.
907 *
908 * On successful return, the caller owns an object kref to @bo. The kref and
909 * list_kref are usually set to 1, but note that in some situations, other
910 * tasks may already be holding references to @bo as well.
911 * Furthermore, if resv == NULL, the buffer's reservation lock will be held,
912 * and it is the caller's responsibility to call ttm_bo_unreserve.
913 *
914 * If a failure occurs, the function will call the @destroy function. Thus,
915 * after a failure, dereferencing @bo is illegal and will likely cause memory
916 * corruption.
917 *
918 * Returns
919 * -ENOMEM: Out of memory.
920 * -EINVAL: Invalid placement flags.
921 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
922 */
923 int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
924 enum ttm_bo_type type, struct ttm_placement *placement,
925 uint32_t alignment, struct ttm_operation_ctx *ctx,
926 struct sg_table *sg, struct dma_resv *resv,
927 void (*destroy) (struct ttm_buffer_object *))
928 {
929 int ret;
930
931 kref_init(&bo->kref);
932 bo->bdev = bdev;
933 bo->type = type;
934 bo->page_alignment = alignment;
935 bo->destroy = destroy;
936 bo->pin_count = 0;
937 bo->sg = sg;
938 bo->bulk_move = NULL;
939 if (resv)
940 bo->base.resv = resv;
941 else
942 bo->base.resv = &bo->base._resv;
943 atomic_inc(&ttm_glob.bo_count);
944
945 /*
946 * For ttm_bo_type_device buffers, allocate
947 * address space from the device.
948 */
949 if (bo->type == ttm_bo_type_device || bo->type == ttm_bo_type_sg) {
950 ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
951 PFN_UP(bo->base.size));
952 if (ret)
953 goto err_put;
954 }
955
956 /* passed reservation objects should already be locked,
957 * since otherwise lockdep will be angered in radeon.
958 */
959 if (!resv)
960 WARN_ON(!dma_resv_trylock(bo->base.resv));
961 else
962 dma_resv_assert_held(resv);
963
964 ret = ttm_bo_validate(bo, placement, ctx);
965 if (unlikely(ret))
966 goto err_unlock;
967
968 return 0;
969
970 err_unlock:
971 if (!resv)
972 dma_resv_unlock(bo->base.resv);
973
974 err_put:
975 ttm_bo_put(bo);
976 return ret;
977 }
978 EXPORT_SYMBOL(ttm_bo_init_reserved);
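
/*
 * Illustrative sketch (not part of this file): a driver object embedding a
 * struct ttm_buffer_object. All "my_*" names are hypothetical driver code,
 * and bo->tbo.base is assumed to have been initialized (e.g. with
 * drm_gem_object_init()) so that its size is set. On failure the @destroy
 * callback has already been called, so the error path must not touch "bo".
 *
 *      static void my_bo_destroy(struct ttm_buffer_object *tbo)
 *      {
 *              kfree(container_of(tbo, struct my_bo, tbo));
 *      }
 *
 *      struct ttm_operation_ctx ctx = { .interruptible = true };
 *      int ret;
 *
 *      ret = ttm_bo_init_reserved(bdev, &bo->tbo, ttm_bo_type_device,
 *                                 &placement, 0, &ctx, NULL, NULL,
 *                                 my_bo_destroy);
 *      if (ret)
 *              return ret;
 *
 *      Fill in initial content while the BO is still reserved, then:
 *
 *      ttm_bo_unreserve(&bo->tbo);
 */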
979
980 /**
981 * ttm_bo_init_validate
982 *
983 * @bdev: Pointer to a ttm_device struct.
984 * @bo: Pointer to a ttm_buffer_object to be initialized.
985 * @type: Requested type of buffer object.
986 * @placement: Initial placement for buffer object.
987 * @alignment: Data alignment in pages.
988 * @interruptible: If needing to sleep to wait for GPU resources,
989 * sleep interruptible.
994 * @sg: Scatter-gather table.
995 * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
996 * @destroy: Destroy function. Use NULL for kfree().
997 *
998 * This function initializes a pre-allocated struct ttm_buffer_object.
999 * As this object may be part of a larger structure, this function,
1000 * together with the @destroy function,
1001 * enables driver-specific objects derived from a ttm_buffer_object.
1002 *
1003 * On successful return, the caller owns an object kref to @bo. The kref and
1004 * list_kref are usually set to 1, but note that in some situations, other
1005 * tasks may already be holding references to @bo as well.
1006 *
1007 * If a failure occurs, the function will call the @destroy function. Thus,
1008 * after a failure, dereferencing @bo is illegal and will likely cause memory
1009 * corruption.
1010 *
1011 * Returns
1012 * -ENOMEM: Out of memory.
1013 * -EINVAL: Invalid placement flags.
1014 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
1015 */
1016 int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
1017 enum ttm_bo_type type, struct ttm_placement *placement,
1018 uint32_t alignment, bool interruptible,
1019 struct sg_table *sg, struct dma_resv *resv,
1020 void (*destroy) (struct ttm_buffer_object *))
1021 {
1022 struct ttm_operation_ctx ctx = { interruptible, false };
1023 int ret;
1024
1025 ret = ttm_bo_init_reserved(bdev, bo, type, placement, alignment, &ctx,
1026 sg, resv, destroy);
1027 if (ret)
1028 return ret;
1029
1030 if (!resv)
1031 ttm_bo_unreserve(bo);
1032
1033 return 0;
1034 }
1035 EXPORT_SYMBOL(ttm_bo_init_validate);
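
/*
 * Illustrative sketch (not part of this file): when nothing needs to be done
 * under the reservation lock after creation, the validate variant initializes
 * and unreserves in one call; arguments mirror the hypothetical
 * ttm_bo_init_reserved() example above.
 *
 *      ret = ttm_bo_init_validate(bdev, &bo->tbo, ttm_bo_type_device,
 *                                 &placement, 0, true, NULL, NULL,
 *                                 my_bo_destroy);
 */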
1036
1037 /*
1038 * buffer object vm functions.
1039 */
1040
1041 /**
1042 * ttm_bo_unmap_virtual
1043 *
1044 * @bo: tear down the virtual mappings for this BO
1045 */
1046 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1047 {
1048 struct ttm_device *bdev = bo->bdev;
1049
1050 drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
1051 ttm_mem_io_free(bdev, bo->resource);
1052 }
1053 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1054
1055 /**
1056 * ttm_bo_wait_ctx - wait for buffer idle.
1057 *
1058 * @bo: The buffer object.
1059 * @ctx: defines how to wait
1060 *
1061 * Waits for the buffer to be idle. The timeout used depends on the context.
1062 * Returns -EBUSY if the wait timed out, -ERESTARTSYS if interrupted by a signal or
1063 * zero on success.
1064 */
1065 int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
1066 {
1067 long ret;
1068
1069 if (ctx->no_wait_gpu) {
1070 if (dma_resv_test_signaled(bo->base.resv,
1071 DMA_RESV_USAGE_BOOKKEEP))
1072 return 0;
1073 else
1074 return -EBUSY;
1075 }
1076
1077 ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
1078 ctx->interruptible, 15 * HZ);
1079 if (unlikely(ret < 0))
1080 return ret;
1081 if (unlikely(ret == 0))
1082 return -EBUSY;
1083 return 0;
1084 }
1085 EXPORT_SYMBOL(ttm_bo_wait_ctx);
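
/*
 * Illustrative sketch (not part of this file): waiting for a reserved BO to
 * become idle before CPU access, honouring the caller's blocking policy.
 *
 *      struct ttm_operation_ctx ctx = { .interruptible = true };
 *      int ret;
 *
 *      ret = ttm_bo_wait_ctx(bo, &ctx);
 *      if (ret)
 *              return ret;
 */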
1086
1087 /**
1088 * struct ttm_bo_swapout_walk - Parameters for the swapout walk
1089 */
1090 struct ttm_bo_swapout_walk {
1091 /** @walk: The walk base parameters. */
1092 struct ttm_lru_walk walk;
1093 /** @gfp_flags: The gfp flags to use for ttm_tt_swapout() */
1094 gfp_t gfp_flags;
1095
1096 bool hit_low, evict_low;
1097 };
1098
1099 static s64
1100 ttm_bo_swapout_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
1101 {
1102 struct ttm_place place = {.mem_type = bo->resource->mem_type};
1103 struct ttm_bo_swapout_walk *swapout_walk =
1104 container_of(walk, typeof(*swapout_walk), walk);
1105 struct ttm_operation_ctx *ctx = walk->ctx;
1106 s64 ret;
1107
1108 /*
1109 * While the bo may already reside in SYSTEM placement, set
1110 * SYSTEM as the new placement to also cover the move further below.
1111 * The driver may use the fact that we're moving from SYSTEM
1112 * as an indication that we're about to swap out.
1113 */
1114 if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, &place)) {
1115 ret = -EBUSY;
1116 goto out;
1117 }
1118
1119 if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
1120 bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL ||
1121 bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED) {
1122 ret = -EBUSY;
1123 goto out;
1124 }
1125
1126 if (bo->deleted) {
1127 pgoff_t num_pages = bo->ttm->num_pages;
1128
1129 ret = ttm_bo_wait_ctx(bo, ctx);
1130 if (ret)
1131 goto out;
1132
1133 ttm_bo_cleanup_memtype_use(bo);
1134 ret = num_pages;
1135 goto out;
1136 }
1137
1138 /*
1139 * Move to system cached
1140 */
1141 if (bo->resource->mem_type != TTM_PL_SYSTEM) {
1142 struct ttm_resource *evict_mem;
1143 struct ttm_place hop;
1144
1145 memset(&hop, 0, sizeof(hop));
1146 place.mem_type = TTM_PL_SYSTEM;
1147 ret = ttm_resource_alloc(bo, &place, &evict_mem, NULL);
1148 if (ret)
1149 goto out;
1150
1151 ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
1152 if (ret) {
1153 WARN(ret == -EMULTIHOP,
1154 "Unexpected multihop in swapout - likely driver bug.\n");
1155 ttm_resource_free(bo, &evict_mem);
1156 goto out;
1157 }
1158 }
1159
1160 /*
1161 * Make sure BO is idle.
1162 */
1163 ret = ttm_bo_wait_ctx(bo, ctx);
1164 if (ret)
1165 goto out;
1166
1167 ttm_bo_unmap_virtual(bo);
1168 if (bo->bdev->funcs->swap_notify)
1169 bo->bdev->funcs->swap_notify(bo);
1170
1171 if (ttm_tt_is_populated(bo->ttm)) {
1172 spin_lock(&bo->bdev->lru_lock);
1173 ttm_resource_del_bulk_move(bo->resource, bo);
1174 spin_unlock(&bo->bdev->lru_lock);
1175
1176 ret = ttm_tt_swapout(bo->bdev, bo->ttm, swapout_walk->gfp_flags);
1177
1178 spin_lock(&bo->bdev->lru_lock);
1179 if (ret)
1180 ttm_resource_add_bulk_move(bo->resource, bo);
1181 ttm_resource_move_to_lru_tail(bo->resource);
1182 spin_unlock(&bo->bdev->lru_lock);
1183 }
1184
1185 out:
1186 /* Consider -ENOMEM and -ENOSPC non-fatal. */
1187 if (ret == -ENOMEM || ret == -ENOSPC)
1188 ret = -EBUSY;
1189
1190 return ret;
1191 }
1192
1193 const struct ttm_lru_walk_ops ttm_swap_ops = {
1194 .process_bo = ttm_bo_swapout_cb,
1195 };
1196
1197 /**
1198 * ttm_bo_swapout() - Swap out buffer objects on the LRU list to shmem.
1199 * @bdev: The ttm device.
1200 * @ctx: The ttm_operation_ctx governing the swapout operation.
1201 * @man: The resource manager whose resources / buffer objects are
1202 * going to be swapped out.
1203 * @gfp_flags: The gfp flags used for shmem page allocations.
1204 * @target: The desired number of bytes to swap out.
1205 *
1206 * Return: The number of bytes actually swapped out, or negative error code
1207 * on error.
1208 */
1209 s64 ttm_bo_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
1210 struct ttm_resource_manager *man, gfp_t gfp_flags,
1211 s64 target)
1212 {
1213 struct ttm_bo_swapout_walk swapout_walk = {
1214 .walk = {
1215 .ops = &ttm_swap_ops,
1216 .ctx = ctx,
1217 .trylock_only = true,
1218 },
1219 .gfp_flags = gfp_flags,
1220 };
1221
1222 return ttm_lru_walk_for_evict(&swapout_walk.walk, bdev, man, target);
1223 }
1224
1225 void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
1226 {
1227 if (bo->ttm == NULL)
1228 return;
1229
1230 ttm_tt_unpopulate(bo->bdev, bo->ttm);
1231 ttm_tt_destroy(bo->bdev, bo->ttm);
1232 bo->ttm = NULL;
1233 }
1234
1235 /**
1236 * ttm_bo_populate() - Ensure that a buffer object has backing pages
1237 * @bo: The buffer object
1238 * @ctx: The ttm_operation_ctx governing the operation.
1239 *
1240 * For buffer objects in a memory type whose manager uses
1241 * struct ttm_tt for backing pages, ensure those backing pages
1242 * are present and with valid content. The bo's resource is also
1243 * placed on the correct LRU list if it was previously swapped
1244 * out.
1245 *
1246 * Return: 0 if successful, negative error code on failure.
1247 * Note: May return -EINTR or -ERESTARTSYS if @ctx::interruptible
1248 * is set to true.
1249 */
1250 int ttm_bo_populate(struct ttm_buffer_object *bo,
1251 struct ttm_operation_ctx *ctx)
1252 {
1253 struct ttm_tt *tt = bo->ttm;
1254 bool swapped;
1255 int ret;
1256
1257 dma_resv_assert_held(bo->base.resv);
1258
1259 if (!tt)
1260 return 0;
1261
1262 swapped = ttm_tt_is_swapped(tt);
1263 ret = ttm_tt_populate(bo->bdev, tt, ctx);
1264 if (ret)
1265 return ret;
1266
1267 if (swapped && !ttm_tt_is_swapped(tt) && !bo->pin_count &&
1268 bo->resource) {
1269 spin_lock(&bo->bdev->lru_lock);
1270 ttm_resource_add_bulk_move(bo->resource, bo);
1271 ttm_resource_move_to_lru_tail(bo->resource);
1272 spin_unlock(&bo->bdev->lru_lock);
1273 }
1274
1275 return 0;
1276 }
1277 EXPORT_SYMBOL(ttm_bo_populate);
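
/*
 * Illustrative sketch (not part of this file): making sure a reserved BO has
 * backing pages before mapping it for CPU access; "map" is a struct iosys_map
 * and "ctx" a struct ttm_operation_ctx.
 *
 *      ret = ttm_bo_populate(bo, &ctx);
 *      if (ret)
 *              return ret;
 *
 *      ret = ttm_bo_vmap(bo, &map);
 */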
1278