Lines Matching refs:bo

42 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
75 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, in ttm_bo_mem_space_debug() argument
81 bo, bo->mem.num_pages, bo->mem.size >> 10, in ttm_bo_mem_space_debug()
82 bo->mem.size >> 20); in ttm_bo_mem_space_debug()
90 ttm_mem_type_debug(bo->bdev, mem_type); in ttm_bo_mem_space_debug()
109 static void ttm_bo_release_list(struct ttm_buffer_object *bo) in ttm_bo_release_list() argument
111 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_release_list()
112 size_t acc_size = bo->acc_size; in ttm_bo_release_list()
114 MPASS(atomic_read(&bo->list_kref) == 0); in ttm_bo_release_list()
115 MPASS(atomic_read(&bo->kref) == 0); in ttm_bo_release_list()
116 MPASS(atomic_read(&bo->cpu_writers) == 0); in ttm_bo_release_list()
117 MPASS(bo->sync_obj == NULL); in ttm_bo_release_list()
118 MPASS(bo->mem.mm_node == NULL); in ttm_bo_release_list()
119 MPASS(list_empty(&bo->lru)); in ttm_bo_release_list()
120 MPASS(list_empty(&bo->ddestroy)); in ttm_bo_release_list()
122 if (bo->ttm) in ttm_bo_release_list()
123 ttm_tt_destroy(bo->ttm); in ttm_bo_release_list()
124 atomic_dec(&bo->glob->bo_count); in ttm_bo_release_list()
125 if (bo->destroy) in ttm_bo_release_list()
126 bo->destroy(bo); in ttm_bo_release_list()
128 free(bo, M_TTM_BO); in ttm_bo_release_list()
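
The tail of ttm_bo_release_list() (lines 125-128 above) is the usual destroy-callback-or-free ownership split: an embedder that allocated the buffer object inside a larger structure passes its own destructor at init time, and standalone allocations fall back to freeing with the M_TTM_BO tag. A minimal userland rendering of the idiom, with invented names:

    #include <stdlib.h>

    struct obj {
        /* set by the embedder at init time; NULL for standalone allocations */
        void (*destroy)(struct obj *);
    };

    static void obj_release(struct obj *o)
    {
        if (o->destroy != NULL)
            o->destroy(o);   /* object is embedded; its owner reclaims it */
        else
            free(o);         /* object was allocated on its own */
    }
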
134 ttm_bo_wait_unreserved_locked(struct ttm_buffer_object *bo, bool interruptible) in ttm_bo_wait_unreserved_locked() argument
147 while (ttm_bo_is_reserved(bo)) { in ttm_bo_wait_unreserved_locked()
148 ret = -msleep(bo, &bo->glob->lru_lock, flags, wmsg, 0); in ttm_bo_wait_unreserved_locked()
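
Line 148 shows the FreeBSD sleep idiom: msleep(9) atomically drops bo->glob->lru_lock while sleeping on the channel bo and retakes the lock before returning, and its positive errno return is negated into the -errno convention the rest of the code uses. A userland sketch of the same wait loop, with pthread_cond_wait standing in for msleep and invented names throughout:

    #include <pthread.h>
    #include <stdbool.h>

    struct sketch_bo {
        pthread_mutex_t lru_lock;    /* models bo->glob->lru_lock */
        pthread_cond_t  unreserved;  /* models the wakeup(bo) channel */
        bool reserved;
    };

    /* Caller holds lru_lock, as ttm_bo_wait_unreserved_locked() requires;
     * the condition is re-tested after every wakeup because wakeups may
     * be spurious or raced. */
    static void wait_unreserved_locked(struct sketch_bo *bo)
    {
        while (bo->reserved)
            pthread_cond_wait(&bo->unreserved, &bo->lru_lock);
    }
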
157 void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) in ttm_bo_add_to_lru() argument
159 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_add_to_lru()
162 MPASS(ttm_bo_is_reserved(bo)); in ttm_bo_add_to_lru()
164 if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { in ttm_bo_add_to_lru()
166 MPASS(list_empty(&bo->lru)); in ttm_bo_add_to_lru()
168 man = &bdev->man[bo->mem.mem_type]; in ttm_bo_add_to_lru()
169 list_add_tail(&bo->lru, &man->lru); in ttm_bo_add_to_lru()
170 refcount_acquire(&bo->list_kref); in ttm_bo_add_to_lru()
172 if (bo->ttm != NULL) { in ttm_bo_add_to_lru()
173 list_add_tail(&bo->swap, &bo->glob->swap_lru); in ttm_bo_add_to_lru()
174 refcount_acquire(&bo->list_kref); in ttm_bo_add_to_lru()
179 int ttm_bo_del_from_lru(struct ttm_buffer_object *bo) in ttm_bo_del_from_lru() argument
183 if (!list_empty(&bo->swap)) { in ttm_bo_del_from_lru()
184 list_del_init(&bo->swap); in ttm_bo_del_from_lru()
187 if (!list_empty(&bo->lru)) { in ttm_bo_del_from_lru()
188 list_del_init(&bo->lru); in ttm_bo_del_from_lru()
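
Together these two functions define the LRU accounting contract: each list membership (lru and swap) pins one list_kref reference, taken in ttm_bo_add_to_lru() and reported back by ttm_bo_del_from_lru() as a put_count so the caller can drop the references outside the lock via ttm_bo_list_ref_sub(). A self-contained sketch of the del side, with a minimal list implementation:

    #include <stdbool.h>

    struct list_head { struct list_head *next, *prev; };

    static bool list_empty(const struct list_head *h) { return h->next == h; }

    static void list_del_init(struct list_head *e)
    {
        e->prev->next = e->next;
        e->next->prev = e->prev;
        e->next = e->prev = e;
    }

    struct sketch_bo {
        struct list_head lru, swap;  /* each membership pins one list_kref */
    };

    /* Returns how many list references the caller must drop once the LRU
     * lock is released, mirroring ttm_bo_del_from_lru()'s return value. */
    static int sketch_del_from_lru(struct sketch_bo *bo)
    {
        int put_count = 0;

        if (!list_empty(&bo->swap)) { list_del_init(&bo->swap); put_count++; }
        if (!list_empty(&bo->lru))  { list_del_init(&bo->lru);  put_count++; }
        return put_count;
    }
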
200 int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo, in ttm_bo_reserve_nolru() argument
206 while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) { in ttm_bo_reserve_nolru()
210 if (use_sequence && bo->seq_valid) { in ttm_bo_reserve_nolru()
214 if (unlikely(sequence == bo->val_seq)) in ttm_bo_reserve_nolru()
220 if (unlikely(sequence - bo->val_seq < (1U << 31))) in ttm_bo_reserve_nolru()
227 ret = ttm_bo_wait_unreserved_locked(bo, interruptible); in ttm_bo_reserve_nolru()
239 if (unlikely((bo->val_seq - sequence < (1U << 31)) in ttm_bo_reserve_nolru()
240 || !bo->seq_valid)) in ttm_bo_reserve_nolru()
253 bo->val_seq = sequence; in ttm_bo_reserve_nolru()
254 bo->seq_valid = true; in ttm_bo_reserve_nolru()
256 wakeup(bo); in ttm_bo_reserve_nolru()
258 bo->seq_valid = false; in ttm_bo_reserve_nolru()
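
The tests on lines 220 and 239 are the standard wraparound-safe ordering comparison on 32-bit sequence counters: (a - b) < (1U << 31) means "a is at or after b" even when the counter has wrapped, because unsigned subtraction is modular. A standalone demonstration:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool seq_at_or_after(uint32_t a, uint32_t b)
    {
        return a - b < (1U << 31);  /* unsigned subtraction wraps mod 2^32 */
    }

    int main(void)
    {
        assert(seq_at_or_after(5, 3));               /* plainly newer */
        assert(!seq_at_or_after(3, 5));              /* plainly older */
        assert(seq_at_or_after(2, UINT32_MAX - 1));  /* newer across the wrap */
        return 0;
    }
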
264 void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count, in ttm_bo_list_ref_sub() argument
269 old = atomic_fetchadd_int(&bo->list_kref, -count); in ttm_bo_list_ref_sub()
273 ttm_bo_release_list(bo); in ttm_bo_list_ref_sub()
277 int ttm_bo_reserve(struct ttm_buffer_object *bo, in ttm_bo_reserve() argument
281 struct ttm_bo_global *glob = bo->glob; in ttm_bo_reserve()
285 mtx_lock(&bo->glob->lru_lock); in ttm_bo_reserve()
286 ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence, in ttm_bo_reserve()
289 put_count = ttm_bo_del_from_lru(bo); in ttm_bo_reserve()
291 ttm_bo_list_ref_sub(bo, put_count, true); in ttm_bo_reserve()
293 mtx_unlock(&bo->glob->lru_lock); in ttm_bo_reserve()
298 int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo, in ttm_bo_reserve_slowpath_nolru() argument
304 while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) { in ttm_bo_reserve_slowpath_nolru()
305 if (bo->seq_valid && sequence == bo->val_seq) { in ttm_bo_reserve_slowpath_nolru()
311 ret = ttm_bo_wait_unreserved_locked(bo, interruptible); in ttm_bo_reserve_slowpath_nolru()
317 if ((bo->val_seq - sequence < (1U << 31)) || !bo->seq_valid) in ttm_bo_reserve_slowpath_nolru()
324 bo->val_seq = sequence; in ttm_bo_reserve_slowpath_nolru()
325 bo->seq_valid = true; in ttm_bo_reserve_slowpath_nolru()
327 wakeup(bo); in ttm_bo_reserve_slowpath_nolru()
332 int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, in ttm_bo_reserve_slowpath() argument
335 struct ttm_bo_global *glob = bo->glob; in ttm_bo_reserve_slowpath()
339 ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence); in ttm_bo_reserve_slowpath()
341 put_count = ttm_bo_del_from_lru(bo); in ttm_bo_reserve_slowpath()
343 ttm_bo_list_ref_sub(bo, put_count, true); in ttm_bo_reserve_slowpath()
349 void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo) in ttm_bo_unreserve_locked() argument
351 ttm_bo_add_to_lru(bo); in ttm_bo_unreserve_locked()
352 atomic_set(&bo->reserved, 0); in ttm_bo_unreserve_locked()
353 wakeup(bo); in ttm_bo_unreserve_locked()
356 void ttm_bo_unreserve(struct ttm_buffer_object *bo) in ttm_bo_unreserve() argument
358 struct ttm_bo_global *glob = bo->glob; in ttm_bo_unreserve()
361 ttm_bo_unreserve_locked(bo); in ttm_bo_unreserve()
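
A hedged caller sketch of the reserve/unreserve discipline these matches trace, using the five-argument call visible on line 1780; the helper name is invented and the prototypes are assumed from ttm_bo_api.h. Reserving both locks the object and (in ttm_bo_reserve()) removes it from the LRU, so eviction and swapout skip it until ttm_bo_unreserve() puts it back and wakes waiters:

    static int with_bo_reserved(struct ttm_buffer_object *bo)
    {
        int ret;

        ret = ttm_bo_reserve(bo, true /* interruptible */,
            false /* no_wait */, false /* use_sequence */, 0 /* sequence */);
        if (ret != 0)
            return ret;
        /* ... bo->mem and bo->ttm are stable against movers here ... */
        ttm_bo_unreserve(bo);  /* re-adds to the LRU and wakes waiters */
        return 0;
    }
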
368 static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) in ttm_bo_add_ttm() argument
370 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_add_ttm()
371 struct ttm_bo_global *glob = bo->glob; in ttm_bo_add_ttm()
375 TTM_ASSERT_LOCKED(&bo->mutex); in ttm_bo_add_ttm()
376 bo->ttm = NULL; in ttm_bo_add_ttm()
381 switch (bo->type) { in ttm_bo_add_ttm()
386 bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, in ttm_bo_add_ttm()
388 if (unlikely(bo->ttm == NULL)) in ttm_bo_add_ttm()
392 bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, in ttm_bo_add_ttm()
395 if (unlikely(bo->ttm == NULL)) { in ttm_bo_add_ttm()
399 bo->ttm->sg = bo->sg; in ttm_bo_add_ttm()
410 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, in ttm_bo_handle_move_mem() argument
415 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_handle_move_mem()
416 bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem); in ttm_bo_handle_move_mem()
418 struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type]; in ttm_bo_handle_move_mem()
423 ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) { in ttm_bo_handle_move_mem()
427 ttm_bo_unmap_virtual_locked(bo); in ttm_bo_handle_move_mem()
436 if (bo->ttm == NULL) { in ttm_bo_handle_move_mem()
438 ret = ttm_bo_add_ttm(bo, zero); in ttm_bo_handle_move_mem()
443 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement); in ttm_bo_handle_move_mem()
448 ret = ttm_tt_bind(bo->ttm, mem); in ttm_bo_handle_move_mem()
453 if (bo->mem.mem_type == TTM_PL_SYSTEM) { in ttm_bo_handle_move_mem()
455 bdev->driver->move_notify(bo, mem); in ttm_bo_handle_move_mem()
456 bo->mem = *mem; in ttm_bo_handle_move_mem()
463 bdev->driver->move_notify(bo, mem); in ttm_bo_handle_move_mem()
467 ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem); in ttm_bo_handle_move_mem()
469 ret = bdev->driver->move(bo, evict, interruptible, in ttm_bo_handle_move_mem()
472 ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem); in ttm_bo_handle_move_mem()
477 *mem = bo->mem; in ttm_bo_handle_move_mem()
478 bo->mem = tmp_mem; in ttm_bo_handle_move_mem()
479 bdev->driver->move_notify(bo, mem); in ttm_bo_handle_move_mem()
480 bo->mem = *mem; in ttm_bo_handle_move_mem()
488 if (bo->evicted) { in ttm_bo_handle_move_mem()
489 ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement); in ttm_bo_handle_move_mem()
492 bo->evicted = false; in ttm_bo_handle_move_mem()
495 if (bo->mem.mm_node) { in ttm_bo_handle_move_mem()
496 bo->offset = (bo->mem.start << PAGE_SHIFT) + in ttm_bo_handle_move_mem()
497 bdev->man[bo->mem.mem_type].gpu_offset; in ttm_bo_handle_move_mem()
498 bo->cur_placement = bo->mem.placement; in ttm_bo_handle_move_mem()
500 bo->offset = 0; in ttm_bo_handle_move_mem()
505 new_man = &bdev->man[bo->mem.mem_type]; in ttm_bo_handle_move_mem()
506 if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) { in ttm_bo_handle_move_mem()
507 ttm_tt_unbind(bo->ttm); in ttm_bo_handle_move_mem()
508 ttm_tt_destroy(bo->ttm); in ttm_bo_handle_move_mem()
509 bo->ttm = NULL; in ttm_bo_handle_move_mem()
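
Lines 467-472 walk a three-way move selection; reconstructed below as a stand-alone decision function. The boolean predicates are stand-ins for the TTM_MEMTYPE_FLAG_FIXED checks on the old and new memory type managers, which do not appear in the matched lines:

    #include <stdbool.h>

    enum move_path { MOVE_TTM, MOVE_DRIVER, MOVE_MEMCPY };

    static enum move_path pick_move_path(bool old_fixed, bool new_fixed,
        bool driver_has_move)
    {
        if (!old_fixed && !new_fixed)
            return MOVE_TTM;     /* cheap rebind: ttm_bo_move_ttm() */
        if (driver_has_move)
            return MOVE_DRIVER;  /* accelerated: bdev->driver->move() */
        return MOVE_MEMCPY;      /* fallback: ttm_bo_move_memcpy() */
    }
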
523 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) in ttm_bo_cleanup_memtype_use() argument
525 if (bo->bdev->driver->move_notify) in ttm_bo_cleanup_memtype_use()
526 bo->bdev->driver->move_notify(bo, NULL); in ttm_bo_cleanup_memtype_use()
528 if (bo->ttm) { in ttm_bo_cleanup_memtype_use()
529 ttm_tt_unbind(bo->ttm); in ttm_bo_cleanup_memtype_use()
530 ttm_tt_destroy(bo->ttm); in ttm_bo_cleanup_memtype_use()
531 bo->ttm = NULL; in ttm_bo_cleanup_memtype_use()
533 ttm_bo_mem_put(bo, &bo->mem); in ttm_bo_cleanup_memtype_use()
535 atomic_set(&bo->reserved, 0); in ttm_bo_cleanup_memtype_use()
536 wakeup(bo); in ttm_bo_cleanup_memtype_use()
548 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) in ttm_bo_cleanup_refs_or_queue() argument
550 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_cleanup_refs_or_queue()
551 struct ttm_bo_global *glob = bo->glob; in ttm_bo_cleanup_refs_or_queue()
558 ret = ttm_bo_reserve_nolru(bo, false, true, false, 0); in ttm_bo_cleanup_refs_or_queue()
561 (void) ttm_bo_wait(bo, false, false, true); in ttm_bo_cleanup_refs_or_queue()
562 if (!ret && !bo->sync_obj) { in ttm_bo_cleanup_refs_or_queue()
564 put_count = ttm_bo_del_from_lru(bo); in ttm_bo_cleanup_refs_or_queue()
567 ttm_bo_cleanup_memtype_use(bo); in ttm_bo_cleanup_refs_or_queue()
569 ttm_bo_list_ref_sub(bo, put_count, true); in ttm_bo_cleanup_refs_or_queue()
573 if (bo->sync_obj) in ttm_bo_cleanup_refs_or_queue()
574 sync_obj = driver->sync_obj_ref(bo->sync_obj); in ttm_bo_cleanup_refs_or_queue()
578 atomic_set(&bo->reserved, 0); in ttm_bo_cleanup_refs_or_queue()
579 wakeup(bo); in ttm_bo_cleanup_refs_or_queue()
582 refcount_acquire(&bo->list_kref); in ttm_bo_cleanup_refs_or_queue()
583 list_add_tail(&bo->ddestroy, &bdev->ddestroy); in ttm_bo_cleanup_refs_or_queue()
606 static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, in ttm_bo_cleanup_refs_and_unlock() argument
610 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_cleanup_refs_and_unlock()
612 struct ttm_bo_global *glob = bo->glob; in ttm_bo_cleanup_refs_and_unlock()
617 ret = ttm_bo_wait(bo, false, false, true); in ttm_bo_cleanup_refs_and_unlock()
627 sync_obj = driver->sync_obj_ref(bo->sync_obj); in ttm_bo_cleanup_refs_and_unlock()
630 atomic_set(&bo->reserved, 0); in ttm_bo_cleanup_refs_and_unlock()
631 wakeup(bo); in ttm_bo_cleanup_refs_and_unlock()
644 ret = ttm_bo_wait(bo, false, false, true); in ttm_bo_cleanup_refs_and_unlock()
650 ret = ttm_bo_reserve_nolru(bo, false, true, false, 0); in ttm_bo_cleanup_refs_and_unlock()
667 if (ret || unlikely(list_empty(&bo->ddestroy))) { in ttm_bo_cleanup_refs_and_unlock()
668 atomic_set(&bo->reserved, 0); in ttm_bo_cleanup_refs_and_unlock()
669 wakeup(bo); in ttm_bo_cleanup_refs_and_unlock()
674 put_count = ttm_bo_del_from_lru(bo); in ttm_bo_cleanup_refs_and_unlock()
675 list_del_init(&bo->ddestroy); in ttm_bo_cleanup_refs_and_unlock()
679 ttm_bo_cleanup_memtype_use(bo); in ttm_bo_cleanup_refs_and_unlock()
681 ttm_bo_list_ref_sub(bo, put_count, true); in ttm_bo_cleanup_refs_and_unlock()
756 static void ttm_bo_release(struct ttm_buffer_object *bo) in ttm_bo_release() argument
758 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_release()
759 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; in ttm_bo_release()
762 if (likely(bo->vm_node != NULL)) { in ttm_bo_release()
764 &bdev->addr_space_rb, bo); in ttm_bo_release()
765 drm_mm_put_block(bo->vm_node); in ttm_bo_release()
766 bo->vm_node = NULL; in ttm_bo_release()
770 ttm_mem_io_free_vm(bo); in ttm_bo_release()
772 ttm_bo_cleanup_refs_or_queue(bo); in ttm_bo_release()
773 if (refcount_release(&bo->list_kref)) in ttm_bo_release()
774 ttm_bo_release_list(bo); in ttm_bo_release()
779 struct ttm_buffer_object *bo = *p_bo; in ttm_bo_unref() local
782 if (refcount_release(&bo->kref)) in ttm_bo_unref()
783 ttm_bo_release(bo); in ttm_bo_unref()
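
ttm_bo_unref() shows the safe unref idiom: NULL the caller's pointer before dropping the reference, so the pointer cannot be used after what may have been the final release. A userland equivalent with C11 atomics, names invented:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct obj { atomic_uint kref; };

    /* Mirrors the ttm_bo_unref() pattern above: clear *p first, then
     * destroy on the 1 -> 0 transition. */
    static void obj_unref(struct obj **p)
    {
        struct obj *o = *p;

        *p = NULL;
        if (atomic_fetch_sub(&o->kref, 1) == 1)  /* we held the last ref */
            free(o);
    }
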
803 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, in ttm_bo_evict() argument
806 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_evict()
812 ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); in ttm_bo_evict()
822 MPASS(ttm_bo_is_reserved(bo)); in ttm_bo_evict()
824 evict_mem = bo->mem; in ttm_bo_evict()
833 bdev->driver->evict_flags(bo, &placement); in ttm_bo_evict()
834 ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible, in ttm_bo_evict()
839 bo); in ttm_bo_evict()
840 ttm_bo_mem_space_debug(bo, &placement); in ttm_bo_evict()
845 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, in ttm_bo_evict()
850 ttm_bo_mem_put(bo, &evict_mem); in ttm_bo_evict()
853 bo->evicted = true; in ttm_bo_evict()
865 struct ttm_buffer_object *bo; in ttm_mem_evict_first() local
869 list_for_each_entry(bo, &man->lru, lru) { in ttm_mem_evict_first()
870 ret = ttm_bo_reserve_nolru(bo, false, true, false, 0); in ttm_mem_evict_first()
880 refcount_acquire(&bo->list_kref); in ttm_mem_evict_first()
882 if (!list_empty(&bo->ddestroy)) { in ttm_mem_evict_first()
883 ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible, in ttm_mem_evict_first()
885 if (refcount_release(&bo->list_kref)) in ttm_mem_evict_first()
886 ttm_bo_release_list(bo); in ttm_mem_evict_first()
890 put_count = ttm_bo_del_from_lru(bo); in ttm_mem_evict_first()
895 ttm_bo_list_ref_sub(bo, put_count, true); in ttm_mem_evict_first()
897 ret = ttm_bo_evict(bo, interruptible, no_wait_gpu); in ttm_mem_evict_first()
898 ttm_bo_unreserve(bo); in ttm_mem_evict_first()
900 if (refcount_release(&bo->list_kref)) in ttm_mem_evict_first()
901 ttm_bo_release_list(bo); in ttm_mem_evict_first()
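
The evictor's scan on lines 869-870 is worth noting: it walks the shared LRU under the global lock but only try-reserves each object (no_wait = true), skipping anything another thread holds rather than sleeping with the list lock held. A userland model of the same trylock scan, names invented:

    #include <pthread.h>
    #include <stddef.h>

    struct node { pthread_mutex_t lock; struct node *next; };

    static struct node *grab_first_idle(pthread_mutex_t *list_lock,
        struct node *head)
    {
        struct node *n;

        pthread_mutex_lock(list_lock);
        for (n = head; n != NULL; n = n->next) {
            if (pthread_mutex_trylock(&n->lock) == 0)
                break;  /* "reserved" it without ever blocking */
        }
        pthread_mutex_unlock(list_lock);
        return n;       /* NULL means everything was busy */
    }
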
905 void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) in ttm_bo_mem_put() argument
907 struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type]; in ttm_bo_mem_put()
917 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, in ttm_bo_mem_force_space() argument
924 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_mem_force_space()
929 ret = (*man->func->get_node)(man, bo, placement, mem); in ttm_bo_mem_force_space()
997 int ttm_bo_mem_space(struct ttm_buffer_object *bo, in ttm_bo_mem_space() argument
1003 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_mem_space()
1028 cur_flags = ttm_bo_select_caching(man, bo->mem.placement, in ttm_bo_mem_space()
1042 ret = (*man->func->get_node)(man, bo, placement, mem); in ttm_bo_mem_space()
1073 cur_flags = ttm_bo_select_caching(man, bo->mem.placement, in ttm_bo_mem_space()
1090 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem, in ttm_bo_mem_space()
1104 int ttm_bo_move_buffer(struct ttm_buffer_object *bo, in ttm_bo_move_buffer() argument
1111 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_move_buffer()
1113 MPASS(ttm_bo_is_reserved(bo)); in ttm_bo_move_buffer()
1121 ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); in ttm_bo_move_buffer()
1125 mem.num_pages = bo->num_pages; in ttm_bo_move_buffer()
1127 mem.page_alignment = bo->mem.page_alignment; in ttm_bo_move_buffer()
1133 ret = ttm_bo_mem_space(bo, placement, &mem, in ttm_bo_move_buffer()
1137 ret = ttm_bo_handle_move_mem(bo, &mem, false, in ttm_bo_move_buffer()
1141 ttm_bo_mem_put(bo, &mem); in ttm_bo_move_buffer()
1165 int ttm_bo_validate(struct ttm_buffer_object *bo, in ttm_bo_validate() argument
1172 MPASS(ttm_bo_is_reserved(bo)); in ttm_bo_validate()
1176 (placement->lpfn - placement->fpfn) < bo->num_pages) in ttm_bo_validate()
1181 ret = ttm_bo_mem_compat(placement, &bo->mem); in ttm_bo_validate()
1183 ret = ttm_bo_move_buffer(bo, placement, interruptible, in ttm_bo_validate()
1192 ttm_flag_masked(&bo->mem.placement, placement->placement[ret], in ttm_bo_validate()
1198 if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { in ttm_bo_validate()
1199 ret = ttm_bo_add_ttm(bo, true); in ttm_bo_validate()
1206 int ttm_bo_check_placement(struct ttm_buffer_object *bo, in ttm_bo_check_placement() argument
1210 (bo->mem.num_pages > (placement->lpfn - placement->fpfn)))); in ttm_bo_check_placement()
1216 struct ttm_buffer_object *bo, in ttm_bo_init() argument
1235 (*destroy)(bo); in ttm_bo_init()
1237 free(bo, M_TTM_BO); in ttm_bo_init()
1245 (*destroy)(bo); in ttm_bo_init()
1247 free(bo, M_TTM_BO); in ttm_bo_init()
1251 bo->destroy = destroy; in ttm_bo_init()
1253 refcount_init(&bo->kref, 1); in ttm_bo_init()
1254 refcount_init(&bo->list_kref, 1); in ttm_bo_init()
1255 atomic_set(&bo->cpu_writers, 0); in ttm_bo_init()
1256 atomic_set(&bo->reserved, 1); in ttm_bo_init()
1257 INIT_LIST_HEAD(&bo->lru); in ttm_bo_init()
1258 INIT_LIST_HEAD(&bo->ddestroy); in ttm_bo_init()
1259 INIT_LIST_HEAD(&bo->swap); in ttm_bo_init()
1260 INIT_LIST_HEAD(&bo->io_reserve_lru); in ttm_bo_init()
1261 bo->bdev = bdev; in ttm_bo_init()
1262 bo->glob = bdev->glob; in ttm_bo_init()
1263 bo->type = type; in ttm_bo_init()
1264 bo->num_pages = num_pages; in ttm_bo_init()
1265 bo->mem.size = num_pages << PAGE_SHIFT; in ttm_bo_init()
1266 bo->mem.mem_type = TTM_PL_SYSTEM; in ttm_bo_init()
1267 bo->mem.num_pages = bo->num_pages; in ttm_bo_init()
1268 bo->mem.mm_node = NULL; in ttm_bo_init()
1269 bo->mem.page_alignment = page_alignment; in ttm_bo_init()
1270 bo->mem.bus.io_reserved_vm = false; in ttm_bo_init()
1271 bo->mem.bus.io_reserved_count = 0; in ttm_bo_init()
1272 bo->priv_flags = 0; in ttm_bo_init()
1273 bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); in ttm_bo_init()
1274 bo->seq_valid = false; in ttm_bo_init()
1275 bo->persistent_swap_storage = persistent_swap_storage; in ttm_bo_init()
1276 bo->acc_size = acc_size; in ttm_bo_init()
1277 bo->sg = sg; in ttm_bo_init()
1278 atomic_inc(&bo->glob->bo_count); in ttm_bo_init()
1280 ret = ttm_bo_check_placement(bo, placement); in ttm_bo_init()
1288 if (bo->type == ttm_bo_type_device || in ttm_bo_init()
1289 bo->type == ttm_bo_type_sg) { in ttm_bo_init()
1290 ret = ttm_bo_setup_vm(bo); in ttm_bo_init()
1295 ret = ttm_bo_validate(bo, placement, interruptible, false); in ttm_bo_init()
1299 ttm_bo_unreserve(bo); in ttm_bo_init()
1303 ttm_bo_unreserve(bo); in ttm_bo_init()
1304 ttm_bo_unref(&bo); in ttm_bo_init()
1345 struct ttm_buffer_object *bo; in ttm_bo_create() local
1349 bo = malloc(sizeof(*bo), M_TTM_BO, M_WAITOK | M_ZERO); in ttm_bo_create()
1351 ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment, in ttm_bo_create()
1355 *p_bo = bo; in ttm_bo_create()
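
The allocate-then-init split above carries an error contract implied by the destroy/free pairs around lines 1235-1247: when ttm_bo_init() fails it disposes of the object itself (via the destroy callback or the M_TTM_BO free), so ttm_bo_create() must not free it again. A condensed restatement; the trailing ttm_bo_init() arguments are elided because they do not appear in the matched lines:

    bo = malloc(sizeof(*bo), M_TTM_BO, M_WAITOK | M_ZERO);
    ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
        /* ... remaining arguments per ttm_bo_api.h ... */);
    if (ret == 0)
        *p_bo = bo;  /* on failure, ttm_bo_init() already destroyed bo */
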
1644 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo) in ttm_bo_unmap_virtual_locked() argument
1647 ttm_bo_release_mmap(bo); in ttm_bo_unmap_virtual_locked()
1648 ttm_mem_io_free_vm(bo); in ttm_bo_unmap_virtual_locked()
1651 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) in ttm_bo_unmap_virtual() argument
1653 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_unmap_virtual()
1654 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; in ttm_bo_unmap_virtual()
1657 ttm_bo_unmap_virtual_locked(bo); in ttm_bo_unmap_virtual()
1661 static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo) in ttm_bo_vm_insert_rb() argument
1663 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_vm_insert_rb()
1666 RB_INSERT(ttm_bo_device_buffer_objects, &bdev->addr_space_rb, bo); in ttm_bo_vm_insert_rb()
1680 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo) in ttm_bo_setup_vm() argument
1682 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_setup_vm()
1691 bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm, in ttm_bo_setup_vm()
1692 bo->mem.num_pages, 0, 0); in ttm_bo_setup_vm()
1694 if (unlikely(bo->vm_node == NULL)) { in ttm_bo_setup_vm()
1699 bo->vm_node = drm_mm_get_block_atomic(bo->vm_node, in ttm_bo_setup_vm()
1700 bo->mem.num_pages, 0); in ttm_bo_setup_vm()
1702 if (unlikely(bo->vm_node == NULL)) { in ttm_bo_setup_vm()
1707 ttm_bo_vm_insert_rb(bo); in ttm_bo_setup_vm()
1709 bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT; in ttm_bo_setup_vm()
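
Line 1709 computes the user-visible mmap offset: drm_mm allocates in page units, so vm_node->start is a page index and the byte offset is that index shifted by PAGE_SHIFT. A standalone check of the arithmetic, assuming 4 KiB pages and a made-up start:

    #include <assert.h>
    #include <stdint.h>

    #define SKETCH_PAGE_SHIFT 12  /* 4 KiB pages, for illustration */

    int main(void)
    {
        uint64_t start_page = 0x1000;  /* hypothetical vm_node->start */
        uint64_t offset = start_page << SKETCH_PAGE_SHIFT;

        assert(offset == 0x1000000);   /* byte offset handed to mmap() */
        return 0;
    }
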
1717 int ttm_bo_wait(struct ttm_buffer_object *bo, in ttm_bo_wait() argument
1720 struct ttm_bo_driver *driver = bo->bdev->driver; in ttm_bo_wait()
1721 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_wait()
1725 if (likely(bo->sync_obj == NULL)) in ttm_bo_wait()
1728 while (bo->sync_obj) { in ttm_bo_wait()
1730 if (driver->sync_obj_signaled(bo->sync_obj)) { in ttm_bo_wait()
1731 void *tmp_obj = bo->sync_obj; in ttm_bo_wait()
1732 bo->sync_obj = NULL; in ttm_bo_wait()
1733 clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); in ttm_bo_wait()
1743 sync_obj = driver->sync_obj_ref(bo->sync_obj); in ttm_bo_wait()
1753 if (likely(bo->sync_obj == sync_obj)) { in ttm_bo_wait()
1754 void *tmp_obj = bo->sync_obj; in ttm_bo_wait()
1755 bo->sync_obj = NULL; in ttm_bo_wait()
1757 &bo->priv_flags); in ttm_bo_wait()
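
ttm_bo_wait() pins the fence with its own sync_obj_ref() before dropping the fence lock to sleep, so a concurrent completion cannot free the fence under the sleeper, and afterwards it only clears bo->sync_obj if nobody attached a new fence in the meantime. A userland model of that discipline with a condition-variable fence; all names here are invented for the sketch:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct fence {
        atomic_uint ref;
        atomic_bool signaled;
        pthread_cond_t done;       /* broadcast under the owner's lock */
    };

    struct waited_obj {
        pthread_mutex_t fence_lock;
        struct fence *sync_obj;    /* NULL when idle; holds one ref */
    };

    static void fence_unref(struct fence *f)
    {
        if (atomic_fetch_sub(&f->ref, 1) == 1)
            free(f);
    }

    static void obj_wait_idle(struct waited_obj *o)
    {
        pthread_mutex_lock(&o->fence_lock);
        while (o->sync_obj != NULL) {
            struct fence *f = o->sync_obj;

            atomic_fetch_add(&f->ref, 1);         /* pin before sleeping */
            while (!atomic_load(&f->signaled))
                pthread_cond_wait(&f->done, &o->fence_lock);
            if (o->sync_obj == f) {               /* nobody re-fenced it */
                o->sync_obj = NULL;
                fence_unref(f);                   /* drop the object's ref */
            }
            fence_unref(f);                       /* drop our pin */
            /* loop: a new fence may have been attached while we slept */
        }
        pthread_mutex_unlock(&o->fence_lock);
    }
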
1771 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) in ttm_bo_synccpu_write_grab() argument
1773 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_synccpu_write_grab()
1780 ret = ttm_bo_reserve(bo, true, no_wait, false, 0); in ttm_bo_synccpu_write_grab()
1784 ret = ttm_bo_wait(bo, false, true, no_wait); in ttm_bo_synccpu_write_grab()
1787 atomic_inc(&bo->cpu_writers); in ttm_bo_synccpu_write_grab()
1788 ttm_bo_unreserve(bo); in ttm_bo_synccpu_write_grab()
1792 void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo) in ttm_bo_synccpu_write_release() argument
1794 atomic_dec(&bo->cpu_writers); in ttm_bo_synccpu_write_release()
1806 struct ttm_buffer_object *bo; in ttm_bo_swapout() local
1812 list_for_each_entry(bo, &glob->swap_lru, swap) { in ttm_bo_swapout()
1813 ret = ttm_bo_reserve_nolru(bo, false, true, false, 0); in ttm_bo_swapout()
1823 refcount_acquire(&bo->list_kref); in ttm_bo_swapout()
1825 if (!list_empty(&bo->ddestroy)) { in ttm_bo_swapout()
1826 ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false); in ttm_bo_swapout()
1827 if (refcount_release(&bo->list_kref)) in ttm_bo_swapout()
1828 ttm_bo_release_list(bo); in ttm_bo_swapout()
1832 put_count = ttm_bo_del_from_lru(bo); in ttm_bo_swapout()
1835 ttm_bo_list_ref_sub(bo, put_count, true); in ttm_bo_swapout()
1841 mtx_lock(&bo->bdev->fence_lock); in ttm_bo_swapout()
1842 ret = ttm_bo_wait(bo, false, false, false); in ttm_bo_swapout()
1843 mtx_unlock(&bo->bdev->fence_lock); in ttm_bo_swapout()
1848 if ((bo->mem.placement & swap_placement) != swap_placement) { in ttm_bo_swapout()
1851 evict_mem = bo->mem; in ttm_bo_swapout()
1856 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, in ttm_bo_swapout()
1862 ttm_bo_unmap_virtual(bo); in ttm_bo_swapout()
1869 if (bo->bdev->driver->swap_notify) in ttm_bo_swapout()
1870 bo->bdev->driver->swap_notify(bo); in ttm_bo_swapout()
1872 ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage); in ttm_bo_swapout()
1881 atomic_set(&bo->reserved, 0); in ttm_bo_swapout()
1882 wakeup(bo); in ttm_bo_swapout()
1883 if (refcount_release(&bo->list_kref)) in ttm_bo_swapout()
1884 ttm_bo_release_list(bo); in ttm_bo_swapout()