Lines Matching refs:glob

Cross-reference of the TTM global state (struct ttm_bo_global; the bo->glob and bdev->glob back-pointers) in the FreeBSD drm2 port's ttm_bo.c. Each entry gives the source line number, the matching line, and the enclosing function; "local" and "argument" mark matches that are a local variable or a parameter of that function.
44 static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob);
95 static ssize_t ttm_bo_global_show(struct ttm_bo_global *glob,
100 (unsigned long) atomic_read(&glob->bo_count));
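
The first matches show the lifetime counter: every buffer object bumps glob->bo_count at init and drops it on release, and the show callback merely formats the atomic for the sysfs-style attribute. A minimal sketch of that callback, assuming the port passes a page-sized output buffer as the second argument:

    static ssize_t
    ttm_bo_global_show(struct ttm_bo_global *glob, char *buffer)
    {
            /* Report the number of live buffer objects; the buffer
             * argument and the PAGE_SIZE bound are assumptions. */
            return (snprintf(buffer, PAGE_SIZE, "%lu\n",
                (unsigned long)atomic_read(&glob->bo_count)));
    }
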
124 atomic_dec(&bo->glob->bo_count); in ttm_bo_release_list()
130 ttm_mem_global_free(bdev->glob->mem_glob, acc_size); in ttm_bo_release_list()
148 ret = -msleep(bo, &bo->glob->lru_lock, flags, wmsg, 0); in ttm_bo_wait_unreserved_locked()
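
The match at 148 is the FreeBSD-specific wait: the BO address is the sleep channel and glob->lru_lock is the msleep(9) interlock, so the lock is dropped atomically while sleeping and held again on return; msleep returns a positive errno, hence the negation into the port's Linux-style negative-errno convention. A sketch of the surrounding loop, with the wmesg strings as assumptions:

    static int
    ttm_bo_wait_unreserved_locked(struct ttm_buffer_object *bo,
        bool interruptible)
    {
            const char *wmsg = interruptible ? "ttbowi" : "ttbowu"; /* assumed */
            int flags = interruptible ? PCATCH : 0;
            int ret = 0;

            while (ttm_bo_is_reserved(bo)) {
                    ret = -msleep(bo, &bo->glob->lru_lock, flags, wmsg, 0);
                    if (ret != 0)
                            break;
            }
            return (ret);
    }
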
173 list_add_tail(&bo->swap, &bo->glob->swap_lru); in ttm_bo_add_to_lru()
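
The swap_lru insertion at 173 only happens for BOs that actually have a TTM, i.e. system pages that can be swapped out. A sketch of the whole helper, assuming the port's refcount(9)-based list references:

    void
    ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
    {
            struct ttm_bo_device *bdev = bo->bdev;
            struct ttm_mem_type_manager *man;

            if ((bo->mem.placement & TTM_PL_FLAG_NO_EVICT) == 0) {
                    man = &bdev->man[bo->mem.mem_type];
                    list_add_tail(&bo->lru, &man->lru);
                    refcount_acquire(&bo->list_kref);
                    if (bo->ttm != NULL) {
                            /* Only BOs backed by system pages are
                             * candidates for swap-out. */
                            list_add_tail(&bo->swap, &bo->glob->swap_lru);
                            refcount_acquire(&bo->list_kref);
                    }
            }
    }
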
281 struct ttm_bo_global *glob = bo->glob; in ttm_bo_reserve() local
285 mtx_lock(&bo->glob->lru_lock); in ttm_bo_reserve()
290 mtx_unlock(&glob->lru_lock); in ttm_bo_reserve()
293 mtx_unlock(&bo->glob->lru_lock); in ttm_bo_reserve()
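
Matches 281-293 give the reserve-side lock discipline: glob->lru_lock serializes both the reservation attempt and LRU membership, and a successfully reserved BO leaves the LRU lists before the lock is dropped. A condensed reconstruction, with the helper names (ttm_bo_reserve_nolru, ttm_bo_del_from_lru, ttm_bo_list_ref_sub) taken from the same-era TTM and assumed unchanged in the port:

    int
    ttm_bo_reserve(struct ttm_buffer_object *bo, bool interruptible,
        bool no_wait, bool use_sequence, uint32_t sequence)
    {
            struct ttm_bo_global *glob = bo->glob;
            int put_count = 0;
            int ret;

            mtx_lock(&bo->glob->lru_lock);
            ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait,
                use_sequence, sequence);
            if (ret == 0) {
                    /* A reserved BO must not remain on the LRU lists. */
                    put_count = ttm_bo_del_from_lru(bo);
                    mtx_unlock(&glob->lru_lock);
                    ttm_bo_list_ref_sub(bo, put_count, true);
            } else
                    mtx_unlock(&bo->glob->lru_lock);

            return (ret);
    }

Keeping reservation and LRU state under the one lock is what lets the eviction and swap paths further down try-reserve list entries safely; the slowpath at 335-345 applies the same discipline while blocking with a deadlock-avoidance sequence.
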
335 struct ttm_bo_global *glob = bo->glob; in ttm_bo_reserve_slowpath() local
338 mtx_lock(&glob->lru_lock); in ttm_bo_reserve_slowpath()
342 mtx_unlock(&glob->lru_lock); in ttm_bo_reserve_slowpath()
345 mtx_unlock(&glob->lru_lock); in ttm_bo_reserve_slowpath()
358 struct ttm_bo_global *glob = bo->glob; in ttm_bo_unreserve() local
360 mtx_lock(&glob->lru_lock); in ttm_bo_unreserve()
362 mtx_unlock(&glob->lru_lock); in ttm_bo_unreserve()
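
Unreserve is the mirror image: retake lru_lock, put the BO back on its LRU lists and wake any waiters, then drop the lock. A minimal sketch, assuming a ttm_bo_unreserve_locked() helper that does the re-add and wakeup:

    void
    ttm_bo_unreserve(struct ttm_buffer_object *bo)
    {
            struct ttm_bo_global *glob = bo->glob;

            mtx_lock(&glob->lru_lock);
            ttm_bo_unreserve_locked(bo);    /* re-adds to LRU, wakes sleepers */
            mtx_unlock(&glob->lru_lock);
    }
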
371 struct ttm_bo_global *glob = bo->glob; in ttm_bo_add_ttm() local
387 page_flags, glob->dummy_read_page); in ttm_bo_add_ttm()
394 glob->dummy_read_page); in ttm_bo_add_ttm()
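
Both ttm_tt_create calls at 387 and 394 hand the driver the single shared glob->dummy_read_page, which backs reads of pages that were never populated. A condensed sketch of the switch (in the real code the zero-alloc flag applies only to device-type BOs):

    static int
    ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
    {
            struct ttm_bo_device *bdev = bo->bdev;
            struct ttm_bo_global *glob = bo->glob;
            uint32_t page_flags = zero_alloc ? TTM_PAGE_FLAG_ZERO_ALLOC : 0;

            switch (bo->type) {
            case ttm_bo_type_device:
            case ttm_bo_type_kernel:
                    bo->ttm = bdev->driver->ttm_tt_create(bdev,
                        bo->num_pages << PAGE_SHIFT, page_flags,
                        glob->dummy_read_page);
                    break;
            case ttm_bo_type_sg:
                    bo->ttm = bdev->driver->ttm_tt_create(bdev,
                        bo->num_pages << PAGE_SHIFT,
                        page_flags | TTM_PAGE_FLAG_SG,
                        glob->dummy_read_page);
                    break;
            default:
                    return (-EINVAL);
            }
            return (bo->ttm != NULL ? 0 : -ENOMEM);
    }
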
551 struct ttm_bo_global *glob = bo->glob; in ttm_bo_cleanup_refs_or_queue() local
557 mtx_lock(&glob->lru_lock); in ttm_bo_cleanup_refs_or_queue()
566 mtx_unlock(&glob->lru_lock); in ttm_bo_cleanup_refs_or_queue()
584 mtx_unlock(&glob->lru_lock); in ttm_bo_cleanup_refs_or_queue()
612 struct ttm_bo_global *glob = bo->glob; in ttm_bo_cleanup_refs_and_unlock() local
632 mtx_unlock(&glob->lru_lock); in ttm_bo_cleanup_refs_and_unlock()
649 mtx_lock(&glob->lru_lock); in ttm_bo_cleanup_refs_and_unlock()
661 mtx_unlock(&glob->lru_lock); in ttm_bo_cleanup_refs_and_unlock()
670 mtx_unlock(&glob->lru_lock); in ttm_bo_cleanup_refs_and_unlock()
678 mtx_unlock(&glob->lru_lock); in ttm_bo_cleanup_refs_and_unlock()
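
Matches 551-678 are the deferred-destroy machinery. ttm_bo_cleanup_refs_or_queue() destroys an idle, reservable BO immediately and otherwise parks it on bdev->ddestroy; ttm_bo_cleanup_refs_and_unlock() then retries, and because sleeping on a fence with lru_lock held is forbidden, it drops the lock around the wait and revalidates the reservation afterwards. A condensed reconstruction of the latter, following the same-era Linux TTM (the unreserve/wakeup bookkeeping on the exit paths is elided):

    static int
    ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
        bool interruptible, bool no_wait_gpu)
    {
            struct ttm_bo_driver *driver = bo->bdev->driver;
            struct ttm_bo_global *glob = bo->glob;
            int put_count, ret;

            ret = ttm_bo_wait(bo, false, false, true);  /* non-blocking idle probe */
            if (ret != 0 && !no_wait_gpu) {
                    void *sync_obj = driver->sync_obj_ref(bo->sync_obj);

                    /* Wait with no locks held, then relock and re-reserve. */
                    mtx_unlock(&glob->lru_lock);
                    ret = driver->sync_obj_wait(sync_obj, false, interruptible);
                    driver->sync_obj_unref(&sync_obj);
                    if (ret != 0)
                            return (ret);
                    mtx_lock(&glob->lru_lock);
                    ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
            }
            if (ret != 0 || list_empty(&bo->ddestroy)) {
                    mtx_unlock(&glob->lru_lock);
                    return (ret);
            }
            put_count = ttm_bo_del_from_lru(bo);
            list_del_init(&bo->ddestroy);
            put_count++;
            mtx_unlock(&glob->lru_lock);
            ttm_bo_cleanup_memtype_use(bo);     /* releases backing storage */
            ttm_bo_list_ref_sub(bo, put_count, true);
            return (0);
    }
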
693 struct ttm_bo_global *glob = bdev->glob; in ttm_bo_delayed_delete() local
697 mtx_lock(&glob->lru_lock); in ttm_bo_delayed_delete()
724 mtx_unlock(&glob->lru_lock); in ttm_bo_delayed_delete()
733 mtx_lock(&glob->lru_lock); in ttm_bo_delayed_delete()
739 mtx_unlock(&glob->lru_lock); in ttm_bo_delayed_delete()
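
The delayed-delete walk at 693-739 pins each ddestroy entry with a list reference so lru_lock can be dropped while the entry is cleaned up, then retakes the lock to step forward. A simplified sketch (the real loop prefetches the next entry before dropping the lock and can block-reserve when remove_all is set; here the list head is simply re-read and only try-reserve is used):

    static int
    ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
    {
            struct ttm_bo_global *glob = bdev->glob;
            struct ttm_buffer_object *entry;
            int ret = 0;

            mtx_lock(&glob->lru_lock);
            while (!list_empty(&bdev->ddestroy)) {
                    entry = list_first_entry(&bdev->ddestroy,
                        struct ttm_buffer_object, ddestroy);
                    refcount_acquire(&entry->list_kref);

                    ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
                    if (ret == 0)
                            /* Drops glob->lru_lock on every path. */
                            ret = ttm_bo_cleanup_refs_and_unlock(entry,
                                false, !remove_all);
                    else
                            mtx_unlock(&glob->lru_lock);

                    if (refcount_release(&entry->list_kref))
                            ttm_bo_release_list(entry);
                    if (ret != 0)
                            return (ret);
                    mtx_lock(&glob->lru_lock);
            }
            mtx_unlock(&glob->lru_lock);
            return (0);
    }
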
863 struct ttm_bo_global *glob = bdev->glob; in ttm_mem_evict_first() local
868 mtx_lock(&glob->lru_lock); in ttm_mem_evict_first()
876 mtx_unlock(&glob->lru_lock); in ttm_mem_evict_first()
891 mtx_unlock(&glob->lru_lock); in ttm_mem_evict_first()
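
ttm_mem_evict_first() (matches 863-891) scans the manager's LRU under lru_lock for the first BO it can try-reserve, pins it, drops the lock, and evicts. A condensed sketch that omits the dying-BO (ddestroy) special case:

    static int
    ttm_mem_evict_first(struct ttm_bo_device *bdev, uint32_t mem_type,
        bool interruptible, bool no_wait_gpu)
    {
            struct ttm_bo_global *glob = bdev->glob;
            struct ttm_mem_type_manager *man = &bdev->man[mem_type];
            struct ttm_buffer_object *bo;
            int put_count, ret = -EBUSY;

            mtx_lock(&glob->lru_lock);
            list_for_each_entry(bo, &man->lru, lru) {
                    ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
                    if (ret == 0)
                            break;
            }
            if (ret != 0) {
                    mtx_unlock(&glob->lru_lock);
                    return (ret);
            }
            refcount_acquire(&bo->list_kref);
            put_count = ttm_bo_del_from_lru(bo);
            mtx_unlock(&glob->lru_lock);
            ttm_bo_list_ref_sub(bo, put_count, true);

            ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
            ttm_bo_unreserve(bo);
            if (refcount_release(&bo->list_kref))
                    ttm_bo_release_list(bo);
            return (ret);
    }
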
1229 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; in ttm_bo_init()
1262 bo->glob = bdev->glob; in ttm_bo_init()
1278 atomic_inc(&bo->glob->bo_count); in ttm_bo_init()
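
In ttm_bo_init() (matches 1229-1278) the object's accounting size is charged to the global memory zone before the BO is tied to the device's global state and counted. A hypothetical helper isolating just that bookkeeping (ttm_bo_init_accounting is not a function in the file):

    static int
    ttm_bo_init_accounting(struct ttm_buffer_object *bo,
        struct ttm_bo_device *bdev, size_t acc_size)
    {
            struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
            int ret;

            /* Charge the metadata footprint to the global zone first. */
            ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
            if (ret != 0)
                    return (-ENOMEM);

            bo->bdev = bdev;
            bo->glob = bdev->glob;
            bo->acc_size = acc_size;
            atomic_inc(&bo->glob->bo_count);
            return (0);
    }

On release (matches 124-130) the two steps unwind in the opposite order: the count drops in ttm_bo_release_list() before ttm_mem_global_free() returns acc_size.
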
1364 struct ttm_bo_global *glob = bdev->glob; in ttm_bo_force_list_clean() local
1371 mtx_lock(&glob->lru_lock); in ttm_bo_force_list_clean()
1373 mtx_unlock(&glob->lru_lock); in ttm_bo_force_list_clean()
1382 mtx_lock(&glob->lru_lock); in ttm_bo_force_list_clean()
1384 mtx_unlock(&glob->lru_lock); in ttm_bo_force_list_clean()
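
ttm_bo_force_list_clean() (matches 1364-1384) cannot use a plain list traversal because ttm_mem_evict_first() drops lru_lock internally, so it re-checks emptiness from the top on each pass. A sketch:

    static int
    ttm_bo_force_list_clean(struct ttm_bo_device *bdev, unsigned mem_type,
        bool allow_errors)
    {
            struct ttm_mem_type_manager *man = &bdev->man[mem_type];
            struct ttm_bo_global *glob = bdev->glob;
            int ret;

            mtx_lock(&glob->lru_lock);
            while (!list_empty(&man->lru)) {
                    mtx_unlock(&glob->lru_lock);
                    ret = ttm_mem_evict_first(bdev, mem_type, false, false);
                    if (ret != 0) {
                            if (allow_errors)
                                    return (ret);
                            printf("[TTM] Cleanup eviction failed\n");
                    }
                    mtx_lock(&glob->lru_lock);
            }
            mtx_unlock(&glob->lru_lock);
            return (0);
    }
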
1469 static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob) in ttm_bo_global_kobj_release() argument
1472 ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink); in ttm_bo_global_kobj_release()
1473 vm_page_free(glob->dummy_read_page); in ttm_bo_global_kobj_release()
1478 struct ttm_bo_global *glob = ref->object; in ttm_bo_global_release() local
1480 if (refcount_release(&glob->kobj_ref)) in ttm_bo_global_release()
1481 ttm_bo_global_kobj_release(glob); in ttm_bo_global_release()
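
Where Linux drops the global through a kobject, the port counts it with refcount(9): the last ttm_bo_global_release() runs the kobj_release shim directly. A sketch of that shim, assuming (mirroring the init error path at 1526-1528) that it also frees the structure:

    static void
    ttm_bo_global_kobj_release(struct ttm_bo_global *glob)
    {
            ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
            vm_page_free(glob->dummy_read_page);
            free(glob, M_DRM_GLOBAL);       /* assumed, per the init unwind */
    }
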
1488 struct ttm_bo_global *glob = ref->object; in ttm_bo_global_init() local
1492 sx_init(&glob->device_list_mutex, "ttmdlm"); in ttm_bo_global_init()
1493 mtx_init(&glob->lru_lock, "ttmlru", NULL, MTX_DEF); in ttm_bo_global_init()
1494 glob->mem_glob = bo_ref->mem_glob; in ttm_bo_global_init()
1497 glob->dummy_read_page = vm_page_alloc_noobj_contig(0, 1, 0, in ttm_bo_global_init()
1500 if (unlikely(glob->dummy_read_page == NULL)) { in ttm_bo_global_init()
1510 INIT_LIST_HEAD(&glob->swap_lru); in ttm_bo_global_init()
1511 INIT_LIST_HEAD(&glob->device_list); in ttm_bo_global_init()
1513 ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout); in ttm_bo_global_init()
1514 ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink); in ttm_bo_global_init()
1520 atomic_set(&glob->bo_count, 0); in ttm_bo_global_init()
1522 refcount_init(&glob->kobj_ref, 1); in ttm_bo_global_init()
1526 vm_page_free(glob->dummy_read_page); in ttm_bo_global_init()
1528 free(glob, M_DRM_GLOBAL); in ttm_bo_global_init()
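
The init path builds the global in dependency order: locks, dummy page, lists, shrink registration, and only then the counters and the reference that keeps it all alive; the two error labels unwind exactly what was set up. A condensed reconstruction (the dummy-page allocation bounds and memory attribute are assumptions):

    int
    ttm_bo_global_init(struct drm_global_reference *ref)
    {
            struct ttm_bo_global *glob = ref->object;
            struct ttm_bo_global_ref *bo_ref =
                container_of(ref, struct ttm_bo_global_ref, ref);
            int ret;

            sx_init(&glob->device_list_mutex, "ttmdlm");
            mtx_init(&glob->lru_lock, "ttmlru", NULL, MTX_DEF);
            glob->mem_glob = bo_ref->mem_glob;

            /* The trailing allocation arguments are assumptions. */
            glob->dummy_read_page = vm_page_alloc_noobj_contig(0, 1, 0,
                VM_MAX_ADDRESS, PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE);
            if (glob->dummy_read_page == NULL) {
                    ret = -ENOMEM;
                    goto out_no_drp;
            }

            INIT_LIST_HEAD(&glob->swap_lru);
            INIT_LIST_HEAD(&glob->device_list);

            ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
            ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
            if (ret != 0) {
                    printf("[TTM] Could not register buffer object swapout\n");
                    goto out_no_shrink;
            }

            atomic_set(&glob->bo_count, 0);
            refcount_init(&glob->kobj_ref, 1);
            return (0);

    out_no_shrink:
            vm_page_free(glob->dummy_read_page);
    out_no_drp:
            free(glob, M_DRM_GLOBAL);
            return (ret);
    }
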
1537 struct ttm_bo_global *glob = bdev->glob; in ttm_bo_device_release() local
1552 sx_xlock(&glob->device_list_mutex); in ttm_bo_device_release()
1554 sx_xunlock(&glob->device_list_mutex); in ttm_bo_device_release()
1562 mtx_lock(&glob->lru_lock); in ttm_bo_device_release()
1568 mtx_unlock(&glob->lru_lock); in ttm_bo_device_release()
1579 struct ttm_bo_global *glob, in ttm_bo_device_init() argument
1608 bdev->glob = glob; in ttm_bo_device_init()
1612 sx_xlock(&glob->device_list_mutex); in ttm_bo_device_init()
1613 list_add_tail(&bdev->device_list, &glob->device_list); in ttm_bo_device_init()
1614 sx_xunlock(&glob->device_list_mutex); in ttm_bo_device_init()
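
Device registration (matches 1579-1614) ties bdev to the global and publishes it on glob->device_list under the sx(9) device_list_mutex; teardown at 1537-1568 unlinks it under the same lock before purging the lru_lock-protected lists. A hypothetical helper isolating the publish step (not a separate function in the file):

    static void
    ttm_bo_device_publish(struct ttm_bo_device *bdev,
        struct ttm_bo_global *glob)
    {
            bdev->glob = glob;
            sx_xlock(&glob->device_list_mutex);
            list_add_tail(&bdev->device_list, &glob->device_list);
            sx_xunlock(&glob->device_list_mutex);
    }
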
1804 struct ttm_bo_global *glob = in ttm_bo_swapout() local
1811 mtx_lock(&glob->lru_lock); in ttm_bo_swapout()
1812 list_for_each_entry(bo, &glob->swap_lru, swap) { in ttm_bo_swapout()
1819 mtx_unlock(&glob->lru_lock); in ttm_bo_swapout()
1833 mtx_unlock(&glob->lru_lock); in ttm_bo_swapout()
1890 while (ttm_bo_swapout(&bdev->glob->shrink) == 0) in ttm_bo_swapout_all()
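
ttm_bo_swapout() is the shrink callback registered at 1513: it try-reserves the least-recently-used entry on glob->swap_lru, pins it, and pushes its pages out with the locks dropped; ttm_bo_swapout_all() at 1890 simply calls it until it reports failure. A much-condensed sketch (the move to system memory and the dying-BO path are elided):

    static int
    ttm_bo_swapout(struct ttm_mem_shrink *shrink)
    {
            struct ttm_bo_global *glob =
                container_of(shrink, struct ttm_bo_global, shrink);
            struct ttm_buffer_object *bo;
            int put_count, ret = -EBUSY;

            mtx_lock(&glob->lru_lock);
            list_for_each_entry(bo, &glob->swap_lru, swap) {
                    ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
                    if (ret == 0)
                            break;
            }
            if (ret != 0) {
                    mtx_unlock(&glob->lru_lock);
                    return (ret);
            }
            refcount_acquire(&bo->list_kref);
            put_count = ttm_bo_del_from_lru(bo);
            mtx_unlock(&glob->lru_lock);
            ttm_bo_list_ref_sub(bo, put_count, true);

            ret = ttm_tt_swapout(bo->ttm, NULL);    /* push pages to swap */

            ttm_bo_unreserve(bo);
            if (refcount_release(&bo->list_kref))
                    ttm_bo_release_list(bo);
            return (ret);
    }
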