xref: /linux/drivers/gpu/drm/ttm/ttm_device.c (revision ae22a94997b8a03dcb3c922857c203246711f9d4)
/* SPDX-License-Identifier: GPL-2.0 OR MIT */

/*
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#define pr_fmt(fmt) "[TTM DEVICE] " fmt

#include <linux/mm.h>

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_placement.h>

#include "ttm_module.h"

/*
 * ttm_global_mutex - protecting the global state
 */
static DEFINE_MUTEX(ttm_global_mutex);
static unsigned ttm_glob_use_count;
struct ttm_global ttm_glob;
EXPORT_SYMBOL(ttm_glob);

struct dentry *ttm_debugfs_root;

static void ttm_global_release(void)
{
	struct ttm_global *glob = &ttm_glob;

	mutex_lock(&ttm_global_mutex);
	if (--ttm_glob_use_count > 0)
		goto out;

	ttm_pool_mgr_fini();
	debugfs_remove(ttm_debugfs_root);

	__free_page(glob->dummy_read_page);
	memset(glob, 0, sizeof(*glob));
out:
	mutex_unlock(&ttm_global_mutex);
}

static int ttm_global_init(void)
{
	struct ttm_global *glob = &ttm_glob;
	unsigned long num_pages, num_dma32;
	struct sysinfo si;
	int ret = 0;

	mutex_lock(&ttm_global_mutex);
	if (++ttm_glob_use_count > 1)
		goto out;

	si_meminfo(&si);

	ttm_debugfs_root = debugfs_create_dir("ttm", NULL);
	if (IS_ERR(ttm_debugfs_root))
		ttm_debugfs_root = NULL;

	/* Limit the number of pages in the pool to about 50% of the total
	 * system memory.
	 */
	num_pages = ((u64)si.totalram * si.mem_unit) >> PAGE_SHIFT;
	num_pages /= 2;

	/* But for DMA32 we limit ourselves to 2GiB maximum. */
	num_dma32 = (u64)(si.totalram - si.totalhigh) * si.mem_unit
		>> PAGE_SHIFT;
	num_dma32 = min(num_dma32, 2UL << (30 - PAGE_SHIFT));
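
	/*
	 * A worked example of the two limits above, assuming a machine
	 * with 16 GiB of RAM, no highmem and 4 KiB pages (illustrative
	 * numbers, not taken from this file):
	 *
	 *   num_pages = (16 GiB >> 12) / 2         = 2097152 pages (8 GiB)
	 *   num_dma32 = min(16 GiB >> 12, 2 << 18) =  524288 pages (2 GiB)
	 */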

	ttm_pool_mgr_init(num_pages);
	ttm_tt_mgr_init(num_pages, num_dma32);

	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32 |
					   __GFP_NOWARN);

	/* Retry without GFP_DMA32 for platforms where DMA32 is not available */
	if (unlikely(glob->dummy_read_page == NULL)) {
		glob->dummy_read_page = alloc_page(__GFP_ZERO);
		if (unlikely(glob->dummy_read_page == NULL)) {
			ret = -ENOMEM;
			goto out;
		}
		pr_warn("Using GFP_DMA32 fallback for dummy_read_page\n");
	}

	INIT_LIST_HEAD(&glob->device_list);
	atomic_set(&glob->bo_count, 0);

	debugfs_create_atomic_t("buffer_objects", 0444, ttm_debugfs_root,
				&glob->bo_count);
out:
	if (ret && ttm_debugfs_root)
		debugfs_remove(ttm_debugfs_root);
	if (ret)
		--ttm_glob_use_count;
	mutex_unlock(&ttm_global_mutex);
	return ret;
}

/*
 * A buffer object shrink method that walks the global device list and
 * tries to swap out one buffer object; a device that made progress is
 * moved to the tail of the list to even out the reclaim pressure.
 */
int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags)
{
	struct ttm_global *glob = &ttm_glob;
	struct ttm_device *bdev;
	int ret = 0;

	mutex_lock(&ttm_global_mutex);
	list_for_each_entry(bdev, &glob->device_list, device_list) {
		ret = ttm_device_swapout(bdev, ctx, gfp_flags);
		if (ret > 0) {
			list_move_tail(&bdev->device_list, &glob->device_list);
			break;
		}
	}
	mutex_unlock(&ttm_global_mutex);
	return ret;
}
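
/*
 * A minimal sketch of how a shrinker can drive the swapout path above,
 * modeled on the scan callback in ttm_tt.c but simplified here; the
 * function name is a hypothetical stand-in for illustration only.
 */
#if 0	/* illustrative sketch, not built */
static unsigned long example_shrinker_scan(struct shrinker *shrink,
					   struct shrink_control *sc)
{
	struct ttm_operation_ctx ctx = {
		.no_wait_gpu = false
	};
	int ret;

	/* GFP_NOFS: the shrinker may be invoked from fs reclaim context. */
	ret = ttm_global_swapout(&ctx, GFP_NOFS);
	return ret < 0 ? SHRINK_EMPTY : ret;
}
#endif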

int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
		       gfp_t gfp_flags)
{
	struct ttm_resource_cursor cursor;
	struct ttm_resource_manager *man;
	struct ttm_resource *res;
	unsigned i;
	int ret;

	spin_lock(&bdev->lru_lock);
	for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
		man = ttm_manager_type(bdev, i);
		if (!man || !man->use_tt)
			continue;

		ttm_resource_manager_for_each_res(man, &cursor, res) {
			struct ttm_buffer_object *bo = res->bo;
			uint32_t num_pages;

			if (!bo || bo->resource != res)
				continue;

			num_pages = PFN_UP(bo->base.size);
			ret = ttm_bo_swapout(bo, ctx, gfp_flags);
			/* ttm_bo_swapout has dropped the lru_lock */
			if (!ret)
				return num_pages;
			if (ret != -EBUSY)
				return ret;
		}
	}
	spin_unlock(&bdev->lru_lock);
	return 0;
}
EXPORT_SYMBOL(ttm_device_swapout);
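
/*
 * A hedged sketch of a per-device caller consuming the return
 * convention above (positive: pages made reclaimable, zero: nothing
 * left to swap, negative: error); the function is a hypothetical
 * illustration, not an API of this file.
 */
#if 0	/* illustrative sketch, not built */
static long example_device_reclaim(struct ttm_device *bdev)
{
	struct ttm_operation_ctx ctx = { .no_wait_gpu = true };
	long freed = 0;
	int ret;

	/* Swap out one BO at a time until no candidate remains. */
	while ((ret = ttm_device_swapout(bdev, &ctx, GFP_KERNEL)) > 0)
		freed += ret;

	return ret < 0 ? ret : freed;
}
#endif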

/**
 * ttm_device_init
 *
 * @bdev: A pointer to a struct ttm_device to initialize.
 * @funcs: Function table for the device.
 * @dev: The core kernel device pointer for DMA mappings and allocations.
 * @mapping: The address space to use for this bo.
 * @vma_manager: A pointer to a vma manager.
 * @use_dma_alloc: If coherent DMA allocation API should be used.
 * @use_dma32: If we should use GFP_DMA32 for device memory allocations.
 *
 * Initializes a struct ttm_device.
 *
 * Returns:
 * 0 on success, a negative error code on failure.
 */
int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *funcs,
		    struct device *dev, struct address_space *mapping,
		    struct drm_vma_offset_manager *vma_manager,
		    bool use_dma_alloc, bool use_dma32)
{
	struct ttm_global *glob = &ttm_glob;
	int ret, nid;

	if (WARN_ON(vma_manager == NULL))
		return -EINVAL;

	ret = ttm_global_init();
	if (ret)
		return ret;

	bdev->wq = alloc_workqueue("ttm",
				   WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_UNBOUND, 16);
	if (!bdev->wq) {
		ttm_global_release();
		return -ENOMEM;
	}

	bdev->funcs = funcs;

	ttm_sys_man_init(bdev);

	if (dev)
		nid = dev_to_node(dev);
	else
		nid = NUMA_NO_NODE;

	ttm_pool_init(&bdev->pool, dev, nid, use_dma_alloc, use_dma32);

	bdev->vma_manager = vma_manager;
	spin_lock_init(&bdev->lru_lock);
	INIT_LIST_HEAD(&bdev->pinned);
	bdev->dev_mapping = mapping;
	mutex_lock(&ttm_global_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&ttm_global_mutex);

	return 0;
}
EXPORT_SYMBOL(ttm_device_init);
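
/*
 * A hedged sketch of a driver pairing ttm_device_init() with
 * ttm_device_fini(), loosely modeled on how amdgpu wires this up;
 * struct my_driver and my_driver_funcs are hypothetical stand-ins.
 */
#if 0	/* illustrative sketch, not built */
static int example_driver_ttm_init(struct my_driver *priv)
{
	struct drm_device *ddev = &priv->ddev;
	int ret;

	ret = ttm_device_init(&priv->bdev, &my_driver_funcs, ddev->dev,
			      ddev->anon_inode->i_mapping,
			      ddev->vma_offset_manager,
			      true /* use_dma_alloc */,
			      false /* use_dma32 */);
	if (ret)
		return ret;

	/* Driver-specific managers (VRAM, GTT, ...) would be registered
	 * afterwards with ttm_set_driver_manager().
	 */
	return 0;
}

static void example_driver_ttm_fini(struct my_driver *priv)
{
	/* All BOs and driver managers must be gone before this call. */
	ttm_device_fini(&priv->bdev);
}
#endif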

void ttm_device_fini(struct ttm_device *bdev)
{
	struct ttm_resource_manager *man;
	unsigned i;

	mutex_lock(&ttm_global_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&ttm_global_mutex);

	drain_workqueue(bdev->wq);
	destroy_workqueue(bdev->wq);

	man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
	ttm_resource_manager_set_used(man, false);
	ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);

	spin_lock(&bdev->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		if (list_empty(&man->lru[i]))
			pr_debug("Swap list %d was clean\n", i);
	spin_unlock(&bdev->lru_lock);

	ttm_pool_fini(&bdev->pool);
	ttm_global_release();
}
EXPORT_SYMBOL(ttm_device_fini);

static void ttm_device_clear_lru_dma_mappings(struct ttm_device *bdev,
					      struct list_head *list)
{
	struct ttm_resource *res;

	spin_lock(&bdev->lru_lock);
	while ((res = list_first_entry_or_null(list, typeof(*res), lru))) {
		struct ttm_buffer_object *bo = res->bo;

		/* Take ref against racing releases once lru_lock is unlocked */
		if (!ttm_bo_get_unless_zero(bo))
			continue;

		list_del_init(&res->lru);
		spin_unlock(&bdev->lru_lock);

		if (bo->ttm)
			ttm_tt_unpopulate(bo->bdev, bo->ttm);

		ttm_bo_put(bo);
		spin_lock(&bdev->lru_lock);
	}
	spin_unlock(&bdev->lru_lock);
}

void ttm_device_clear_dma_mappings(struct ttm_device *bdev)
{
	struct ttm_resource_manager *man;
	unsigned int i, j;

	ttm_device_clear_lru_dma_mappings(bdev, &bdev->pinned);

	for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
		man = ttm_manager_type(bdev, i);
		if (!man || !man->use_tt)
			continue;

		for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j)
			ttm_device_clear_lru_dma_mappings(bdev, &man->lru[j]);
	}
}
EXPORT_SYMBOL(ttm_device_clear_dma_mappings);
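
/*
 * A hedged sketch of the intended caller: on surprise device removal a
 * driver quiesces userspace access and then drops all DMA mappings so
 * none outlive the device (modeled on the amdgpu unplug path; the
 * function and struct my_driver are hypothetical stand-ins).
 */
#if 0	/* illustrative sketch, not built */
static void example_driver_unplug(struct my_driver *priv)
{
	/* Block further ioctls/mmaps, then unpopulate every ttm_tt. */
	drm_dev_unplug(&priv->ddev);
	ttm_device_clear_dma_mappings(&priv->bdev);
}
#endif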