/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>

#include "ttm_module.h"

static unsigned long ttm_pages_limit;

MODULE_PARM_DESC(pages_limit, "Limit for the allocated pages");
module_param_named(pages_limit, ttm_pages_limit, ulong, 0644);

static unsigned long ttm_dma32_pages_limit;

MODULE_PARM_DESC(dma32_pages_limit, "Limit for the allocated DMA32 pages");
module_param_named(dma32_pages_limit, ttm_dma32_pages_limit, ulong, 0644);

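/*
 * Global accounting of pages allocated through TTM. ttm_tt_populate() bumps
 * these counters and starts swapping buffers out while either one is above
 * its limit; the limits default to the values passed to ttm_tt_mgr_init()
 * unless overridden by the module parameters above.
 */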
static atomic_long_t ttm_pages_allocated;
static atomic_long_t ttm_dma32_pages_allocated;

/*
 * Allocates a ttm structure for the given BO.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_device *bdev = bo->bdev;
	uint32_t page_flags = 0;

	dma_resv_assert_held(bo->base.resv);

	if (bo->ttm)
		return 0;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_PAGE_FLAG_SG;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}

	bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	return 0;
}
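
/*
 * A minimal sketch of the driver-side &ttm_device_funcs.ttm_tt_create
 * callback invoked above. The my_ttm_tt_create() name and the bare
 * struct ttm_tt allocation are illustrative only; real drivers usually
 * embed struct ttm_tt in a driver-private structure:
 *
 *	static struct ttm_tt *my_ttm_tt_create(struct ttm_buffer_object *bo,
 *					       uint32_t page_flags)
 *	{
 *		struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);
 *
 *		if (!tt)
 *			return NULL;
 *
 *		if (ttm_tt_init(tt, bo, page_flags, ttm_cached)) {
 *			kfree(tt);
 *			return NULL;
 *		}
 *		return tt;
 *	}
 */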

/*
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void *),
				    GFP_KERNEL | __GFP_ZERO);
	if (!ttm->pages)
		return -ENOMEM;
	return 0;
}

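/*
 * Allocate a single array that holds both the page pointers and the DMA
 * addresses; ttm->dma_address simply points at the tail of the same
 * allocation, which is why ttm_tt_fini() frees only ttm->pages when both
 * pointers are set.
 */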
static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvmalloc_array(ttm->num_pages,
				    sizeof(*ttm->pages) +
				    sizeof(*ttm->dma_address),
				    GFP_KERNEL | __GFP_ZERO);
	if (!ttm->pages)
		return -ENOMEM;

	ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);
	return 0;
}

static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->dma_address = kvmalloc_array(ttm->num_pages,
					  sizeof(*ttm->dma_address),
					  GFP_KERNEL | __GFP_ZERO);
	if (!ttm->dma_address)
		return -ENOMEM;
	return 0;
}

void ttm_tt_destroy_common(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	ttm_tt_unpopulate(bdev, ttm);

	if (ttm->swap_storage)
		fput(ttm->swap_storage);

	ttm->swap_storage = NULL;
}
EXPORT_SYMBOL(ttm_tt_destroy_common);

void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	bdev->funcs->ttm_tt_destroy(bdev, ttm);
}

static void ttm_tt_init_fields(struct ttm_tt *ttm,
			       struct ttm_buffer_object *bo,
			       uint32_t page_flags,
			       enum ttm_caching caching)
{
	ttm->num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
	ttm->page_flags = page_flags;
	ttm->dma_address = NULL;
	ttm->swap_storage = NULL;
	ttm->sg = bo->sg;
	ttm->caching = caching;
}

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags, enum ttm_caching caching)
{
	ttm_tt_init_fields(ttm, bo, page_flags, caching);

	if (ttm_tt_alloc_page_directory(ttm)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm->dma_address);
	ttm->pages = NULL;
	ttm->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

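/*
 * Like ttm_tt_init(), but also provides storage for DMA addresses. With
 * TTM_PAGE_FLAG_SG set (dma-buf imports) only the DMA address array is
 * allocated, since the backing pages are owned by the exporter.
 */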
int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		   uint32_t page_flags, enum ttm_caching caching)
{
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags, caching);

	if (page_flags & TTM_PAGE_FLAG_SG)
		ret = ttm_sg_tt_alloc_page_directory(ttm);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm);
	if (ret) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);

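/*
 * ttm_tt_swapin - copy the contents of a swapped out tt back in
 *
 * The tt must already be populated again: ttm->pages has to point at freshly
 * allocated pages for the shmem copies to land in. Returns 0 on success or a
 * negative error code.
 */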
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	gfp_t gfp_mask;
	pgoff_t i;
	int ret;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_mapping;
	gfp_mask = mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = shmem_read_mapping_page_gfp(swap_space, i,
							gfp_mask);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}

		copy_highpage(to_page, from_page);
		put_page(from_page);
	}

	fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;

out_err:
	return ret;
}

/**
 * ttm_tt_swapout - swap out tt object
 *
 * @bdev: TTM device structure.
 * @ttm: The struct ttm_tt.
 * @gfp_flags: Flags to use for memory allocation.
 *
 * Swap out a TT object to a shmem_file. Returns the number of pages swapped
 * out or a negative error code.
 */
int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
		   gfp_t gfp_flags)
{
	loff_t size = (loff_t)ttm->num_pages << PAGE_SHIFT;
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	pgoff_t i;
	int ret;

	swap_storage = shmem_file_setup("ttm swap", size, 0);
	if (IS_ERR(swap_storage)) {
		pr_err("Failed allocating swap storage\n");
		return PTR_ERR(swap_storage);
	}

	swap_space = swap_storage->f_mapping;
	gfp_flags &= mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_flags);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(bdev, ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;

	return ttm->num_pages;

out_err:
	fput(swap_storage);

	return ret;
}

static void ttm_tt_add_mapping(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	pgoff_t i;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = bdev->dev_mapping;
}

int ttm_tt_populate(struct ttm_device *bdev,
		    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (!ttm)
		return -EINVAL;

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
		atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_add(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	while (atomic_long_read(&ttm_pages_allocated) > ttm_pages_limit ||
	       atomic_long_read(&ttm_dma32_pages_allocated) >
	       ttm_dma32_pages_limit) {

		ret = ttm_global_swapout(ctx, GFP_KERNEL);
		if (ret == 0)
			break;
		if (ret < 0)
			goto error;
	}

	if (bdev->funcs->ttm_tt_populate)
		ret = bdev->funcs->ttm_tt_populate(bdev, ttm, ctx);
	else
		ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
	if (ret)
		goto error;

	ttm_tt_add_mapping(bdev, ttm);
	ttm->page_flags |= TTM_PAGE_FLAG_PRIV_POPULATED;
	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_tt_unpopulate(bdev, ttm);
			return ret;
		}
	}

	return 0;

error:
	if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}
	return ret;
}
EXPORT_SYMBOL(ttm_tt_populate);
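
/*
 * A sketch of a driver-side &ttm_device_funcs.ttm_tt_populate callback that
 * wraps the default pool allocation with device-specific setup. The
 * my_ttm_tt_populate() and my_setup_pages() names are placeholders for
 * whatever per-page work a driver actually needs:
 *
 *	static int my_ttm_tt_populate(struct ttm_device *bdev,
 *				      struct ttm_tt *ttm,
 *				      struct ttm_operation_ctx *ctx)
 *	{
 *		int ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
 *
 *		if (ret)
 *			return ret;
 *
 *		return my_setup_pages(ttm);
 *	}
 */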

static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
	pgoff_t i;
	struct page **page = ttm->pages;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i) {
		(*page)->mapping = NULL;
		(*page++)->index = 0;
	}
}

void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	if (!ttm_tt_is_populated(ttm))
		return;

	ttm_tt_clear_mapping(ttm);
	if (bdev->funcs->ttm_tt_unpopulate)
		bdev->funcs->ttm_tt_unpopulate(bdev, ttm);
	else
		ttm_pool_free(&bdev->pool, ttm);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED;
}

#ifdef CONFIG_DEBUG_FS

/* Test the shrinker functions and dump the result */
static int ttm_tt_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct ttm_operation_ctx ctx = { false, false };

	seq_printf(m, "%d\n", ttm_global_swapout(&ctx, GFP_KERNEL));
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_tt_debugfs_shrink);

#endif

/*
 * ttm_tt_mgr_init - initialize the global page limits
 *
 * Set the default limits for the number of pages allocated through TTM and
 * create the debugfs file for manually testing swapout.
 */
void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages)
{
#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("tt_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_tt_debugfs_shrink_fops);
#endif

	if (!ttm_pages_limit)
		ttm_pages_limit = num_pages;

	if (!ttm_dma32_pages_limit)
		ttm_dma32_pages_limit = num_dma32_pages;
}
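
/*
 * Both limits can also be overridden at boot time through the module
 * parameters declared at the top of this file, e.g. (illustrative values):
 *
 *	ttm.pages_limit=4194304 ttm.dma32_pages_limit=1048576
 */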

static void ttm_kmap_iter_tt_map_local(struct ttm_kmap_iter *iter,
				       struct dma_buf_map *dmap,
				       pgoff_t i)
{
	struct ttm_kmap_iter_tt *iter_tt =
		container_of(iter, typeof(*iter_tt), base);

	dma_buf_map_set_vaddr(dmap, kmap_local_page_prot(iter_tt->tt->pages[i],
							 iter_tt->prot));
}

static void ttm_kmap_iter_tt_unmap_local(struct ttm_kmap_iter *iter,
					 struct dma_buf_map *map)
{
	kunmap_local(map->vaddr);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_tt_ops = {
	.map_local = ttm_kmap_iter_tt_map_local,
	.unmap_local = ttm_kmap_iter_tt_unmap_local,
	.maps_tt = true,
};

/**
 * ttm_kmap_iter_tt_init - Initialize a struct ttm_kmap_iter_tt
 * @iter_tt: The struct ttm_kmap_iter_tt to initialize.
 * @tt: Struct ttm_tt holding page pointers of the struct ttm_resource.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
		      struct ttm_tt *tt)
{
	iter_tt->base.ops = &ttm_kmap_iter_tt_ops;
	iter_tt->tt = tt;
	if (tt)
		iter_tt->prot = ttm_prot_from_caching(tt->caching, PAGE_KERNEL);
	else
		iter_tt->prot = PAGE_KERNEL;

	return &iter_tt->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_tt_init);
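
/*
 * A minimal usage sketch, assuming a populated tt and a caller copying page i
 * out through the short-lived local mapping; buf is a hypothetical
 * destination buffer:
 *
 *	struct ttm_kmap_iter_tt iter_tt;
 *	struct ttm_kmap_iter *iter = ttm_kmap_iter_tt_init(&iter_tt, tt);
 *	struct dma_buf_map map;
 *
 *	iter->ops->map_local(iter, &map, i);
 *	memcpy(buf, map.vaddr, PAGE_SIZE);
 *	iter->ops->unmap_local(iter, &map);
 */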