/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/cc_platform.h>
#include <linux/debugfs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/shmem_fs.h>
#include <drm/drm_cache.h>
#include <drm/drm_device.h>
#include <drm/drm_util.h>
#include <drm/ttm/ttm_backup.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_tt.h>

#include "ttm_module.h"

static unsigned long ttm_pages_limit;

MODULE_PARM_DESC(pages_limit, "Limit for the allocated pages");
module_param_named(pages_limit, ttm_pages_limit, ulong, 0644);

static unsigned long ttm_dma32_pages_limit;

MODULE_PARM_DESC(dma32_pages_limit, "Limit for the allocated DMA32 pages");
module_param_named(dma32_pages_limit, ttm_dma32_pages_limit, ulong, 0644);

static atomic_long_t ttm_pages_allocated;
static atomic_long_t ttm_dma32_pages_allocated;

/*
 * Allocates a ttm structure for the given BO.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_device *bdev = bo->bdev;
	struct drm_device *ddev = bo->base.dev;
	uint32_t page_flags = 0;

	dma_resv_assert_held(bo->base.resv);

	if (bo->ttm)
		return 0;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_TT_FLAG_EXTERNAL;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}
	/*
	 * When using dma_alloc_coherent with memory encryption the
	 * mapped TT pages need to be decrypted or otherwise the drivers
	 * will end up sending encrypted mem to the gpu.
	 */
	if (bdev->pool.use_dma_alloc && cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
		page_flags |= TTM_TT_FLAG_DECRYPTED;
		drm_info_once(ddev, "TT memory decryption enabled.");
	}

	bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	WARN_ON(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE &&
		!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL));

	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_create);
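
/*
 * Illustrative sketch (not part of this file): ttm_tt_create() above calls
 * into the driver through bdev->funcs->ttm_tt_create(), and the usual driver
 * pattern is to embed struct ttm_tt in a driver-private TT type and set it
 * up with ttm_tt_init(). The "foo" names and the choice of ttm_cached are
 * hypothetical.
 */
struct foo_ttm_tt {
	struct ttm_tt ttm;
	/* driver-private per-TT state would live here */
};

static struct ttm_tt *foo_ttm_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags)
{
	struct foo_ttm_tt *foo_tt;

	foo_tt = kzalloc(sizeof(*foo_tt), GFP_KERNEL);
	if (!foo_tt)
		return NULL;

	/* Initialize the common TT state; pick a caching mode the device needs. */
	if (ttm_tt_init(&foo_tt->ttm, bo, page_flags, ttm_cached, 0)) {
		kfree(foo_tt);
		return NULL;
	}

	return &foo_tt->ttm;
}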

/*
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvcalloc(ttm->num_pages, sizeof(void*), GFP_KERNEL);
	if (!ttm->pages)
		return -ENOMEM;

	return 0;
}

static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvcalloc(ttm->num_pages, sizeof(*ttm->pages) +
			      sizeof(*ttm->dma_address), GFP_KERNEL);
	if (!ttm->pages)
		return -ENOMEM;

	ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);
	return 0;
}

static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->dma_address = kvcalloc(ttm->num_pages, sizeof(*ttm->dma_address),
				    GFP_KERNEL);
	if (!ttm->dma_address)
		return -ENOMEM;

	return 0;
}

void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	bdev->funcs->ttm_tt_destroy(bdev, ttm);
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_destroy);

static void ttm_tt_init_fields(struct ttm_tt *ttm,
			       struct ttm_buffer_object *bo,
			       uint32_t page_flags,
			       enum ttm_caching caching,
			       unsigned long extra_pages)
{
	ttm->num_pages = (PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT) + extra_pages;
	ttm->page_flags = page_flags;
	ttm->dma_address = NULL;
	ttm->swap_storage = NULL;
	ttm->sg = bo->sg;
	ttm->caching = caching;
	ttm->restore = NULL;
	ttm->backup = NULL;
}

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags, enum ttm_caching caching,
		unsigned long extra_pages)
{
	ttm_tt_init_fields(ttm, bo, page_flags, caching, extra_pages);

	if (ttm_tt_alloc_page_directory(ttm)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
	WARN_ON(ttm->page_flags & TTM_TT_FLAG_PRIV_POPULATED);

	if (ttm->swap_storage)
		fput(ttm->swap_storage);
	ttm->swap_storage = NULL;

	if (ttm_tt_is_backed_up(ttm))
		ttm_pool_drop_backed_up(ttm);
	if (ttm->backup) {
		ttm_backup_fini(ttm->backup);
		ttm->backup = NULL;
	}

	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm->dma_address);
	ttm->pages = NULL;
	ttm->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);
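
/*
 * Illustrative sketch (not part of this file): the counterpart to the
 * foo_ttm_tt_create() sketch above. A driver's ttm_tt_destroy() callback
 * tears down the common TT state with ttm_tt_fini() and then frees its own
 * wrapper. The "foo" names are hypothetical.
 */
static void foo_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct foo_ttm_tt *foo_tt = container_of(ttm, struct foo_ttm_tt, ttm);

	ttm_tt_fini(&foo_tt->ttm);
	kfree(foo_tt);
}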

int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		   uint32_t page_flags, enum ttm_caching caching)
{
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags, caching, 0);

	if (page_flags & TTM_TT_FLAG_EXTERNAL)
		ret = ttm_sg_tt_alloc_page_directory(ttm);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm);
	if (ret) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);

int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	gfp_t gfp_mask;
	int i, ret;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_mapping;
	gfp_mask = mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = shmem_read_mapping_page_gfp(swap_space, i,
							gfp_mask);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}

		copy_highpage(to_page, from_page);
		put_page(from_page);
	}

	fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;

	return 0;

out_err:
	return ret;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_swapin);

/**
 * ttm_tt_backup() - Helper to back up a struct ttm_tt.
 * @bdev: The TTM device.
 * @tt: The struct ttm_tt.
 * @flags: Flags that govern the backup behaviour.
 *
 * Update the page accounting and call ttm_pool_backup() to free pages
 * or back them up.
 *
 * Return: Number of pages freed or swapped out, or negative error code on
 * error.
 */
long ttm_tt_backup(struct ttm_device *bdev, struct ttm_tt *tt,
		   const struct ttm_backup_flags flags)
{
	long ret;

	if (WARN_ON(IS_ERR_OR_NULL(tt->backup)))
		return 0;

	ret = ttm_pool_backup(&bdev->pool, tt, &flags);
	if (ret > 0) {
		tt->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
		tt->page_flags |= TTM_TT_FLAG_BACKED_UP;
	}

	return ret;
}

int ttm_tt_restore(struct ttm_device *bdev, struct ttm_tt *tt,
		   const struct ttm_operation_ctx *ctx)
{
	int ret = ttm_pool_restore_and_alloc(&bdev->pool, tt, ctx);

	if (ret)
		return ret;

	tt->page_flags &= ~TTM_TT_FLAG_BACKED_UP;

	return 0;
}
EXPORT_SYMBOL(ttm_tt_restore);
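
/*
 * Illustrative sketch (not part of this file): ttm_tt_backup() and
 * ttm_tt_restore() are intended to be used as a pair. A positive return from
 * ttm_tt_backup() marks the TT with TTM_TT_FLAG_BACKED_UP, and a later
 * ttm_tt_restore() repopulates it from the backup before the buffer is used
 * again. The helper name and the caller-supplied flags/ctx are hypothetical.
 */
static int foo_backup_then_restore(struct ttm_device *bdev, struct ttm_tt *tt,
				   struct ttm_backup_flags flags,
				   struct ttm_operation_ctx *ctx)
{
	long released;

	/* Free or back up the populated pages; > 0 is the page count. */
	released = ttm_tt_backup(bdev, tt, flags);
	if (released <= 0)
		return released;

	/* ... later, when the buffer is needed again ... */
	return ttm_tt_restore(bdev, tt, ctx);
}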

/**
 * ttm_tt_swapout - swap out tt object
 *
 * @bdev: TTM device structure.
 * @ttm: The struct ttm_tt.
 * @gfp_flags: Flags to use for memory allocation.
 *
 * Swap out a TT object to a shmem_file; return the number of pages swapped
 * out or a negative error code.
 */
int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
		   gfp_t gfp_flags)
{
	loff_t size = (loff_t)ttm->num_pages << PAGE_SHIFT;
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i, ret;

	swap_storage = shmem_file_setup("ttm swap", size, 0);
	if (IS_ERR(swap_storage)) {
		pr_err("Failed allocating swap storage\n");
		return PTR_ERR(swap_storage);
	}

	swap_space = swap_storage->f_mapping;
	gfp_flags &= mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_flags);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(bdev, ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_TT_FLAG_SWAPPED;

	return ttm->num_pages;

out_err:
	fput(swap_storage);

	return ret;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_swapout);

int ttm_tt_populate(struct ttm_device *bdev,
		    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (!ttm)
		return -EINVAL;

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_add(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	while (atomic_long_read(&ttm_pages_allocated) > ttm_pages_limit ||
	       atomic_long_read(&ttm_dma32_pages_allocated) >
	       ttm_dma32_pages_limit) {

		ret = ttm_global_swapout(ctx, GFP_KERNEL);
		if (ret == 0)
			break;
		if (ret < 0)
			goto error;
	}

	if (bdev->funcs->ttm_tt_populate)
		ret = bdev->funcs->ttm_tt_populate(bdev, ttm, ctx);
	else
		ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
	if (ret)
		goto error;

	ttm->page_flags |= TTM_TT_FLAG_PRIV_POPULATED;
	ttm->page_flags &= ~TTM_TT_FLAG_BACKED_UP;
	if (unlikely(ttm->page_flags & TTM_TT_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_tt_unpopulate(bdev, ttm);
			return ret;
		}
	}

	return 0;

error:
	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}
	return ret;
}

#if IS_ENABLED(CONFIG_DRM_TTM_KUNIT_TEST)
EXPORT_SYMBOL(ttm_tt_populate);
#endif
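
/*
 * Illustrative sketch (not part of this file): a driver only needs its own
 * ttm_tt_populate()/ttm_tt_unpopulate() callbacks when it has extra work to
 * do around the pool allocation; otherwise ttm_tt_populate() and
 * ttm_tt_unpopulate() above fall back to ttm_pool_alloc() and
 * ttm_pool_free(). The foo_*() names and the device-specific setup step are
 * hypothetical.
 */
static int foo_ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm,
			       struct ttm_operation_ctx *ctx)
{
	int ret;

	/* Let the common pool provide the backing pages. */
	ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
	if (ret)
		return ret;

	/* Device-specific setup (e.g. building device page tables) would go here. */
	return 0;
}

static void foo_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	/* Undo any device-specific setup first, then release the pages. */
	ttm_pool_free(&bdev->pool, ttm);
}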

void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	if (!ttm_tt_is_populated(ttm))
		return;

	if (bdev->funcs->ttm_tt_unpopulate)
		bdev->funcs->ttm_tt_unpopulate(bdev, ttm);
	else
		ttm_pool_free(&bdev->pool, ttm);

	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	ttm->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_unpopulate);

#ifdef CONFIG_DEBUG_FS

/* Test the shrinker functions and dump the result */
static int ttm_tt_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct ttm_operation_ctx ctx = { false, false };

	seq_printf(m, "%d\n", ttm_global_swapout(&ctx, GFP_KERNEL));
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_tt_debugfs_shrink);

#endif


/*
 * ttm_tt_mgr_init - register with the MM shrinker
 *
 * Register with the MM shrinker for swapping out BOs.
 */
void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages)
{
#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("tt_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_tt_debugfs_shrink_fops);
#endif

	if (!ttm_pages_limit)
		ttm_pages_limit = num_pages;

	if (!ttm_dma32_pages_limit)
		ttm_dma32_pages_limit = num_dma32_pages;
}

static void ttm_kmap_iter_tt_map_local(struct ttm_kmap_iter *iter,
				       struct iosys_map *dmap,
				       pgoff_t i)
{
	struct ttm_kmap_iter_tt *iter_tt =
		container_of(iter, typeof(*iter_tt), base);

	iosys_map_set_vaddr(dmap, kmap_local_page_prot(iter_tt->tt->pages[i],
						       iter_tt->prot));
}

static void ttm_kmap_iter_tt_unmap_local(struct ttm_kmap_iter *iter,
					 struct iosys_map *map)
{
	kunmap_local(map->vaddr);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_tt_ops = {
	.map_local = ttm_kmap_iter_tt_map_local,
	.unmap_local = ttm_kmap_iter_tt_unmap_local,
	.maps_tt = true,
};

/**
 * ttm_kmap_iter_tt_init - Initialize a struct ttm_kmap_iter_tt
 * @iter_tt: The struct ttm_kmap_iter_tt to initialize.
 * @tt: Struct ttm_tt holding page pointers of the struct ttm_resource.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
		      struct ttm_tt *tt)
{
	iter_tt->base.ops = &ttm_kmap_iter_tt_ops;
	iter_tt->tt = tt;
	if (tt)
		iter_tt->prot = ttm_prot_from_caching(tt->caching, PAGE_KERNEL);
	else
		iter_tt->prot = PAGE_KERNEL;

	return &iter_tt->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_tt_init);
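
/*
 * Illustrative sketch (not part of this file): the kmap iterator lets generic
 * move/copy helpers touch TT pages one at a time without a long-lived vmap.
 * foo_copy_first_page() is hypothetical; it maps page 0 of a populated TT,
 * copies it into a caller buffer and drops the local mapping again.
 */
static void foo_copy_first_page(struct ttm_tt *tt, void *dst)
{
	struct ttm_kmap_iter_tt iter_tt;
	struct ttm_kmap_iter *iter;
	struct iosys_map map;

	iter = ttm_kmap_iter_tt_init(&iter_tt, tt);

	iter->ops->map_local(iter, &map, 0);	/* kmap_local the page */
	memcpy(dst, map.vaddr, PAGE_SIZE);
	iter->ops->unmap_local(iter, &map);	/* drop the local mapping */
}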

unsigned long ttm_tt_pages_limit(void)
{
	return ttm_pages_limit;
}
EXPORT_SYMBOL(ttm_tt_pages_limit);

/**
 * ttm_tt_setup_backup() - Allocate and assign a backup structure for a ttm_tt
 * @tt: The ttm_tt for which to allocate and assign a backup structure.
 *
 * Assign a backup structure to be used for tt backup. This should
 * typically be done at bo creation, to avoid allocations at shrinking
 * time.
 *
 * Return: 0 on success, negative error code on failure.
 */
int ttm_tt_setup_backup(struct ttm_tt *tt)
{
	struct file *backup =
		ttm_backup_shmem_create(((loff_t)tt->num_pages) << PAGE_SHIFT);

	if (WARN_ON_ONCE(!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)))
		return -EINVAL;

	if (IS_ERR(backup))
		return PTR_ERR(backup);

	if (tt->backup)
		ttm_backup_fini(tt->backup);

	tt->backup = backup;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_setup_backup);
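
/*
 * Illustrative sketch (not part of this file): a driver that wants its TTs to
 * be shrinkable typically calls ttm_tt_setup_backup() right after creating
 * the TT, so no backup allocation is needed later under memory pressure.
 * foo_ttm_tt_create_backed() and the use of ttm_cached are hypothetical; the
 * caller is assumed to pass page_flags that satisfy ttm_tt_setup_backup()
 * above (this revision requires TTM_TT_FLAG_EXTERNAL_MAPPABLE to be set).
 */
static struct ttm_tt *foo_ttm_tt_create_backed(struct ttm_buffer_object *bo,
					       uint32_t page_flags)
{
	struct ttm_tt *tt;

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (!tt)
		return NULL;

	if (ttm_tt_init(tt, bo, page_flags, ttm_cached, 0))
		goto err_free;

	/* Pre-allocate the shmem backup so shrinking never has to. */
	if (ttm_tt_setup_backup(tt))
		goto err_fini;

	return tt;

err_fini:
	ttm_tt_fini(tt);
err_free:
	kfree(tt);
	return NULL;
}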