/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/cc_platform.h>
#include <linux/sched.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <drm/drm_cache.h>
#include <drm/drm_device.h>
#include <drm/drm_util.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_tt.h>

#include "ttm_module.h"

static unsigned long ttm_pages_limit;

MODULE_PARM_DESC(pages_limit, "Limit for the allocated pages");
module_param_named(pages_limit, ttm_pages_limit, ulong, 0644);

static unsigned long ttm_dma32_pages_limit;

MODULE_PARM_DESC(dma32_pages_limit, "Limit for the allocated DMA32 pages");
module_param_named(dma32_pages_limit, ttm_dma32_pages_limit, ulong, 0644);
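
/*
 * Both limits default to zero; in that case ttm_tt_mgr_init() falls
 * back to the defaults provided by the caller. With TTM built in they
 * can also be set on the kernel command line, e.g. ttm.pages_limit=N.
 */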

static atomic_long_t ttm_pages_allocated;
static atomic_long_t ttm_dma32_pages_allocated;

/*
 * Allocates a struct ttm_tt for the given BO via the driver's
 * ttm_tt_create() callback, unless one already exists.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_device *bdev = bo->bdev;
	struct drm_device *ddev = bo->base.dev;
	uint32_t page_flags = 0;

	dma_resv_assert_held(bo->base.resv);

	if (bo->ttm)
		return 0;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_TT_FLAG_EXTERNAL;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}
	/*
	 * When using dma_alloc_coherent with memory encryption, the
	 * mapped TT pages need to be decrypted; otherwise the drivers
	 * end up sending encrypted memory to the GPU.
	 */
	if (bdev->pool.use_dma_alloc && cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
		page_flags |= TTM_TT_FLAG_DECRYPTED;
		drm_info(ddev, "TT memory decryption enabled.");
	}

	bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	WARN_ON(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE &&
		!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL));

	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_create);
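
/*
 * A minimal sketch of a driver-side &ttm_device_funcs.ttm_tt_create
 * backend; the mydrv_ prefix is hypothetical. Drivers typically embed
 * struct ttm_tt into their own TT type and initialize it with
 * ttm_tt_init() or ttm_sg_tt_init():
 *
 *	static struct ttm_tt *
 *	mydrv_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
 *	{
 *		struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);
 *
 *		if (!tt)
 *			return NULL;
 *
 *		if (ttm_tt_init(tt, bo, page_flags, ttm_cached, 0)) {
 *			kfree(tt);
 *			return NULL;
 *		}
 *		return tt;
 *	}
 */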

/*
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvcalloc(ttm->num_pages, sizeof(void *), GFP_KERNEL);
	if (!ttm->pages)
		return -ENOMEM;

	return 0;
}

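/*
 * Allocate the page and DMA address arrays as a single chunk;
 * ttm->dma_address points at the tail of the page array.
 */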
static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvcalloc(ttm->num_pages, sizeof(*ttm->pages) +
			      sizeof(*ttm->dma_address), GFP_KERNEL);
	if (!ttm->pages)
		return -ENOMEM;

	ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);
	return 0;
}

static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->dma_address = kvcalloc(ttm->num_pages, sizeof(*ttm->dma_address),
				    GFP_KERNEL);
	if (!ttm->dma_address)
		return -ENOMEM;

	return 0;
}

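/*
 * Destroy @ttm through the driver's ttm_tt_destroy() callback.
 */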
void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	bdev->funcs->ttm_tt_destroy(bdev, ttm);
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_destroy);

static void ttm_tt_init_fields(struct ttm_tt *ttm,
			       struct ttm_buffer_object *bo,
			       uint32_t page_flags,
			       enum ttm_caching caching,
			       unsigned long extra_pages)
{
	ttm->num_pages = (PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT) + extra_pages;
	ttm->page_flags = page_flags;
	ttm->dma_address = NULL;
	ttm->swap_storage = NULL;
	ttm->sg = bo->sg;
	ttm->caching = caching;
}

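/**
 * ttm_tt_init - initialize a struct ttm_tt
 * @ttm: the struct ttm_tt to initialize
 * @bo: the buffer object @ttm belongs to
 * @page_flags: TTM_TT_FLAG_* page flags
 * @caching: the caching mode for the pages
 * @extra_pages: extra pages to allocate on top of the BO size
 *
 * Initialize the fields of @ttm and allocate its page directory.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */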
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags, enum ttm_caching caching,
		unsigned long extra_pages)
{
	ttm_tt_init_fields(ttm, bo, page_flags, caching, extra_pages);

	if (ttm_tt_alloc_page_directory(ttm)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

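/**
 * ttm_tt_fini - tear down a struct ttm_tt
 * @ttm: the struct ttm_tt to finalize
 *
 * Drop any remaining swap storage and free the page directories. Must
 * not be called while @ttm is still populated.
 */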
void ttm_tt_fini(struct ttm_tt *ttm)
{
	WARN_ON(ttm->page_flags & TTM_TT_FLAG_PRIV_POPULATED);

	if (ttm->swap_storage)
		fput(ttm->swap_storage);
	ttm->swap_storage = NULL;

	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm->dma_address);
	ttm->pages = NULL;
	ttm->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

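/**
 * ttm_sg_tt_init - initialize a struct ttm_tt for scatter-gather use
 * @ttm: the struct ttm_tt to initialize
 * @bo: the buffer object @ttm belongs to
 * @page_flags: TTM_TT_FLAG_* page flags
 * @caching: the caching mode for the pages
 *
 * Like ttm_tt_init(), but also allocates the DMA address array; for
 * TTM_TT_FLAG_EXTERNAL TTs only the DMA address array is needed.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */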
int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		   uint32_t page_flags, enum ttm_caching caching)
{
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags, caching, 0);

	if (page_flags & TTM_TT_FLAG_EXTERNAL)
		ret = ttm_sg_tt_alloc_page_directory(ttm);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm);
	if (ret) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);

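/**
 * ttm_tt_swapin - swap in tt object
 *
 * @ttm: The struct ttm_tt.
 *
 * Copy the contents back from the shmem file created by
 * ttm_tt_swapout() into the already populated pages of @ttm, then
 * drop the file. Returns 0 on success or a negative error code.
 */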
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	gfp_t gfp_mask;
	int i, ret;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_mapping;
	gfp_mask = mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = shmem_read_mapping_page_gfp(swap_space, i,
							gfp_mask);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}

		copy_highpage(to_page, from_page);
		put_page(from_page);
	}

	fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;

	return 0;

out_err:
	return ret;
}

/**
 * ttm_tt_swapout - swap out tt object
 *
 * @bdev: TTM device structure.
 * @ttm: The struct ttm_tt.
 * @gfp_flags: Flags to use for memory allocation.
 *
 * Swap out a TT object to a shmem file, and return the number of pages
 * swapped out or a negative error code.
 */
int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
		   gfp_t gfp_flags)
{
	loff_t size = (loff_t)ttm->num_pages << PAGE_SHIFT;
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i, ret;

	swap_storage = shmem_file_setup("ttm swap", size, 0);
	if (IS_ERR(swap_storage)) {
		pr_err("Failed allocating swap storage\n");
		return PTR_ERR(swap_storage);
	}

	swap_space = swap_storage->f_mapping;
	gfp_flags &= mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_flags);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(bdev, ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_TT_FLAG_SWAPPED;

	return ttm->num_pages;

out_err:
	fput(swap_storage);

	return ret;
}

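/**
 * ttm_tt_populate - allocate pages for a struct ttm_tt
 *
 * @bdev: TTM device structure.
 * @ttm: The struct ttm_tt to hold the backing pages.
 * @ctx: Operation context for the allocation.
 *
 * Account the pages against the global limits, swapping other TTs out
 * when a limit is exceeded, then allocate the backing pages through
 * the driver's ttm_tt_populate() callback or the default pool, and
 * finally swap the old contents back in if @ttm was swapped out
 * before. Returns 0 on success or a negative error code.
 */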
int ttm_tt_populate(struct ttm_device *bdev,
		    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (!ttm)
		return -EINVAL;

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_add(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	while (atomic_long_read(&ttm_pages_allocated) > ttm_pages_limit ||
	       atomic_long_read(&ttm_dma32_pages_allocated) >
	       ttm_dma32_pages_limit) {

		ret = ttm_global_swapout(ctx, GFP_KERNEL);
		if (ret == 0)
			break;
		if (ret < 0)
			goto error;
	}

	if (bdev->funcs->ttm_tt_populate)
		ret = bdev->funcs->ttm_tt_populate(bdev, ttm, ctx);
	else
		ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
	if (ret)
		goto error;

	ttm->page_flags |= TTM_TT_FLAG_PRIV_POPULATED;
	if (unlikely(ttm->page_flags & TTM_TT_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_tt_unpopulate(bdev, ttm);
			return ret;
		}
	}

	return 0;

error:
	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}
	return ret;
}
EXPORT_SYMBOL(ttm_tt_populate);
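
/*
 * A minimal usage sketch: populate a TT before binding it to the GPU;
 * mydrv_bind() stands in for a hypothetical driver helper:
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *	int ret;
 *
 *	ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
 *	if (ret)
 *		return ret;
 *	return mydrv_bind(bo->bdev, bo->ttm);
 */

/**
 * ttm_tt_unpopulate - free pages backing a struct ttm_tt
 *
 * @bdev: TTM device structure.
 * @ttm: The struct ttm_tt to free the pages of.
 *
 * Free the backing pages through the driver's ttm_tt_unpopulate()
 * callback or the default pool and drop them from the global
 * accounting.
 */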
void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	if (!ttm_tt_is_populated(ttm))
		return;

	if (bdev->funcs->ttm_tt_unpopulate)
		bdev->funcs->ttm_tt_unpopulate(bdev, ttm);
	else
		ttm_pool_free(&bdev->pool, ttm);

	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	ttm->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
}

#ifdef CONFIG_DEBUG_FS

/* Test the shrinker functions and dump the result */
static int ttm_tt_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct ttm_operation_ctx ctx = { .interruptible = false,
					 .no_wait_gpu = false };

	seq_printf(m, "%d\n", ttm_global_swapout(&ctx, GFP_KERNEL));
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_tt_debugfs_shrink);

#endif

/*
 * ttm_tt_mgr_init - initialize the global page limits
 * @num_pages: default limit for the number of pages TTM may allocate
 * @num_dma32_pages: default limit for the number of DMA32 pages
 *
 * Create the debugfs shrink-test file and initialize the global page
 * limits with the given defaults, unless they were already overridden
 * via the pages_limit and dma32_pages_limit module parameters.
 */
void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages)
{
#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("tt_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_tt_debugfs_shrink_fops);
#endif

	if (!ttm_pages_limit)
		ttm_pages_limit = num_pages;

	if (!ttm_dma32_pages_limit)
		ttm_dma32_pages_limit = num_dma32_pages;
}

static void ttm_kmap_iter_tt_map_local(struct ttm_kmap_iter *iter,
				       struct iosys_map *dmap,
				       pgoff_t i)
{
	struct ttm_kmap_iter_tt *iter_tt =
		container_of(iter, typeof(*iter_tt), base);

	iosys_map_set_vaddr(dmap, kmap_local_page_prot(iter_tt->tt->pages[i],
						       iter_tt->prot));
}

static void ttm_kmap_iter_tt_unmap_local(struct ttm_kmap_iter *iter,
					 struct iosys_map *map)
{
	kunmap_local(map->vaddr);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_tt_ops = {
	.map_local = ttm_kmap_iter_tt_map_local,
	.unmap_local = ttm_kmap_iter_tt_unmap_local,
	.maps_tt = true,
};

/**
 * ttm_kmap_iter_tt_init - Initialize a struct ttm_kmap_iter_tt
 * @iter_tt: The struct ttm_kmap_iter_tt to initialize.
 * @tt: Struct ttm_tt holding page pointers of the struct ttm_resource.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
		      struct ttm_tt *tt)
{
	iter_tt->base.ops = &ttm_kmap_iter_tt_ops;
	iter_tt->tt = tt;
	if (tt)
		iter_tt->prot = ttm_prot_from_caching(tt->caching, PAGE_KERNEL);
	else
		iter_tt->prot = PAGE_KERNEL;

	return &iter_tt->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_tt_init);
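
/*
 * A minimal usage sketch: temporarily map page @i of a TT through the
 * iterator interface (tt and i are assumed to be in scope):
 *
 *	struct ttm_kmap_iter_tt iter_tt;
 *	struct ttm_kmap_iter *iter = ttm_kmap_iter_tt_init(&iter_tt, tt);
 *	struct iosys_map map;
 *
 *	iter->ops->map_local(iter, &map, i);
 *	(access map.vaddr here)
 *	iter->ops->unmap_local(iter, &map);
 */

/**
 * ttm_tt_pages_limit - get the global TT page limit
 *
 * Return: the maximum number of pages TTM is allowed to allocate for
 * all TTs combined.
 */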
unsigned long ttm_tt_pages_limit(void)
{
	return ttm_pages_limit;
}
EXPORT_SYMBOL(ttm_tt_pages_limit);
473