1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
2 /**************************************************************************
3 *
4 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 **************************************************************************/
28 /*
29 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
30 */
31
32 #define pr_fmt(fmt) "[TTM] " fmt
33
34 #include <linux/cc_platform.h>
35 #include <linux/debugfs.h>
36 #include <linux/export.h>
37 #include <linux/file.h>
38 #include <linux/module.h>
39 #include <linux/sched.h>
40 #include <linux/shmem_fs.h>
41 #include <drm/drm_cache.h>
42 #include <drm/drm_device.h>
43 #include <drm/drm_print.h>
44 #include <drm/drm_util.h>
45 #include <drm/ttm/ttm_backup.h>
46 #include <drm/ttm/ttm_bo.h>
47 #include <drm/ttm/ttm_tt.h>
48
49 #include "ttm_module.h"
50 #include "ttm_pool_internal.h"
51
52 static unsigned long ttm_pages_limit;
53
54 MODULE_PARM_DESC(pages_limit, "Limit for the allocated pages");
55 module_param_named(pages_limit, ttm_pages_limit, ulong, 0644);
56
57 static unsigned long ttm_dma32_pages_limit;
58
59 MODULE_PARM_DESC(dma32_pages_limit, "Limit for the allocated DMA32 pages");
60 module_param_named(dma32_pages_limit, ttm_dma32_pages_limit, ulong, 0644);
61
62 static atomic_long_t ttm_pages_allocated;
63 static atomic_long_t ttm_dma32_pages_allocated;
64
65 /*
66 * Allocates a ttm structure for the given BO.
67 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_device *bdev = bo->bdev;
	struct drm_device *ddev = bo->base.dev;
	uint32_t page_flags = 0;

	/* Creation happens under the BO's reservation lock. */
	dma_resv_assert_held(bo->base.resv);

	/* Nothing to do if a ttm already exists for this BO. */
	if (bo->ttm)
		return 0;

	/* Derive the page flags from the BO type. */
	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		/* Pages are provided externally through an sg table. */
		page_flags |= TTM_TT_FLAG_EXTERNAL;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}
	/*
	 * When using dma_alloc_coherent with memory encryption the
	 * mapped TT pages need to be decrypted or otherwise the drivers
	 * will end up sending encrypted mem to the gpu.
	 */
	if (ttm_pool_uses_dma_alloc(&bdev->pool) &&
	    cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
		page_flags |= TTM_TT_FLAG_DECRYPTED;
		drm_info_once(ddev, "TT memory decryption enabled.");
	}

	/* The actual ttm allocation is delegated to the driver. */
	bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	/* EXTERNAL_MAPPABLE only makes sense together with EXTERNAL. */
	WARN_ON(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE &&
		!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL));

	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_create);
114
115 /*
116 * Allocates storage for pointers to the pages that back the ttm.
117 */
ttm_tt_alloc_page_directory(struct ttm_tt * ttm)118 static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
119 {
120 ttm->pages = kvcalloc(ttm->num_pages, sizeof(void*), GFP_KERNEL);
121 if (!ttm->pages)
122 return -ENOMEM;
123
124 return 0;
125 }
126
/*
 * Allocate a single array holding both the page pointers and the DMA
 * addresses; the dma_address vector lives directly behind the
 * num_pages page pointers in the same allocation.
 */
static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvcalloc(ttm->num_pages, sizeof(*ttm->pages) +
			      sizeof(*ttm->dma_address), GFP_KERNEL);
	if (!ttm->pages)
		return -ENOMEM;

	/* dma_address points into the tail of the ttm->pages allocation. */
	ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);
	return 0;
}
137
/*
 * For external (sg-backed) ttms only the DMA address array is
 * allocated; no page pointer array is needed.
 */
static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->dma_address = kvzalloc_objs(*ttm->dma_address, ttm->num_pages);
	if (!ttm->dma_address)
		return -ENOMEM;

	return 0;
}
146
/* Destroy @ttm through the driver's ttm_tt_destroy callback. */
void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	bdev->funcs->ttm_tt_destroy(bdev, ttm);
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_destroy);
152
/* Initialize the common struct ttm_tt fields from the BO and caller args. */
static void ttm_tt_init_fields(struct ttm_tt *ttm,
			       struct ttm_buffer_object *bo,
			       uint32_t page_flags,
			       enum ttm_caching caching,
			       unsigned long extra_pages)
{
	ttm->page_flags = page_flags;
	ttm->caching = caching;
	ttm->sg = bo->sg;
	/* Page count covers the BO size plus any caller-requested extras. */
	ttm->num_pages = (PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT) + extra_pages;
	ttm->dma_address = NULL;
	ttm->swap_storage = NULL;
	ttm->backup = NULL;
	ttm->restore = NULL;
}
168
/*
 * Initialize @ttm for @bo and allocate its page-pointer directory.
 * Returns 0 on success or -ENOMEM when the directory allocation fails.
 */
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags, enum ttm_caching caching,
		unsigned long extra_pages)
{
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags, caching, extra_pages);

	ret = ttm_tt_alloc_page_directory(ttm);
	if (ret)
		pr_err("Failed allocating page table\n");

	return ret;
}
EXPORT_SYMBOL(ttm_tt_init);
182
void ttm_tt_fini(struct ttm_tt *ttm)
{
	/* A still-populated ttm must be unpopulated before finalizing. */
	WARN_ON(ttm->page_flags & TTM_TT_FLAG_PRIV_POPULATED);

	/* Drop any swap storage left over from ttm_tt_swapout(). */
	if (ttm->swap_storage)
		fput(ttm->swap_storage);
	ttm->swap_storage = NULL;

	/* Release backed-up pool pages and the backup object itself. */
	if (ttm_tt_is_backed_up(ttm))
		ttm_pool_drop_backed_up(ttm);
	if (ttm->backup) {
		ttm_backup_fini(ttm->backup);
		ttm->backup = NULL;
	}

	/*
	 * dma_address may point into the tail of the ttm->pages
	 * allocation (see ttm_dma_tt_alloc_page_directory()), so free
	 * exactly one of the two pointers, never both.
	 */
	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm->dma_address);
	ttm->pages = NULL;
	ttm->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);
206
/*
 * Initialize @ttm for an sg/dma-capable BO and allocate its page
 * directory. EXTERNAL ttms get only a DMA address array; otherwise a
 * combined page-pointer + DMA address array is allocated.
 *
 * Return: 0 on success, negative error code on allocation failure.
 */
int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		   uint32_t page_flags, enum ttm_caching caching)
{
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags, caching, 0);

	if (page_flags & TTM_TT_FLAG_EXTERNAL)
		ret = ttm_sg_tt_alloc_page_directory(ttm);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm);
	if (ret) {
		pr_err("Failed allocating page table\n");
		/* Propagate the allocator's error instead of a hard-coded -ENOMEM. */
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);
225
ttm_tt_swapin(struct ttm_tt * ttm)226 int ttm_tt_swapin(struct ttm_tt *ttm)
227 {
228 struct address_space *swap_space;
229 struct file *swap_storage;
230 struct page *from_page;
231 struct page *to_page;
232 gfp_t gfp_mask;
233 int i, ret;
234
235 swap_storage = ttm->swap_storage;
236 BUG_ON(swap_storage == NULL);
237
238 swap_space = swap_storage->f_mapping;
239 gfp_mask = mapping_gfp_mask(swap_space);
240
241 for (i = 0; i < ttm->num_pages; ++i) {
242 from_page = shmem_read_mapping_page_gfp(swap_space, i,
243 gfp_mask);
244 if (IS_ERR(from_page)) {
245 ret = PTR_ERR(from_page);
246 goto out_err;
247 }
248 to_page = ttm->pages[i];
249 if (unlikely(to_page == NULL)) {
250 ret = -ENOMEM;
251 goto out_err;
252 }
253
254 copy_highpage(to_page, from_page);
255 put_page(from_page);
256 }
257
258 fput(swap_storage);
259 ttm->swap_storage = NULL;
260 ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;
261
262 return 0;
263
264 out_err:
265 return ret;
266 }
267 EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_swapin);
268
269 /**
270 * ttm_tt_backup() - Helper to back up a struct ttm_tt.
271 * @bdev: The TTM device.
272 * @tt: The struct ttm_tt.
273 * @flags: Flags that govern the backup behaviour.
274 *
275 * Update the page accounting and call ttm_pool_shrink_tt to free pages
276 * or back them up.
277 *
278 * Return: Number of pages freed or swapped out, or negative error code on
279 * error.
280 */
ttm_tt_backup(struct ttm_device * bdev,struct ttm_tt * tt,const struct ttm_backup_flags flags)281 long ttm_tt_backup(struct ttm_device *bdev, struct ttm_tt *tt,
282 const struct ttm_backup_flags flags)
283 {
284 long ret;
285
286 if (WARN_ON(IS_ERR_OR_NULL(tt->backup)))
287 return 0;
288
289 ret = ttm_pool_backup(&bdev->pool, tt, &flags);
290 if (ret > 0) {
291 tt->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
292 tt->page_flags |= TTM_TT_FLAG_BACKED_UP;
293 }
294
295 return ret;
296 }
297
ttm_tt_restore(struct ttm_device * bdev,struct ttm_tt * tt,const struct ttm_operation_ctx * ctx)298 int ttm_tt_restore(struct ttm_device *bdev, struct ttm_tt *tt,
299 const struct ttm_operation_ctx *ctx)
300 {
301 int ret = ttm_pool_restore_and_alloc(&bdev->pool, tt, ctx);
302
303 if (ret)
304 return ret;
305
306 tt->page_flags &= ~TTM_TT_FLAG_BACKED_UP;
307
308 return 0;
309 }
310 EXPORT_SYMBOL(ttm_tt_restore);
311
312 /**
313 * ttm_tt_swapout - swap out tt object
314 *
315 * @bdev: TTM device structure.
316 * @ttm: The struct ttm_tt.
317 * @gfp_flags: Flags to use for memory allocation.
318 *
319 * Swapout a TT object to a shmem_file, return number of pages swapped out or
320 * negative error code.
321 */
ttm_tt_swapout(struct ttm_device * bdev,struct ttm_tt * ttm,gfp_t gfp_flags)322 int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
323 gfp_t gfp_flags)
324 {
325 loff_t size = (loff_t)ttm->num_pages << PAGE_SHIFT;
326 struct address_space *swap_space;
327 struct file *swap_storage;
328 struct page *from_page;
329 struct page *to_page;
330 int i, ret;
331
332 swap_storage = shmem_file_setup("ttm swap", size, EMPTY_VMA_FLAGS);
333 if (IS_ERR(swap_storage)) {
334 pr_err("Failed allocating swap storage\n");
335 return PTR_ERR(swap_storage);
336 }
337
338 swap_space = swap_storage->f_mapping;
339 gfp_flags &= mapping_gfp_mask(swap_space);
340
341 for (i = 0; i < ttm->num_pages; ++i) {
342 from_page = ttm->pages[i];
343 if (unlikely(from_page == NULL))
344 continue;
345
346 to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_flags);
347 if (IS_ERR(to_page)) {
348 ret = PTR_ERR(to_page);
349 goto out_err;
350 }
351 copy_highpage(to_page, from_page);
352 set_page_dirty(to_page);
353 mark_page_accessed(to_page);
354 put_page(to_page);
355 }
356
357 ttm_tt_unpopulate(bdev, ttm);
358 ttm->swap_storage = swap_storage;
359 ttm->page_flags |= TTM_TT_FLAG_SWAPPED;
360
361 return ttm->num_pages;
362
363 out_err:
364 fput(swap_storage);
365
366 return ret;
367 }
368 EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_swapout);
369
/*
 * Populate @ttm with pages, honouring the global TTM page limits and
 * swapping content back in if the tt was previously swapped out.
 */
int ttm_tt_populate(struct ttm_device *bdev,
		    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (!ttm)
		return -EINVAL;

	if (ttm_tt_is_populated(ttm))
		return 0;

	/* Externally backed ttms are not counted against the global limits. */
	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
		if (ttm_pool_uses_dma32(&bdev->pool))
			atomic_long_add(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	/* Swap out other BOs until we are back under the global limits. */
	while (atomic_long_read(&ttm_pages_allocated) > ttm_pages_limit ||
	       atomic_long_read(&ttm_dma32_pages_allocated) >
	       ttm_dma32_pages_limit) {

		/* ret == 0 means nothing more could be swapped out. */
		ret = ttm_global_swapout(ctx, GFP_KERNEL);
		if (ret == 0)
			break;
		if (ret < 0)
			goto error;
	}

	/* Prefer the driver's populate callback, fall back to the pool. */
	if (bdev->funcs->ttm_tt_populate)
		ret = bdev->funcs->ttm_tt_populate(bdev, ttm, ctx);
	else
		ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
	if (ret)
		goto error;

	ttm->page_flags |= TTM_TT_FLAG_PRIV_POPULATED;
	ttm->page_flags &= ~TTM_TT_FLAG_BACKED_UP;
	if (unlikely(ttm->page_flags & TTM_TT_FLAG_SWAPPED)) {
		/* Copy previously swapped-out content into the new pages. */
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_tt_unpopulate(bdev, ttm);
			return ret;
		}
	}

	return 0;

error:
	/* Undo the page accounting done above. */
	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (ttm_pool_uses_dma32(&bdev->pool))
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}
	return ret;
}

#if IS_ENABLED(CONFIG_DRM_TTM_KUNIT_TEST)
EXPORT_SYMBOL(ttm_tt_populate);
#endif
431
/* Free @ttm's pages and update the global page accounting. */
void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	if (!ttm_tt_is_populated(ttm))
		return;

	/* Prefer the driver's unpopulate callback, fall back to the pool. */
	if (bdev->funcs->ttm_tt_unpopulate)
		bdev->funcs->ttm_tt_unpopulate(bdev, ttm);
	else
		ttm_pool_free(&bdev->pool, ttm);

	/* Mirror the accounting done in ttm_tt_populate(). */
	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (ttm_pool_uses_dma32(&bdev->pool))
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	ttm->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_unpopulate);
452
453 #ifdef CONFIG_DEBUG_FS
454
/* Test the shrinker functions and dump the result */
static int ttm_tt_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct ttm_operation_ctx ctx = { };

	/* Trigger one global swapout pass and print its return value. */
	seq_printf(m, "%d\n", ttm_global_swapout(&ctx, GFP_KERNEL));
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_tt_debugfs_shrink);
464
465 #endif
466
467
468 /*
469 * ttm_tt_mgr_init - register with the MM shrinker
470 *
471 * Register with the MM shrinker for swapping out BOs.
472 */
ttm_tt_mgr_init(unsigned long num_pages,unsigned long num_dma32_pages)473 void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages)
474 {
475 #ifdef CONFIG_DEBUG_FS
476 debugfs_create_file("tt_shrink", 0400, ttm_debugfs_root, NULL,
477 &ttm_tt_debugfs_shrink_fops);
478 #endif
479
480 if (!ttm_pages_limit)
481 ttm_pages_limit = num_pages;
482
483 if (!ttm_dma32_pages_limit)
484 ttm_dma32_pages_limit = num_dma32_pages;
485 }
486
/* Map page @i of the iterator's tt with its caching-derived protection. */
static void ttm_kmap_iter_tt_map_local(struct ttm_kmap_iter *iter,
				       struct iosys_map *dmap,
				       pgoff_t i)
{
	struct ttm_kmap_iter_tt *iter_tt =
		container_of(iter, typeof(*iter_tt), base);
	struct page *page = iter_tt->tt->pages[i];
	void *vaddr;

	vaddr = kmap_local_page_prot(page, iter_tt->prot);
	iosys_map_set_vaddr(dmap, vaddr);
}
497
/* Undo a mapping established by ttm_kmap_iter_tt_map_local(). */
static void ttm_kmap_iter_tt_unmap_local(struct ttm_kmap_iter *iter,
					 struct iosys_map *map)
{
	kunmap_local(map->vaddr);
}
503
/* kmap iterator ops for iterators backed by ttm pages (maps_tt = true). */
static const struct ttm_kmap_iter_ops ttm_kmap_iter_tt_ops = {
	.map_local = ttm_kmap_iter_tt_map_local,
	.unmap_local = ttm_kmap_iter_tt_unmap_local,
	.maps_tt = true,
};
509
510 /**
511 * ttm_kmap_iter_tt_init - Initialize a struct ttm_kmap_iter_tt
512 * @iter_tt: The struct ttm_kmap_iter_tt to initialize.
513 * @tt: Struct ttm_tt holding page pointers of the struct ttm_resource.
514 *
515 * Return: Pointer to the embedded struct ttm_kmap_iter.
516 */
517 struct ttm_kmap_iter *
ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt * iter_tt,struct ttm_tt * tt)518 ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
519 struct ttm_tt *tt)
520 {
521 iter_tt->base.ops = &ttm_kmap_iter_tt_ops;
522 iter_tt->tt = tt;
523 if (tt)
524 iter_tt->prot = ttm_prot_from_caching(tt->caching, PAGE_KERNEL);
525 else
526 iter_tt->prot = PAGE_KERNEL;
527
528 return &iter_tt->base;
529 }
530 EXPORT_SYMBOL(ttm_kmap_iter_tt_init);
531
/**
 * ttm_tt_pages_limit() - Return the global limit for allocated TT pages.
 *
 * Return: The current value of the module-parameter controlled
 * ttm_pages_limit.
 */
unsigned long ttm_tt_pages_limit(void)
{
	return ttm_pages_limit;
}
EXPORT_SYMBOL(ttm_tt_pages_limit);
537
538 /**
539 * ttm_tt_setup_backup() - Allocate and assign a backup structure for a ttm_tt
540 * @tt: The ttm_tt for wich to allocate and assign a backup structure.
541 *
542 * Assign a backup structure to be used for tt backup. This should
543 * typically be done at bo creation, to avoid allocations at shrinking
544 * time.
545 *
546 * Return: 0 on success, negative error code on failure.
547 */
ttm_tt_setup_backup(struct ttm_tt * tt)548 int ttm_tt_setup_backup(struct ttm_tt *tt)
549 {
550 struct file *backup =
551 ttm_backup_shmem_create(((loff_t)tt->num_pages) << PAGE_SHIFT);
552
553 if (WARN_ON_ONCE(!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)))
554 return -EINVAL;
555
556 if (IS_ERR(backup))
557 return PTR_ERR(backup);
558
559 if (tt->backup)
560 ttm_backup_fini(tt->backup);
561
562 tt->backup = backup;
563 return 0;
564 }
565 EXPORT_SYMBOL(ttm_tt_setup_backup);
566