/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/cc_platform.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/shmem_fs.h>
#include <drm/drm_cache.h>
#include <drm/drm_device.h>
#include <drm/drm_util.h>
#include <drm/ttm/ttm_backup.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_tt.h>

#include "ttm_module.h"

static unsigned long ttm_pages_limit;

MODULE_PARM_DESC(pages_limit, "Limit for the allocated pages");
module_param_named(pages_limit, ttm_pages_limit, ulong, 0644);

static unsigned long ttm_dma32_pages_limit;

MODULE_PARM_DESC(dma32_pages_limit, "Limit for the allocated DMA32 pages");
module_param_named(dma32_pages_limit, ttm_dma32_pages_limit, ulong, 0644);

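/*
 * Number of pages currently allocated by TTM, in total and in the DMA32
 * zone. Compared against the limits above in ttm_tt_populate() to decide
 * when ttm_global_swapout() must be triggered.
 */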
static atomic_long_t ttm_pages_allocated;
static atomic_long_t ttm_dma32_pages_allocated;

/*
 * Allocates a ttm structure for the given BO.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_device *bdev = bo->bdev;
	struct drm_device *ddev = bo->base.dev;
	uint32_t page_flags = 0;

	dma_resv_assert_held(bo->base.resv);

	if (bo->ttm)
		return 0;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_TT_FLAG_EXTERNAL;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}
	/*
	 * When using dma_alloc_coherent with memory encryption, the
	 * mapped TT pages need to be decrypted; otherwise the drivers
	 * will end up sending encrypted memory to the GPU.
	 */
	if (bdev->pool.use_dma_alloc && cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
		page_flags |= TTM_TT_FLAG_DECRYPTED;
		drm_info_once(ddev, "TT memory decryption enabled.");
	}

	bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	WARN_ON(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE &&
		!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL));

	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_create);

/*
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvcalloc(ttm->num_pages, sizeof(void*), GFP_KERNEL);
	if (!ttm->pages)
		return -ENOMEM;

	return 0;
}

static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvcalloc(ttm->num_pages, sizeof(*ttm->pages) +
			      sizeof(*ttm->dma_address), GFP_KERNEL);
	if (!ttm->pages)
		return -ENOMEM;

	ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);
	return 0;
}

static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->dma_address = kvcalloc(ttm->num_pages, sizeof(*ttm->dma_address),
				    GFP_KERNEL);
	if (!ttm->dma_address)
		return -ENOMEM;

	return 0;
}

void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	bdev->funcs->ttm_tt_destroy(bdev, ttm);
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_destroy);

static void ttm_tt_init_fields(struct ttm_tt *ttm,
			       struct ttm_buffer_object *bo,
			       uint32_t page_flags,
			       enum ttm_caching caching,
			       unsigned long extra_pages)
{
	ttm->num_pages = (PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT) + extra_pages;
	ttm->page_flags = page_flags;
	ttm->dma_address = NULL;
	ttm->swap_storage = NULL;
	ttm->sg = bo->sg;
	ttm->caching = caching;
	ttm->restore = NULL;
	ttm->backup = NULL;
}

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags, enum ttm_caching caching,
		unsigned long extra_pages)
{
	ttm_tt_init_fields(ttm, bo, page_flags, caching, extra_pages);

	if (ttm_tt_alloc_page_directory(ttm)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
	WARN_ON(ttm->page_flags & TTM_TT_FLAG_PRIV_POPULATED);

	if (ttm->swap_storage)
		fput(ttm->swap_storage);
	ttm->swap_storage = NULL;

	if (ttm_tt_is_backed_up(ttm))
		ttm_pool_drop_backed_up(ttm);
	if (ttm->backup) {
		ttm_backup_fini(ttm->backup);
		ttm->backup = NULL;
	}

	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm->dma_address);
	ttm->pages = NULL;
	ttm->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		   uint32_t page_flags, enum ttm_caching caching)
{
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags, caching, 0);

	if (page_flags & TTM_TT_FLAG_EXTERNAL)
		ret = ttm_sg_tt_alloc_page_directory(ttm);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm);
	if (ret) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);

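/**
 * ttm_tt_swapin() - Swap the contents of a struct ttm_tt back in.
 * @ttm: The struct ttm_tt, with its backing pages already populated.
 *
 * Copy the contents of @ttm's shmem swap storage, set up by
 * ttm_tt_swapout(), into the backing pages, then release the swap
 * storage and clear TTM_TT_FLAG_SWAPPED.
 *
 * Return: 0 on success, negative error code on failure.
 */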
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	gfp_t gfp_mask;
	int i, ret;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_mapping;
	gfp_mask = mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = shmem_read_mapping_page_gfp(swap_space, i,
							gfp_mask);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}

		copy_highpage(to_page, from_page);
		put_page(from_page);
	}

	fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;

	return 0;

out_err:
	return ret;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_swapin);

/**
 * ttm_tt_backup() - Helper to back up a struct ttm_tt.
 * @bdev: The TTM device.
 * @tt: The struct ttm_tt.
 * @flags: Flags that govern the backup behaviour.
 *
 * Update the page accounting and call ttm_pool_backup() to free pages
 * or back them up.
 *
 * Return: Number of pages freed or swapped out, or negative error code
 * on failure.
 */
long ttm_tt_backup(struct ttm_device *bdev, struct ttm_tt *tt,
		   const struct ttm_backup_flags flags)
{
	long ret;

	if (WARN_ON(IS_ERR_OR_NULL(tt->backup)))
		return 0;

	ret = ttm_pool_backup(&bdev->pool, tt, &flags);
	if (ret > 0) {
		tt->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
		tt->page_flags |= TTM_TT_FLAG_BACKED_UP;
	}

	return ret;
}

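/**
 * ttm_tt_restore() - Restore a backed-up struct ttm_tt.
 * @bdev: The TTM device.
 * @tt: The struct ttm_tt.
 * @ctx: The struct ttm_operation_ctx governing the restore.
 *
 * Allocate backing pages and restore their contents from @tt's backup,
 * clearing TTM_TT_FLAG_BACKED_UP on success.
 *
 * Return: 0 on success, negative error code on failure.
 */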
int ttm_tt_restore(struct ttm_device *bdev, struct ttm_tt *tt,
		   const struct ttm_operation_ctx *ctx)
{
	int ret = ttm_pool_restore_and_alloc(&bdev->pool, tt, ctx);

	if (ret)
		return ret;

	tt->page_flags &= ~TTM_TT_FLAG_BACKED_UP;

	return 0;
}
EXPORT_SYMBOL(ttm_tt_restore);

/**
 * ttm_tt_swapout() - Swap out a struct ttm_tt.
 * @bdev: TTM device structure.
 * @ttm: The struct ttm_tt.
 * @gfp_flags: Flags to use for memory allocation.
 *
 * Swap out a TT object to a shmem file, and return the number of pages
 * swapped out or a negative error code.
 */
int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
		   gfp_t gfp_flags)
{
	loff_t size = (loff_t)ttm->num_pages << PAGE_SHIFT;
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i, ret;

	swap_storage = shmem_file_setup("ttm swap", size, 0);
	if (IS_ERR(swap_storage)) {
		pr_err("Failed allocating swap storage\n");
		return PTR_ERR(swap_storage);
	}

	swap_space = swap_storage->f_mapping;
	gfp_flags &= mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_flags);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(bdev, ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_TT_FLAG_SWAPPED;

	return ttm->num_pages;

out_err:
	fput(swap_storage);

	return ret;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_swapout);

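/**
 * ttm_tt_populate() - Allocate the backing pages of a struct ttm_tt.
 * @bdev: The TTM device.
 * @ttm: The struct ttm_tt to populate.
 * @ctx: The struct ttm_operation_ctx governing the allocation.
 *
 * Account the pages against the global limits, swapping out buffer
 * objects while a limit is exceeded, then allocate the pages through
 * the driver callback or the pool and, if needed, swap previously
 * swapped-out contents back in.
 *
 * Return: 0 on success, negative error code on failure.
 */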
int ttm_tt_populate(struct ttm_device *bdev,
		    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (!ttm)
		return -EINVAL;

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_add(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	while (atomic_long_read(&ttm_pages_allocated) > ttm_pages_limit ||
	       atomic_long_read(&ttm_dma32_pages_allocated) >
	       ttm_dma32_pages_limit) {

		ret = ttm_global_swapout(ctx, GFP_KERNEL);
		if (ret == 0)
			break;
		if (ret < 0)
			goto error;
	}

	if (bdev->funcs->ttm_tt_populate)
		ret = bdev->funcs->ttm_tt_populate(bdev, ttm, ctx);
	else
		ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
	if (ret)
		goto error;

	ttm->page_flags |= TTM_TT_FLAG_PRIV_POPULATED;
	ttm->page_flags &= ~TTM_TT_FLAG_BACKED_UP;
	if (unlikely(ttm->page_flags & TTM_TT_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_tt_unpopulate(bdev, ttm);
			return ret;
		}
	}

	return 0;

error:
	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}
	return ret;
}

#if IS_ENABLED(CONFIG_DRM_TTM_KUNIT_TEST)
EXPORT_SYMBOL(ttm_tt_populate);
#endif

void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	if (!ttm_tt_is_populated(ttm))
		return;

	if (bdev->funcs->ttm_tt_unpopulate)
		bdev->funcs->ttm_tt_unpopulate(bdev, ttm);
	else
		ttm_pool_free(&bdev->pool, ttm);

	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	ttm->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_unpopulate);

#ifdef CONFIG_DEBUG_FS

/* Test the shrinker functions and dump the result */
static int ttm_tt_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct ttm_operation_ctx ctx = { false, false };

	seq_printf(m, "%d\n", ttm_global_swapout(&ctx, GFP_KERNEL));
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_tt_debugfs_shrink);

#endif

/*
 * ttm_tt_mgr_init - set up the global TT page limits
 *
 * Initialize the default limits used when deciding whether to swap out
 * BOs, and create the debugfs file for exercising global swapout.
 */
void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages)
{
#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("tt_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_tt_debugfs_shrink_fops);
#endif

	if (!ttm_pages_limit)
		ttm_pages_limit = num_pages;

	if (!ttm_dma32_pages_limit)
		ttm_dma32_pages_limit = num_dma32_pages;
}

static void ttm_kmap_iter_tt_map_local(struct ttm_kmap_iter *iter,
				       struct iosys_map *dmap,
				       pgoff_t i)
{
	struct ttm_kmap_iter_tt *iter_tt =
		container_of(iter, typeof(*iter_tt), base);

	iosys_map_set_vaddr(dmap, kmap_local_page_prot(iter_tt->tt->pages[i],
						       iter_tt->prot));
}

static void ttm_kmap_iter_tt_unmap_local(struct ttm_kmap_iter *iter,
					 struct iosys_map *map)
{
	kunmap_local(map->vaddr);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_tt_ops = {
	.map_local = ttm_kmap_iter_tt_map_local,
	.unmap_local = ttm_kmap_iter_tt_unmap_local,
	.maps_tt = true,
};

/**
 * ttm_kmap_iter_tt_init - Initialize a struct ttm_kmap_iter_tt
 * @iter_tt: The struct ttm_kmap_iter_tt to initialize.
 * @tt: Struct ttm_tt holding page pointers of the struct ttm_resource.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
		      struct ttm_tt *tt)
{
	iter_tt->base.ops = &ttm_kmap_iter_tt_ops;
	iter_tt->tt = tt;
	if (tt)
		iter_tt->prot = ttm_prot_from_caching(tt->caching, PAGE_KERNEL);
	else
		iter_tt->prot = PAGE_KERNEL;

	return &iter_tt->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_tt_init);

unsigned long ttm_tt_pages_limit(void)
{
	return ttm_pages_limit;
}
EXPORT_SYMBOL(ttm_tt_pages_limit);

/**
 * ttm_tt_setup_backup() - Allocate and assign a backup structure for a ttm_tt
 * @tt: The ttm_tt for which to allocate and assign a backup structure.
 *
 * Assign a backup structure to be used for tt backup. This should
 * typically be done at bo creation, to avoid allocations at shrinking
 * time.
 *
 * Return: 0 on success, negative error code on failure.
 */
int ttm_tt_setup_backup(struct ttm_tt *tt)
{
	struct file *backup =
		ttm_backup_shmem_create(((loff_t)tt->num_pages) << PAGE_SHIFT);

	if (WARN_ON_ONCE(!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)))
		return -EINVAL;

	if (IS_ERR(backup))
		return PTR_ERR(backup);

	if (tt->backup)
		ttm_backup_fini(tt->backup);

	tt->backup = backup;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_setup_backup);