/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include "drm_cache.h"
#include "drm_mem_util.h"
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_page_alloc.h"

static int ttm_tt_swapin(struct ttm_tt *ttm);

/**
 * Allocates storage for pointers to the pages that back the ttm.
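 *
 * Only the arrays of page pointers and DMA addresses are allocated here;
 * the backing pages themselves are allocated lazily, one at a time, by
 * __ttm_tt_get_page().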
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
	ttm->dma_address = drm_calloc_large(ttm->num_pages,
					    sizeof(*ttm->dma_address));
}

static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
	drm_free_large(ttm->pages);
	ttm->pages = NULL;
	drm_free_large(ttm->dma_address);
	ttm->dma_address = NULL;
}

static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
	int write;
	int dirty;
	struct page *page;
	int i;
	struct ttm_backend *be = ttm->be;

	BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
	write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
	dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);

	if (be)
		be->func->clear(be);

	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (page == NULL)
			continue;

		if (page == ttm->dummy_read_page) {
			BUG_ON(write);
			continue;
		}

		if (write && dirty && !PageReserved(page))
			set_page_dirty_lock(page);

		ttm->pages[i] = NULL;
		ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
		put_page(page);
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}

static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	struct page *p;
	struct list_head h;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	int ret;

	while (NULL == (p = ttm->pages[index])) {

		INIT_LIST_HEAD(&h);

		ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
				    &ttm->dma_address[index]);

		if (ret != 0)
			return NULL;

		p = list_first_entry(&h, struct page, lru);

		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
		if (unlikely(ret != 0))
			goto out_err;

		if (PageHighMem(p))
			ttm->pages[--ttm->first_himem_page] = p;
		else
			ttm->pages[++ttm->last_lomem_page] = p;
	}
	return p;
out_err:
	put_page(p);
	return NULL;
}

struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	int ret;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return NULL;
	}
	return __ttm_tt_get_page(ttm, index);
}

int ttm_tt_populate(struct ttm_tt *ttm)
{
	struct page *page;
	unsigned long i;
	struct ttm_backend *be;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	be = ttm->be;

	for (i = 0; i < ttm->num_pages; ++i) {
		page = __ttm_tt_get_page(ttm, i);
		if (!page)
			return -ENOMEM;
	}

	be->func->populate(be, ttm->num_pages, ttm->pages,
			   ttm->dummy_read_page, ttm->dma_address);
	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_populate);

#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype.
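		 * (On x86 this goes through the PAT code: set_pages_wb()
		 * releases the page's current memtype reservation so that
		 * the new WC/UC attribute below can be set.)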
		 */

		ret = set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = set_memory_wc((unsigned long) page_address(p), 1);
	else if (c_new == tt_uncached)
		ret = set_pages_uc(p, 1);

	return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
	int i;
	unsigned count = 0;
	struct list_head h;
	struct page *cur_page;
	struct ttm_backend *be = ttm->be;

	INIT_LIST_HEAD(&h);

	if (be)
		be->func->clear(be);
	for (i = 0; i < ttm->num_pages; ++i) {

		cur_page = ttm->pages[i];
		ttm->pages[i] = NULL;
		if (cur_page) {
			if (page_count(cur_page) != 1)
				printk(KERN_ERR TTM_PFX
				       "Erroneous page count. "
				       "Leaking pages.\n");
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 cur_page);
			list_add(&cur_page->lru, &h);
			count++;
		}
	}
	ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
		      ttm->dma_address);
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}

void ttm_tt_destroy(struct ttm_tt *ttm)
{
	struct ttm_backend *be;

	if (unlikely(ttm == NULL))
		return;

	be = ttm->be;
	if (likely(be != NULL)) {
		be->func->destroy(be);
		ttm->be = NULL;
	}

	if (likely(ttm->pages != NULL)) {
		if (ttm->page_flags & TTM_PAGE_FLAG_USER)
			ttm_tt_free_user_pages(ttm);
		else
			ttm_tt_free_alloced_pages(ttm);

		ttm_tt_free_page_directory(ttm);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	kfree(ttm);
}

int ttm_tt_set_user(struct ttm_tt *ttm,
		    struct task_struct *tsk,
		    unsigned long start, unsigned long num_pages)
{
	struct mm_struct *mm = tsk->mm;
	int ret;
	int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;

	BUG_ON(num_pages != ttm->num_pages);
	BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);

	/**
	 * Account user pages as lowmem pages for now.
	 */

	ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
				   false, false);
	if (unlikely(ret != 0))
		return ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(tsk, mm, start, num_pages,
			     write, 0, ttm->pages, NULL);
	up_read(&mm->mmap_sem);

	if (ret != num_pages && write) {
		ttm_tt_free_user_pages(ttm);
		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
		return -ENOMEM;
	}

	ttm->tsk = tsk;
	ttm->start = start;
	ttm->state = tt_unbound;

	return 0;
}

struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
			     uint32_t page_flags, struct page *dummy_read_page)
{
	struct ttm_bo_driver *bo_driver = bdev->driver;
	struct ttm_tt *ttm;

	if (!bo_driver)
		return NULL;

	ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
	if (!ttm)
		return NULL;

	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;

	ttm->dummy_read_page = dummy_read_page;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
		return NULL;
	}
	ttm->be = bo_driver->create_ttm_backend_entry(bdev);
	if (!ttm->be) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
		return NULL;
	}
	ttm->state = tt_unpopulated;
	return ttm;
}

void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;
	struct ttm_backend *be = ttm->be;

	if (ttm->state == tt_bound) {
		ret = be->func->unbind(be);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;
	struct ttm_backend *be;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	be = ttm->be;

	ret = ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = be->func->bind(be, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER)
		ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

static int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
				      ttm->num_pages);
		if (unlikely(ret != 0))
			return ret;

		ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
		return 0;
	}

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = read_mapping_page(swap_space, i, NULL);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = __ttm_tt_get_page(ttm, i);
		if (unlikely(to_page == NULL))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		page_cache_release(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	ttm_tt_free_alloced_pages(ttm);
	return ret;
}

int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	/*
	 * For user buffers, just unpin the pages, as there should be
	 * vma references.
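	 * The user's address space keeps the data alive, and the pages are
	 * re-pinned via ttm_tt_set_user() when ttm_tt_swapin() runs.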
	 */

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ttm_tt_free_user_pages(ttm);
		ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
		ttm->swap_storage = NULL;
		return 0;
	}

	if (!persistant_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (unlikely(IS_ERR(swap_storage))) {
			printk(KERN_ERR "Failed allocating swap storage.\n");
			return PTR_ERR(swap_storage);
		}
	} else
		swap_storage = persistant_swap_storage;

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = read_mapping_page(swap_space, i, NULL);
		if (unlikely(IS_ERR(to_page))) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	ttm_tt_free_alloced_pages(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistant_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;

	return 0;
out_err:
	if (!persistant_swap_storage)
		fput(swap_storage);

	return ret;
}
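
/*
 * Illustrative usage sketch: a driver would typically drive this interface
 * roughly as follows when moving a buffer object into a GART-like domain.
 * The names bo, mem and glob below only stand in for driver-side state and
 * are placeholders, not symbols defined in this file:
 *
 *	ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
 *			    page_flags, glob->dummy_read_page);
 *	if (unlikely(ttm == NULL))
 *		return -ENOMEM;
 *	ret = ttm_tt_set_placement_caching(ttm, mem->placement);
 *	if (ret == 0)
 *		ret = ttm_tt_bind(ttm, mem);
 *	...
 *	ttm_tt_unbind(ttm);
 *	ttm_tt_destroy(ttm);
 */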