/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"

static int ttm_tt_swapin(struct ttm_tt *ttm);

#if defined(CONFIG_X86)
static void ttm_tt_clflush_page(struct page *page)
{
	uint8_t *page_virtual;
	unsigned int i;

	if (unlikely(page == NULL))
		return;

	page_virtual = kmap_atomic(page, KM_USER0);

	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
		clflush(page_virtual + i);

	kunmap_atomic(page_virtual, KM_USER0);
}

static void ttm_tt_cache_flush_clflush(struct page *pages[],
				       unsigned long num_pages)
{
	unsigned long i;

	mb();
	for (i = 0; i < num_pages; ++i)
		ttm_tt_clflush_page(*pages++);
	mb();
}
#elif !defined(__powerpc__)
static void ttm_tt_ipi_handler(void *null)
{
	;
}
#endif

void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
{

#if defined(CONFIG_X86)
	if (cpu_has_clflush) {
		ttm_tt_cache_flush_clflush(pages, num_pages);
		return;
	}
#elif defined(__powerpc__)
	unsigned long i;

	for (i = 0; i < num_pages; ++i) {
		struct page *page = pages[i];
		void *page_virtual;

		if (unlikely(page == NULL))
			continue;

		page_virtual = kmap_atomic(page, KM_USER0);
		flush_dcache_range((unsigned long) page_virtual,
				   (unsigned long) page_virtual + PAGE_SIZE);
		kunmap_atomic(page_virtual, KM_USER0);
	}
#else
	if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
		printk(KERN_ERR TTM_PFX
		       "Timed out waiting for drm cache flush.\n");
#endif
}

/**
 * Allocates storage for pointers to the pages that back the ttm.
 *
 * Uses kmalloc if possible. Otherwise falls back to vmalloc.
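 * When the vmalloc fallback is used, TTM_PAGE_FLAG_VMALLOC is set so that
 * ttm_tt_free_page_directory() releases the array with vfree() rather than
 * kfree().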
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
	ttm->pages = NULL;

	if (size <= PAGE_SIZE)
		ttm->pages = kzalloc(size, GFP_KERNEL);

	if (!ttm->pages) {
		ttm->pages = vmalloc_user(size);
		if (ttm->pages)
			ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
	}
}

static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
	if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
		vfree(ttm->pages);
		ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
	} else {
		kfree(ttm->pages);
	}
	ttm->pages = NULL;
}

static struct page *ttm_tt_alloc_page(unsigned page_flags)
{
	gfp_t gfp_flags = GFP_USER;

	if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags |= __GFP_DMA32;
	else
		gfp_flags |= __GFP_HIGHMEM;

	return alloc_page(gfp_flags);
}

static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
	int write;
	int dirty;
	struct page *page;
	int i;
	struct ttm_backend *be = ttm->be;

	BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
	write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
	dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);

	if (be)
		be->func->clear(be);

	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (page == NULL)
			continue;

		if (page == ttm->dummy_read_page) {
			BUG_ON(write);
			continue;
		}

		if (write && dirty && !PageReserved(page))
			set_page_dirty_lock(page);

		ttm->pages[i] = NULL;
		ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
		put_page(page);
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}

static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	struct page *p;
	struct ttm_bo_device *bdev = ttm->bdev;
	struct ttm_mem_global *mem_glob = bdev->mem_glob;
	int ret;

	while (NULL == (p = ttm->pages[index])) {
		p = ttm_tt_alloc_page(ttm->page_flags);

		if (!p)
			return NULL;

		if (PageHighMem(p)) {
			ret = ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
						   false, false, true);
			if (unlikely(ret != 0))
				goto out_err;
			ttm->pages[--ttm->first_himem_page] = p;
		} else {
			ret = ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
						   false, false, false);
			if (unlikely(ret != 0))
				goto out_err;
			ttm->pages[++ttm->last_lomem_page] = p;
		}
	}
	return p;
out_err:
	put_page(p);
	return NULL;
}

struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	int ret;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return NULL;
	}
	return __ttm_tt_get_page(ttm, index);
}

int ttm_tt_populate(struct ttm_tt *ttm)
{
	struct page *page;
	unsigned long i;
	struct ttm_backend *be;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	be = ttm->be;

	for (i = 0; i < ttm->num_pages; ++i) {
		page = __ttm_tt_get_page(ttm, i);
		if (!page)
			return -ENOMEM;
	}

	be->func->populate(be, ttm->num_pages, ttm->pages,
			   ttm->dummy_read_page);
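	/*
	 * All backing pages are now allocated and handed to the backend;
	 * the ttm is populated but not yet bound through the backend.
	 */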
	ttm->state = tt_unbound;
	return 0;
}

#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_state)
{
	if (PageHighMem(p))
		return 0;

	switch (c_state) {
	case tt_cached:
		return set_pages_wb(p, 1);
	case tt_wc:
		return set_memory_wc((unsigned long) page_address(p), 1);
	default:
		return set_pages_uc(p, 1);
	}
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_state)
{
	return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (c_state != tt_cached) {
		ret = ttm_tt_populate(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	if (ttm->caching_state == tt_cached)
		ttm_tt_cache_flush(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page, c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state);
		}
	}

	return ret;
}

int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}

static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
	int i;
	struct page *cur_page;
	struct ttm_backend *be = ttm->be;

	if (be)
		be->func->clear(be);
	(void)ttm_tt_set_caching(ttm, tt_cached);
	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		ttm->pages[i] = NULL;
		if (cur_page) {
			if (page_count(cur_page) != 1)
				printk(KERN_ERR TTM_PFX
				       "Erroneous page count. "
" 370 "Leaking pages.\n"); 371 ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, 372 PageHighMem(cur_page)); 373 __free_page(cur_page); 374 } 375 } 376 ttm->state = tt_unpopulated; 377 ttm->first_himem_page = ttm->num_pages; 378 ttm->last_lomem_page = -1; 379 } 380 381 void ttm_tt_destroy(struct ttm_tt *ttm) 382 { 383 struct ttm_backend *be; 384 385 if (unlikely(ttm == NULL)) 386 return; 387 388 be = ttm->be; 389 if (likely(be != NULL)) { 390 be->func->destroy(be); 391 ttm->be = NULL; 392 } 393 394 if (likely(ttm->pages != NULL)) { 395 if (ttm->page_flags & TTM_PAGE_FLAG_USER) 396 ttm_tt_free_user_pages(ttm); 397 else 398 ttm_tt_free_alloced_pages(ttm); 399 400 ttm_tt_free_page_directory(ttm); 401 } 402 403 if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) && 404 ttm->swap_storage) 405 fput(ttm->swap_storage); 406 407 kfree(ttm); 408 } 409 410 int ttm_tt_set_user(struct ttm_tt *ttm, 411 struct task_struct *tsk, 412 unsigned long start, unsigned long num_pages) 413 { 414 struct mm_struct *mm = tsk->mm; 415 int ret; 416 int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0; 417 struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob; 418 419 BUG_ON(num_pages != ttm->num_pages); 420 BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0); 421 422 /** 423 * Account user pages as lowmem pages for now. 424 */ 425 426 ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE, 427 false, false, false); 428 if (unlikely(ret != 0)) 429 return ret; 430 431 down_read(&mm->mmap_sem); 432 ret = get_user_pages(tsk, mm, start, num_pages, 433 write, 0, ttm->pages, NULL); 434 up_read(&mm->mmap_sem); 435 436 if (ret != num_pages && write) { 437 ttm_tt_free_user_pages(ttm); 438 ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false); 439 return -ENOMEM; 440 } 441 442 ttm->tsk = tsk; 443 ttm->start = start; 444 ttm->state = tt_unbound; 445 446 return 0; 447 } 448 449 struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size, 450 uint32_t page_flags, struct page *dummy_read_page) 451 { 452 struct ttm_bo_driver *bo_driver = bdev->driver; 453 struct ttm_tt *ttm; 454 455 if (!bo_driver) 456 return NULL; 457 458 ttm = kzalloc(sizeof(*ttm), GFP_KERNEL); 459 if (!ttm) 460 return NULL; 461 462 ttm->bdev = bdev; 463 464 ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 465 ttm->first_himem_page = ttm->num_pages; 466 ttm->last_lomem_page = -1; 467 ttm->caching_state = tt_cached; 468 ttm->page_flags = page_flags; 469 470 ttm->dummy_read_page = dummy_read_page; 471 472 ttm_tt_alloc_page_directory(ttm); 473 if (!ttm->pages) { 474 ttm_tt_destroy(ttm); 475 printk(KERN_ERR TTM_PFX "Failed allocating page table\n"); 476 return NULL; 477 } 478 ttm->be = bo_driver->create_ttm_backend_entry(bdev); 479 if (!ttm->be) { 480 ttm_tt_destroy(ttm); 481 printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n"); 482 return NULL; 483 } 484 ttm->state = tt_unpopulated; 485 return ttm; 486 } 487 488 void ttm_tt_unbind(struct ttm_tt *ttm) 489 { 490 int ret; 491 struct ttm_backend *be = ttm->be; 492 493 if (ttm->state == tt_bound) { 494 ret = be->func->unbind(be); 495 BUG_ON(ret); 496 ttm->state = tt_unbound; 497 } 498 } 499 500 int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) 501 { 502 int ret = 0; 503 struct ttm_backend *be; 504 505 if (!ttm) 506 return -EINVAL; 507 508 if (ttm->state == tt_bound) 509 return 0; 510 511 be = ttm->be; 512 513 ret = ttm_tt_populate(ttm); 514 if (ret) 515 return ret; 516 517 ret = be->func->bind(be, bo_mem); 518 if (ret) { 519 printk(KERN_ERR TTM_PFX "Couldn't 
		return ret;
	}

	ttm->state = tt_bound;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER)
		ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

static int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
				      ttm->num_pages);
		if (unlikely(ret != 0))
			return ret;

		ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
		return 0;
	}

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = read_mapping_page(swap_space, i, NULL);
		if (IS_ERR(from_page))
			goto out_err;
		to_page = __ttm_tt_get_page(ttm, i);
		if (unlikely(to_page == NULL))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		page_cache_release(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	ttm_tt_free_alloced_pages(ttm);
	return -ENOMEM;
}

int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	/*
	 * For user buffers, just unpin the pages, as there should be
	 * vma references.
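	 * They are pinned again through ttm_tt_set_user() when
	 * ttm_tt_swapin() is later called on a user-backed ttm.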
	 */

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ttm_tt_free_user_pages(ttm);
		ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
		ttm->swap_storage = NULL;
		return 0;
	}

	if (!persistant_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (unlikely(IS_ERR(swap_storage))) {
			printk(KERN_ERR TTM_PFX
			       "Failed allocating swap storage.\n");
			return -ENOMEM;
		}
	} else
		swap_storage = persistant_swap_storage;

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = read_mapping_page(swap_space, i, NULL);
		if (unlikely(IS_ERR(to_page)))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	ttm_tt_free_alloced_pages(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistant_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;

	return 0;
out_err:
	if (!persistant_swap_storage)
		fput(swap_storage);

	return -ENOMEM;
}
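
/*
 * Illustrative sketch only, not part of the TTM API: how a driver might
 * combine the functions above to create, populate and bind a ttm in one
 * step. The helper name and its error handling are assumptions made for
 * illustration; real drivers normally reach this code through ttm_bo and
 * their ttm_backend implementation.
 */
static inline int ttm_tt_example_create_and_bind(struct ttm_bo_device *bdev,
						 unsigned long size,
						 struct page *dummy_read_page,
						 struct ttm_mem_reg *bo_mem,
						 struct ttm_tt **p_ttm)
{
	struct ttm_tt *ttm;
	int ret;

	/* Allocate the ttm and its page directory; no pages yet. */
	ttm = ttm_tt_create(bdev, size, 0, dummy_read_page);
	if (unlikely(ttm == NULL))
		return -ENOMEM;

	/* ttm_tt_bind() populates the ttm and then binds it via the backend. */
	ret = ttm_tt_bind(ttm, bo_mem);
	if (unlikely(ret != 0)) {
		ttm_tt_destroy(ttm);
		return ret;
	}

	*p_ttm = ttm;
	return 0;
}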