xref: /linux/drivers/gpu/drm/ttm/ttm_tt.c (revision 8b1935e6a36b0967efc593d67ed3aebbfbc1f5b1)
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "drm_cache.h"
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"

static int ttm_tt_swapin(struct ttm_tt *ttm);

/**
 * Allocates storage for pointers to the pages that back the ttm.
 *
 * Uses kmalloc if possible. Otherwise falls back to vmalloc.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
	ttm->pages = NULL;

	if (size <= PAGE_SIZE)
		ttm->pages = kzalloc(size, GFP_KERNEL);

	if (!ttm->pages) {
		ttm->pages = vmalloc_user(size);
		if (ttm->pages)
			ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
	}
}

static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
	if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
		vfree(ttm->pages);
		ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
	} else {
		kfree(ttm->pages);
	}
	ttm->pages = NULL;
}

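/*
 * Allocate a single page for the ttm, honouring the page flags:
 * zeroed when TTM_PAGE_FLAG_ZERO_ALLOC is set, from the DMA32 zone
 * when TTM_PAGE_FLAG_DMA32 is set, and from highmem otherwise.
 */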
static struct page *ttm_tt_alloc_page(unsigned page_flags)
{
	gfp_t gfp_flags = GFP_USER;

	if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags |= __GFP_DMA32;
	else
		gfp_flags |= __GFP_HIGHMEM;

	return alloc_page(gfp_flags);
}

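/*
 * Release pages pinned with get_user_pages(). Pages are marked dirty
 * before release if the ttm was both writable and dirtied, and the
 * global memory accounting is adjusted for each released page.
 */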
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
	int write;
	int dirty;
	struct page *page;
	int i;
	struct ttm_backend *be = ttm->be;

	BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
	write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
	dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);

	if (be)
		be->func->clear(be);

	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (page == NULL)
			continue;

		if (page == ttm->dummy_read_page) {
			BUG_ON(write);
			continue;
		}

		if (write && dirty && !PageReserved(page))
			set_page_dirty_lock(page);

		ttm->pages[i] = NULL;
		ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
		put_page(page);
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}

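/*
 * Return the page at @index, allocating and accounting for pages until
 * that slot is filled. Highmem pages are inserted from the top of the
 * page directory downwards, lowmem pages from the bottom upwards.
 */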
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	struct page *p;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	int ret;

	while (NULL == (p = ttm->pages[index])) {
		p = ttm_tt_alloc_page(ttm->page_flags);

		if (!p)
			return NULL;

		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
		if (unlikely(ret != 0))
			goto out_err;

		if (PageHighMem(p))
			ttm->pages[--ttm->first_himem_page] = p;
		else
			ttm->pages[++ttm->last_lomem_page] = p;
	}
	return p;
out_err:
	put_page(p);
	return NULL;
}

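/*
 * As __ttm_tt_get_page(), but first swap the ttm back in if its pages
 * are currently swapped out.
 */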
struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	int ret;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return NULL;
	}
	return __ttm_tt_get_page(ttm, index);
}

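/*
 * Make sure every page backing the ttm is present, hand the page array
 * to the backend, and leave the ttm in the tt_unbound state, ready for
 * ttm_tt_bind().
 */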
int ttm_tt_populate(struct ttm_tt *ttm)
{
	struct page *page;
	unsigned long i;
	struct ttm_backend *be;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	be = ttm->be;

	for (i = 0; i < ttm->num_pages; ++i) {
		page = __ttm_tt_get_page(ttm, i);
		if (!page)
			return -ENOMEM;
	}

	be->func->populate(be, ttm->num_pages, ttm->pages,
			   ttm->dummy_read_page);
	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_populate);

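/*
 * On x86, move a page between the cached, write-combined and uncached
 * states. A page leaving a non-default state is set back to write-back
 * first so that its current memtype is released. Highmem pages have no
 * fixed kernel mapping to adjust, and non-x86 architectures need no
 * attribute changes at all, so both cases are no-ops.
 */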
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = set_memory_wc((unsigned long) page_address(p), 1);
	else if (c_new == tt_uncached)
		ret = set_pages_uc(p, 1);

	return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change the caching policy of the linear kernel map
 * for a range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (c_state != tt_cached) {
		ret = ttm_tt_populate(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

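/*
 * Translate the TTM_PL_FLAG_WC and TTM_PL_FLAG_UNCACHED placement
 * flags into a caching state and apply it to the ttm. The default,
 * with neither flag set, is cached.
 */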
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

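/*
 * Switch the ttm back to the cached state and release every page it
 * allocated itself, updating the global memory accounting. A page with
 * an unexpected reference count at this point is reported as leaked.
 */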
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
	int i;
	struct page *cur_page;
	struct ttm_backend *be = ttm->be;

	if (be)
		be->func->clear(be);
	(void)ttm_tt_set_caching(ttm, tt_cached);
	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		ttm->pages[i] = NULL;
		if (cur_page) {
			if (page_count(cur_page) != 1)
				printk(KERN_ERR TTM_PFX
				       "Erroneous page count. "
				       "Leaking pages.\n");
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 cur_page);
			__free_page(cur_page);
		}
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}

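/*
 * Tear down a ttm: destroy the backend, release user or driver
 * allocated pages, free the page directory and drop the swap storage
 * unless it is persistent.
 */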
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	struct ttm_backend *be;

	if (unlikely(ttm == NULL))
		return;

	be = ttm->be;
	if (likely(be != NULL)) {
		be->func->destroy(be);
		ttm->be = NULL;
	}

	if (likely(ttm->pages != NULL)) {
		if (ttm->page_flags & TTM_PAGE_FLAG_USER)
			ttm_tt_free_user_pages(ttm);
		else
			ttm_tt_free_alloced_pages(ttm);

		ttm_tt_free_page_directory(ttm);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	kfree(ttm);
}

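/*
 * Back a user ttm by pinning @num_pages pages of @tsk's address space,
 * starting at @start, with get_user_pages(). The pinned memory is
 * accounted as lowmem before the pages are looked up.
 */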
int ttm_tt_set_user(struct ttm_tt *ttm,
		    struct task_struct *tsk,
		    unsigned long start, unsigned long num_pages)
{
	struct mm_struct *mm = tsk->mm;
	int ret;
	int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;

	BUG_ON(num_pages != ttm->num_pages);
	BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);

	/*
	 * Account user pages as lowmem pages for now.
	 */

	ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
				   false, false);
	if (unlikely(ret != 0))
		return ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(tsk, mm, start, num_pages,
			     write, 0, ttm->pages, NULL);
	up_read(&mm->mmap_sem);

	if (ret != num_pages && write) {
		ttm_tt_free_user_pages(ttm);
		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
		return -ENOMEM;
	}

	ttm->tsk = tsk;
	ttm->start = start;
	ttm->state = tt_unbound;

	return 0;
}

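/*
 * Allocate and initialize a ttm large enough for @size bytes: set up
 * the page directory and create the driver backend. Returns NULL on
 * failure.
 */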
struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
			     uint32_t page_flags, struct page *dummy_read_page)
{
	struct ttm_bo_driver *bo_driver = bdev->driver;
	struct ttm_tt *ttm;

	if (!bo_driver)
		return NULL;

	ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
	if (!ttm)
		return NULL;

	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;

	ttm->dummy_read_page = dummy_read_page;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
		return NULL;
	}
	ttm->be = bo_driver->create_ttm_backend_entry(bdev);
	if (!ttm->be) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
		return NULL;
	}
	ttm->state = tt_unpopulated;
	return ttm;
}

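/*
 * Unbind the ttm from the backend if it is currently bound. Unbinding
 * is not allowed to fail.
 */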
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;
	struct ttm_backend *be = ttm->be;

	if (ttm->state == tt_bound) {
		ret = be->func->unbind(be);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

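/*
 * Populate the ttm if necessary and bind it to the location described
 * by @bo_mem. A user-backed ttm is flagged dirty once bound.
 */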
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;
	struct ttm_backend *be;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	be = ttm->be;

	ret = ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = be->func->bind(be, bo_mem);
	if (ret) {
		printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
		return ret;
	}

	ttm->state = tt_bound;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER)
		ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

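/*
 * Bring a swapped-out ttm back in. A user-backed ttm is simply
 * re-pinned with ttm_tt_set_user(); otherwise every page is copied
 * back from the shmem swap storage, which is then released unless it
 * is persistent.
 */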
static int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
				      ttm->num_pages);
		if (unlikely(ret != 0))
			return ret;

		ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
		return 0;
	}

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = read_mapping_page(swap_space, i, NULL);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = __ttm_tt_get_page(ttm, i);
		if (unlikely(to_page == NULL))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		page_cache_release(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	ttm_tt_free_alloced_pages(ttm);
	return ret;
}

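/*
 * Swap a ttm out. A user-backed ttm is just unpinned, while a
 * driver-backed one has its pages copied into @persistant_swap_storage,
 * or into a freshly allocated shmem file if none is given, and then
 * freed.
 */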
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	/*
	 * For user buffers, just unpin the pages, as there should be
	 * vma references.
	 */

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ttm_tt_free_user_pages(ttm);
		ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
		ttm->swap_storage = NULL;
		return 0;
	}

	if (!persistant_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (unlikely(IS_ERR(swap_storage))) {
			printk(KERN_ERR "Failed allocating swap storage.\n");
			return PTR_ERR(swap_storage);
		}
	} else {
		swap_storage = persistant_swap_storage;
	}

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = read_mapping_page(swap_space, i, NULL);
		if (unlikely(IS_ERR(to_page))) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	ttm_tt_free_alloced_pages(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistant_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;

	return 0;
out_err:
	if (!persistant_swap_storage)
		fput(swap_storage);

	return ret;
}
600