xref: /linux/drivers/gpu/drm/ttm/ttm_tt.c (revision fa41f2877429646ce062a034a6d5bfc4f8f04b8c)
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_set_memory.h>

/**
 * ttm_tt_create - allocate a ttm structure for the given BO
 * @bo: the buffer object whose backing store is created
 * @zero_alloc: if true, request zeroed pages for ttm_bo_type_device objects
 *
 * Must be called with the BO's reservation object held. Returns 0 if a
 * ttm_tt is already attached or was created successfully.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	uint32_t page_flags = 0;

	dma_resv_assert_held(bo->base.resv);

	if (bo->ttm)
		return 0;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	if (bdev->no_retry)
		page_flags |= TTM_PAGE_FLAG_NO_RETRY;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_PAGE_FLAG_SG;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}

	bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	return 0;
}
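
/*
 * A minimal sketch of the driver hook that ttm_tt_create() invokes through
 * bdev->driver->ttm_tt_create. This is a hypothetical example (the names
 * example_ttm_tt_create and example_backend_func are assumptions, not part
 * of this file; the backend table is sketched near ttm_tt_bind() below).
 * Real drivers typically embed struct ttm_tt or struct ttm_dma_tt in a
 * larger driver-private structure instead of a bare kzalloc().
 */
#if 0
static struct ttm_tt *example_ttm_tt_create(struct ttm_buffer_object *bo,
					    uint32_t page_flags)
{
	struct ttm_tt *tt;

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (!tt)
		return NULL;

	/* Hook up the driver's bind/unbind/destroy backend first. */
	tt->func = &example_backend_func;
	if (ttm_tt_init(tt, bo, page_flags)) {
		kfree(tt);
		return NULL;
	}
	return tt;
}
#endif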

/*
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(*ttm->pages),
				    GFP_KERNEL | __GFP_ZERO);
	if (!ttm->pages)
		return -ENOMEM;
	return 0;
}

static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	/*
	 * Allocate the page and DMA-address arrays as a single chunk:
	 * num_pages page pointers followed by num_pages dma_addr_t slots.
	 */
	ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
					sizeof(*ttm->ttm.pages) +
					sizeof(*ttm->dma_address),
					GFP_KERNEL | __GFP_ZERO);
	if (!ttm->ttm.pages)
		return -ENOMEM;
	/* dma_address points just past the page-pointer array. */
	ttm->dma_address = (void *)(ttm->ttm.pages + ttm->ttm.num_pages);
	return 0;
}

static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	/*
	 * SG-backed ttms get their pages from an external scatter-gather
	 * table, so only the DMA-address array is needed here.
	 */
	ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
					  sizeof(*ttm->dma_address),
					  GFP_KERNEL | __GFP_ZERO);
	if (!ttm->dma_address)
		return -ENOMEM;
	return 0;
}

static int ttm_tt_set_page_caching(struct page *p,
				   enum ttm_caching_state c_old,
				   enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/*
		 * p isn't in the default caching state, set it to
		 * writeback first to free its current memtype.
		 */
		ret = ttm_set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = ttm_set_pages_wc(p, 1);
	else if (c_new == tt_uncached)
		ret = ttm_set_pages_uc(p, 1);

	return ret;
}

/*
 * Change caching policy for the linear kernel map
 * for a range of pages in a ttm.
 */
static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	/* Roll back the pages already converted to the previous state. */
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);
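
/*
 * Usage sketch (hypothetical driver code, not taken from this file): before
 * binding pages into a write-combined aperture, a driver would typically do
 *
 *	ret = ttm_tt_set_placement_caching(bo->ttm, TTM_PL_FLAG_WC);
 *
 * which maps the placement flag to tt_wc and rewrites the linear kernel
 * map attributes for every backing page of the ttm.
 */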

void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	if (ttm == NULL)
		return;

	ttm_tt_unbind(bdev, ttm);

	if (ttm->state == tt_unbound)
		ttm_tt_unpopulate(bdev, ttm);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	ttm->swap_storage = NULL;
	ttm->func->destroy(bdev, ttm);
}

static void ttm_tt_init_fields(struct ttm_tt *ttm,
			       struct ttm_buffer_object *bo,
			       uint32_t page_flags)
{
	ttm->num_pages = bo->num_pages;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;
	ttm->sg = bo->sg;
}

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags)
{
	ttm_tt_init_fields(ttm, bo, page_flags);

	if (ttm_tt_alloc_page_directory(ttm)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
	kvfree(ttm->pages);
	ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
		    uint32_t page_flags)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	ttm_tt_init_fields(ttm, bo, page_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
		   uint32_t page_flags)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	if (page_flags & TTM_PAGE_FLAG_SG)
		ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (ret) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);

void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	/*
	 * For ttm_dma_tt_init() the page and DMA-address arrays share a
	 * single allocation, so freeing ttm->pages covers both; in the
	 * TTM_PAGE_FLAG_SG case only dma_address was allocated.
	 */
	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm_dma->dma_address);
	ttm->pages = NULL;
	ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);

void ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	if (ttm->state == tt_bound) {
		ttm->func->unbind(bdev, ttm);
		ttm->state = tt_unbound;
	}
}

int ttm_tt_bind(struct ttm_bo_device *bdev,
		struct ttm_tt *ttm, struct ttm_resource *bo_mem,
		struct ttm_operation_ctx *ctx)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	ret = ttm_tt_populate(bdev, ttm, ctx);
	if (ret)
		return ret;

	ret = ttm->func->bind(bdev, ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);
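
/*
 * A sketch of the backend that ttm_tt_bind()/ttm_tt_unbind() drive through
 * ttm->func. Hypothetical example code (example_bind, example_unbind,
 * example_destroy and example_backend_func are assumptions, not part of
 * this file); a real backend would program its GART/GTT from ttm->pages.
 */
#if 0
static int example_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
			struct ttm_resource *bo_mem)
{
	/* Map ttm->pages into the aperture described by bo_mem here. */
	return 0;
}

static void example_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	/* Tear the mapping down again. */
}

static void example_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	ttm_tt_fini(ttm);
	kfree(ttm);
}

static struct ttm_backend_func example_backend_func = {
	.bind = example_bind,
	.unbind = example_unbind,
	.destroy = example_destroy,
};
#endif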

int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		gfp_t gfp_mask = mapping_gfp_mask(swap_space);

		gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
		from_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL)) {
			/* Drop the shmem page reference before bailing out. */
			put_page(from_page);
			goto out_err;
		}

		copy_highpage(to_page, from_page);
		put_page(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	return ret;
}

int ttm_tt_swapout(struct ttm_bo_device *bdev,
		   struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (IS_ERR(swap_storage)) {
			pr_err("Failed allocating swap storage\n");
			return PTR_ERR(swap_storage);
		}
	} else {
		swap_storage = persistent_swap_storage;
	}

	swap_space = swap_storage->f_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		gfp_t gfp_mask = mapping_gfp_mask(swap_space);

		gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);

		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(bdev, ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
out_err:
	if (!persistent_swap_storage)
		fput(swap_storage);

	return ret;
}
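
/*
 * Note on the swap scheme: ttm_tt_swapout() copies the ttm's pages into a
 * shmem file (freshly created unless a persistent file is supplied) and
 * then releases the originals via ttm_tt_unpopulate(); ttm_tt_swapin()
 * reverses this once the page directory has been repopulated. A sketch of
 * the anonymous case, as an eviction path might issue it (hypothetical
 * call site, not taken from this file):
 *
 *	ret = ttm_tt_swapout(bdev, ttm, NULL);
 *
 * The TTM_PAGE_FLAG_NO_RETRY handling above adds __GFP_RETRY_MAYFAIL so
 * the copy can fail with -ENOMEM instead of invoking the OOM killer.
 */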

static void ttm_tt_add_mapping(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	pgoff_t i;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = bdev->dev_mapping;
}

int ttm_tt_populate(struct ttm_bo_device *bdev,
		    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (bdev->driver->ttm_tt_populate)
		ret = bdev->driver->ttm_tt_populate(bdev, ttm, ctx);
	else
		ret = ttm_pool_populate(ttm, ctx);
	if (!ret)
		ttm_tt_add_mapping(bdev, ttm);
	return ret;
}
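
/*
 * Sketch of the optional driver hooks consulted above and in
 * ttm_tt_unpopulate() below. Hypothetical example code (the example_*
 * names are assumptions, not part of this file); a driver that needs no
 * special handling can leave the hooks NULL and let the core
 * ttm_pool_populate()/ttm_pool_unpopulate() paths run instead.
 */
#if 0
static int example_ttm_tt_populate(struct ttm_bo_device *bdev,
				   struct ttm_tt *ttm,
				   struct ttm_operation_ctx *ctx)
{
	/* Driver-specific setup could go here. */
	return ttm_pool_populate(ttm, ctx);
}

static void example_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
				      struct ttm_tt *ttm)
{
	/* Driver-specific teardown could go here. */
	ttm_pool_unpopulate(ttm);
}
#endif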

static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
	pgoff_t i;
	struct page **page = ttm->pages;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i) {
		(*page)->mapping = NULL;
		(*page++)->index = 0;
	}
}

void ttm_tt_unpopulate(struct ttm_bo_device *bdev,
		       struct ttm_tt *ttm)
{
	if (ttm->state == tt_unpopulated)
		return;

	ttm_tt_clear_mapping(ttm);
	if (bdev->driver->ttm_tt_unpopulate)
		bdev->driver->ttm_tt_unpopulate(bdev, ttm);
	else
		ttm_pool_unpopulate(ttm);
}
501