/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/export.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

#include <drm/drm_cache.h>

#include "ttm_bo_internal.h"

struct ttm_transfer_obj {
	struct ttm_buffer_object base;
	struct ttm_buffer_object *bo;
};

int ttm_mem_io_reserve(struct ttm_device *bdev,
		       struct ttm_resource *mem)
{
	if (mem->bus.offset || mem->bus.addr)
		return 0;

	mem->bus.is_iomem = false;
	if (!bdev->funcs->io_mem_reserve)
		return 0;

	return bdev->funcs->io_mem_reserve(bdev, mem);
}

void ttm_mem_io_free(struct ttm_device *bdev,
		     struct ttm_resource *mem)
{
	if (!mem)
		return;

	if (!mem->bus.offset && !mem->bus.addr)
		return;

	if (bdev->funcs->io_mem_free)
		bdev->funcs->io_mem_free(bdev, mem);

	mem->bus.offset = 0;
	mem->bus.addr = NULL;
}

/**
 * ttm_move_memcpy - Helper to perform a memcpy ttm move operation.
 * @clear: Whether to clear rather than copy.
 * @num_pages: Number of pages of the operation.
 * @dst_iter: A struct ttm_kmap_iter representing the destination resource.
 * @src_iter: A struct ttm_kmap_iter representing the source resource.
 *
 * This function is intended to be able to run asynchronously under a
 * dma-fence if desired.
 */
void ttm_move_memcpy(bool clear,
		     u32 num_pages,
		     struct ttm_kmap_iter *dst_iter,
		     struct ttm_kmap_iter *src_iter)
{
	const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops;
	const struct ttm_kmap_iter_ops *src_ops = src_iter->ops;
	struct iosys_map src_map, dst_map;
	pgoff_t i;

	/* Single TTM move. NOP */
	if (dst_ops->maps_tt && src_ops->maps_tt)
		return;

	/* Don't move nonexistent data. Clear destination instead. */
	if (clear) {
		for (i = 0; i < num_pages; ++i) {
			dst_ops->map_local(dst_iter, &dst_map, i);
			if (dst_map.is_iomem)
				memset_io(dst_map.vaddr_iomem, 0, PAGE_SIZE);
			else
				memset(dst_map.vaddr, 0, PAGE_SIZE);
			if (dst_ops->unmap_local)
				dst_ops->unmap_local(dst_iter, &dst_map);
		}
		return;
	}

	for (i = 0; i < num_pages; ++i) {
		dst_ops->map_local(dst_iter, &dst_map, i);
		src_ops->map_local(src_iter, &src_map, i);

		drm_memcpy_from_wc(&dst_map, &src_map, PAGE_SIZE);

		if (src_ops->unmap_local)
			src_ops->unmap_local(src_iter, &src_map);
		if (dst_ops->unmap_local)
			dst_ops->unmap_local(dst_iter, &dst_map);
	}
}
EXPORT_SYMBOL(ttm_move_memcpy);

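/*
 * Usage sketch (illustrative, not part of this file): pairing a TT
 * iterator with an iomap iterator to copy a bo's system pages into an io
 * region, assuming the driver keeps a struct io_mapping for its aperture.
 * "vram_iomap", "dst_res" and "base_offs" are hypothetical driver values.
 *
 *	struct ttm_kmap_iter_iomap dst_io;
 *	struct ttm_kmap_iter_tt src_tt;
 *	struct ttm_kmap_iter *dst_iter, *src_iter;
 *
 *	dst_iter = ttm_kmap_iter_iomap_init(&dst_io, vram_iomap,
 *					    dst_res, base_offs);
 *	src_iter = ttm_kmap_iter_tt_init(&src_tt, bo->ttm);
 *	ttm_move_memcpy(false, PFN_UP(dst_res->size), dst_iter, src_iter);
 */
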
/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: operation context
 * @dst_mem: struct ttm_resource indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful,
 * free any old aperture space, and set (@dst_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @dst_mem.
 * Returns:
 * !0: Failure.
 */
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_resource *dst_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *dst_man =
		ttm_manager_type(bo->bdev, dst_mem->mem_type);
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_resource *src_mem = bo->resource;
	struct ttm_resource_manager *src_man;
	union {
		struct ttm_kmap_iter_tt tt;
		struct ttm_kmap_iter_linear_io io;
	} _dst_iter, _src_iter;
	struct ttm_kmap_iter *dst_iter, *src_iter;
	bool clear;
	int ret = 0;

	if (WARN_ON(!src_mem))
		return -EINVAL;

	src_man = ttm_manager_type(bdev, src_mem->mem_type);
	if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||
		    dst_man->use_tt)) {
		ret = ttm_bo_populate(bo, ctx);
		if (ret)
			return ret;
	}

	dst_iter = ttm_kmap_iter_linear_io_init(&_dst_iter.io, bdev, dst_mem);
	if (PTR_ERR(dst_iter) == -EINVAL && dst_man->use_tt)
		dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm);
	if (IS_ERR(dst_iter))
		return PTR_ERR(dst_iter);

	src_iter = ttm_kmap_iter_linear_io_init(&_src_iter.io, bdev, src_mem);
	if (PTR_ERR(src_iter) == -EINVAL && src_man->use_tt)
		src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm);
	if (IS_ERR(src_iter)) {
		ret = PTR_ERR(src_iter);
		goto out_src_iter;
	}

	clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
	if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
		ttm_move_memcpy(clear, PFN_UP(dst_mem->size), dst_iter, src_iter);

	if (!src_iter->ops->maps_tt)
		ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
	ttm_bo_move_sync_cleanup(bo, dst_mem);

out_src_iter:
	if (!dst_iter->ops->maps_tt)
		ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

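/*
 * Usage sketch (illustrative, not part of this file): a driver's
 * ttm_device_funcs.move() callback falling back to the memcpy move when
 * no copy engine is available for the transfer. All "my_*" names are
 * hypothetical.
 *
 *	static int my_bo_move(struct ttm_buffer_object *bo, bool evict,
 *			      struct ttm_operation_ctx *ctx,
 *			      struct ttm_resource *new_mem,
 *			      struct ttm_place *hop)
 *	{
 *		if (!my_engine_usable(bo))
 *			return ttm_bo_move_memcpy(bo, ctx, new_mem);
 *
 *		return my_accel_move(bo, evict, ctx, new_mem);
 *	}
 */
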
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	struct ttm_transfer_obj *fbo;

	fbo = container_of(bo, struct ttm_transfer_obj, base);
	dma_resv_fini(&fbo->base.base._resv);
	ttm_bo_put(fbo->bo);
	kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_transfer_obj *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	fbo->base = *bo;

	/*
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	atomic_inc(&ttm_glob.bo_count);
	drm_vma_node_reset(&fbo->base.base.vma_node);

	kref_init(&fbo->base.kref);
	fbo->base.destroy = &ttm_transfered_destroy;
	fbo->base.pin_count = 0;
	if (bo->type != ttm_bo_type_sg)
		fbo->base.base.resv = &fbo->base.base._resv;

	dma_resv_init(&fbo->base.base._resv);
	fbo->base.base.dev = NULL;
	ret = dma_resv_trylock(&fbo->base.base._resv);
	WARN_ON(!ret);

	ret = dma_resv_reserve_fences(&fbo->base.base._resv, TTM_NUM_MOVE_FENCES);
	if (ret) {
		dma_resv_unlock(&fbo->base.base._resv);
		kfree(fbo);
		return ret;
	}

	if (fbo->base.resource) {
		ttm_resource_set_bo(fbo->base.resource, &fbo->base);
		bo->resource = NULL;
		ttm_bo_set_bulk_move(&fbo->base, NULL);
	} else {
		fbo->base.bulk_move = NULL;
	}

	ttm_bo_get(bo);
	fbo->bo = bo;

	ttm_bo_move_to_lru_tail_unlocked(&fbo->base);

	*new_obj = &fbo->base;
	return 0;
}

/**
 * ttm_io_prot
 *
 * @bo: ttm buffer object
 * @res: ttm resource object
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @res, or by
 * @bo's ttm when the resource is TT-backed.
 */
pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
		     pgprot_t tmp)
{
	struct ttm_resource_manager *man;
	enum ttm_caching caching;

	man = ttm_manager_type(bo->bdev, res->mem_type);
	if (man->use_tt) {
		caching = bo->ttm->caching;
		if (bo->ttm->page_flags & TTM_TT_FLAG_DECRYPTED)
			tmp = pgprot_decrypted(tmp);
	} else {
		caching = res->bus.caching;
	}

	return ttm_prot_from_caching(caching, tmp);
}
EXPORT_SYMBOL(ttm_io_prot);

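/*
 * Usage sketch (illustrative, not part of this file): computing the page
 * protection for a userspace mapping in a fault handler, in the style of
 * ttm_bo_vm; "vma", "address" and "pfn" are hypothetical fault-handler
 * locals.
 *
 *	pgprot_t prot;
 *
 *	prot = ttm_io_prot(bo, bo->resource,
 *			   vm_get_page_prot(vma->vm_flags));
 *	ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
 */
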
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = bo->resource;

	if (bo->resource->bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = ((u8 *)bo->resource->bus.addr) + offset;
	} else {
		resource_size_t res = bo->resource->bus.offset + offset;

		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->bus.caching == ttm_write_combined)
			map->virtual = ioremap_wc(res, size);
#ifdef CONFIG_X86
		else if (mem->bus.caching == ttm_cached)
			map->virtual = ioremap_cache(res, size);
#endif
		else
			map->virtual = ioremap(res, size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = bo->resource;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_resource_manager *man =
			ttm_manager_type(bo->bdev, bo->resource->mem_type);
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	ret = ttm_bo_populate(bo, &ctx);
	if (ret)
		return ret;

	if (num_pages == 1 && ttm->caching == ttm_cached &&
	    !(man->use_tt && (ttm->page_flags & TTM_TT_FLAG_DECRYPTED))) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

/**
 * ttm_bo_kmap_try_from_panic
 *
 * @bo: The buffer object
 * @page: The page to map
 *
 * Sets up a kernel virtual mapping using kmap_local_page_try_from_panic().
 * This should only be called from the panic handler, and only after making
 * sure the bo is the one being displayed, so that it is properly allocated
 * and protected.
 *
 * Returns the vaddr, which you can use to write to the bo, and which you
 * should pass to kunmap_local() when done with this page, or NULL if the
 * bo is in iomem.
 */
void *ttm_bo_kmap_try_from_panic(struct ttm_buffer_object *bo, unsigned long page)
{
	if (page + 1 > PFN_UP(bo->resource->size))
		return NULL;

	if (!bo->resource->bus.is_iomem && bo->ttm->pages && bo->ttm->pages[page])
		return kmap_local_page_try_from_panic(bo->ttm->pages[page]);

	return NULL;
}
EXPORT_SYMBOL(ttm_bo_kmap_try_from_panic);

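/*
 * Usage sketch (illustrative, not part of this file): writing to the
 * framebuffer from a drm_panic callback; assumes the caller has already
 * verified that @bo is the bo being displayed.
 *
 *	void *vaddr = ttm_bo_kmap_try_from_panic(bo, page);
 *
 *	if (vaddr) {
 *		... draw into the page ...
 *		kunmap_local(vaddr);
 *	}
 */
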
/**
 * ttm_bo_kmap
 *
 * @bo: The buffer object.
 * @start_page: The first page to map.
 * @num_pages: Number of pages to map.
 * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
 *
 * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
 * data in the buffer object. The ttm_kmap_obj_virtual function can then be
 * used to obtain a virtual address to the data.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid range.
 */
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	unsigned long offset, size;
	int ret;

	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > PFN_UP(bo->resource->size))
		return -EINVAL;
	if ((start_page + num_pages) > PFN_UP(bo->resource->size))
		return -EINVAL;

	ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
	if (ret)
		return ret;
	if (!bo->resource->bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

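/*
 * Usage sketch (illustrative, not part of this file): mapping a whole bo,
 * accessing it through ttm_kmap_obj_virtual() and tearing the mapping
 * down again.
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *virtual;
 *	int ret;
 *
 *	ret = ttm_bo_kmap(bo, 0, PFN_UP(bo->resource->size), &map);
 *	if (ret)
 *		return ret;
 *
 *	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *	... access the data, honouring is_iomem ...
 *	ttm_bo_kunmap(&map);
 */
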
/**
 * ttm_bo_kunmap
 *
 * @map: Object describing the map to unmap.
 *
 * Unmaps a kernel map set up by ttm_bo_kmap.
 */
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	ttm_mem_io_free(map->bo->bdev, map->bo->resource);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

/**
 * ttm_bo_vmap
 *
 * @bo: The buffer object.
 * @map: pointer to a struct iosys_map representing the map.
 *
 * Sets up a kernel virtual mapping, using ioremap or vmap to the
 * data in the buffer object. The parameter @map returns the virtual
 * address as struct iosys_map. Unmap the buffer with ttm_bo_vunmap().
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid range.
 */
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
	struct ttm_resource *mem = bo->resource;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	ret = ttm_mem_io_reserve(bo->bdev, mem);
	if (ret)
		return ret;

	if (mem->bus.is_iomem) {
		void __iomem *vaddr_iomem;

		if (mem->bus.addr)
			vaddr_iomem = (void __iomem *)mem->bus.addr;
		else if (mem->bus.caching == ttm_write_combined)
			vaddr_iomem = ioremap_wc(mem->bus.offset,
						 bo->base.size);
#ifdef CONFIG_X86
		else if (mem->bus.caching == ttm_cached)
			vaddr_iomem = ioremap_cache(mem->bus.offset,
						    bo->base.size);
#endif
		else
			vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);

		if (!vaddr_iomem)
			return -ENOMEM;

		iosys_map_set_vaddr_iomem(map, vaddr_iomem);

	} else {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false
		};
		struct ttm_tt *ttm = bo->ttm;
		pgprot_t prot;
		void *vaddr;

		ret = ttm_bo_populate(bo, &ctx);
		if (ret)
			return ret;

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
		if (!vaddr)
			return -ENOMEM;

		iosys_map_set_vaddr(map, vaddr);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_vmap);

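/*
 * Usage sketch (illustrative, not part of this file): filling a bo
 * through an iosys_map while holding its reservation lock; "data" and
 * "size" are hypothetical.
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ret = ttm_bo_vmap(bo, &map);
 *	if (!ret) {
 *		iosys_map_memcpy_to(&map, 0, data, size);
 *		ttm_bo_vunmap(bo, &map);
 *	}
 *	dma_resv_unlock(bo->base.resv);
 */
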
/**
 * ttm_bo_vunmap
 *
 * @bo: The buffer object.
 * @map: Object describing the map to unmap.
 *
 * Unmaps a kernel map set up by ttm_bo_vmap().
 */
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
	struct ttm_resource *mem = bo->resource;

	dma_resv_assert_held(bo->base.resv);

	if (iosys_map_is_null(map))
		return;

	if (!map->is_iomem)
		vunmap(map->vaddr);
	else if (!mem->bus.addr)
		iounmap(map->vaddr_iomem);
	iosys_map_clear(map);

	ttm_mem_io_free(bo->bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_vunmap);

static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
				 bool dst_use_tt)
{
	long ret;

	ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
				    false, 15 * HZ);
	if (ret == 0)
		return -EBUSY;
	if (ret < 0)
		return ret;

	if (!dst_use_tt)
		ttm_bo_tt_destroy(bo);
	ttm_resource_free(bo, &bo->resource);
	return 0;
}

static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
				struct dma_fence *fence,
				bool dst_use_tt)
{
	struct ttm_buffer_object *ghost_obj;
	int ret;

	/*
	 * This should help pipeline ordinary buffer moves.
	 *
	 * Hang old buffer memory on a new buffer object,
	 * and leave it to be released when the GPU
	 * operation has completed.
	 */

	ret = ttm_buffer_object_transfer(bo, &ghost_obj);
	if (ret)
		return ret;

	dma_resv_add_fence(&ghost_obj->base._resv, fence,
			   DMA_RESV_USAGE_KERNEL);

	/*
	 * If we're not moving to fixed memory, the TTM object
	 * needs to stay alive. Otherwise hang it on the ghost
	 * bo to be unbound and destroyed.
	 */

	if (dst_use_tt)
		ghost_obj->ttm = NULL;
	else
		bo->ttm = NULL;

	dma_resv_unlock(&ghost_obj->base._resv);
	ttm_bo_put(ghost_obj);
	return 0;
}

static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
				       struct dma_fence *fence)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *from;
	struct dma_fence *tmp;
	int i;

	from = ttm_manager_type(bdev, bo->resource->mem_type);

	/*
	 * BO doesn't have a TTM we need to bind/unbind. Just remember
	 * this eviction and free up the allocation.
	 * The fence will be saved in the first free slot or in the slot
	 * already used to store a fence from the same context. Since
	 * drivers can't use more than TTM_NUM_MOVE_FENCES contexts for
	 * evictions we should always find a slot to use.
	 */
	spin_lock(&from->eviction_lock);
	for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) {
		tmp = from->eviction_fences[i];
		if (!tmp)
			break;
		if (fence->context != tmp->context)
			continue;
		if (dma_fence_is_later(fence, tmp)) {
			dma_fence_put(tmp);
			break;
		}
		goto unlock;
	}
	if (i < TTM_NUM_MOVE_FENCES) {
		from->eviction_fences[i] = dma_fence_get(fence);
	} else {
		WARN(1, "not enough fence slots for all fence contexts");
		spin_unlock(&from->eviction_lock);
		dma_fence_wait(fence, false);
		goto end;
	}

unlock:
	spin_unlock(&from->eviction_lock);
end:
	ttm_resource_free(bo, &bo->resource);
}

/**
 * ttm_bo_move_accel_cleanup - cleanup helper for hw copies
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @pipeline: evictions are to be pipelined.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      bool pipeline,
			      struct ttm_resource *new_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->resource->mem_type);
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	int ret = 0;

	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
	if (!evict)
		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
	else if (!from->use_tt && pipeline)
		ttm_bo_move_pipeline_evict(bo, fence);
	else
		ret = ttm_bo_wait_free_node(bo, man->use_tt);

	if (ret)
		return ret;

	ttm_bo_assign_mem(bo, new_mem);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);

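/*
 * Usage sketch (illustrative, not part of this file): a driver move()
 * path that schedules a copy on its DMA engine and lets TTM manage the
 * transition; "my_copy_job" is hypothetical and is assumed to return a
 * fence that signals when the copy completes.
 *
 *	struct dma_fence *fence;
 *
 *	fence = my_copy_job(bo, new_mem);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *
 *	ret = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
 *	dma_fence_put(fence);
 *	return ret;
 */
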
/**
 * ttm_bo_move_sync_cleanup - cleanup by waiting for the move to finish
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed
 * by the caller to be idle. Typically used after memcpy buffer moves.
 */
void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
			      struct ttm_resource *new_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	int ret;

	ret = ttm_bo_wait_free_node(bo, man->use_tt);
	if (WARN_ON(ret))
		return;

	ttm_bo_assign_mem(bo, new_mem);
}
EXPORT_SYMBOL(ttm_bo_move_sync_cleanup);

/**
 * ttm_bo_pipeline_gutting - purge the contents of a bo
 * @bo: The buffer object
 *
 * Purge the contents of a bo, async if the bo is not idle.
 * After a successful call, the bo is left unpopulated in
 * system placement. The function may wait uninterruptibly
 * for idle on OOM.
 *
 * Return: 0 if successful, negative error code on failure.
 */
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
	struct ttm_buffer_object *ghost;
	struct ttm_tt *ttm;
	int ret;

	/* If already idle, no need for ghost object dance. */
	if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP)) {
		if (!bo->ttm) {
			/* See comment below about clearing. */
			ret = ttm_tt_create(bo, true);
			if (ret)
				return ret;
		} else {
			ttm_tt_unpopulate(bo->bdev, bo->ttm);
			if (bo->type == ttm_bo_type_device)
				ttm_tt_mark_for_clear(bo->ttm);
		}
		ttm_resource_free(bo, &bo->resource);
		return 0;
	}

	/*
	 * We need an unpopulated ttm_tt after giving our current one,
	 * if any, to the ghost object. And we can't afford to fail
	 * creating one *after* the operation. If the bo subsequently gets
	 * resurrected, make sure it's cleared (if ttm_bo_type_device)
	 * to avoid leaking sensitive information to user-space.
	 */

	ttm = bo->ttm;
	bo->ttm = NULL;
	ret = ttm_tt_create(bo, true);
	swap(bo->ttm, ttm);
	if (ret)
		return ret;

	ret = ttm_buffer_object_transfer(bo, &ghost);
	if (ret)
		goto error_destroy_tt;

	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
	/* Last resort, wait for the BO to be idle when we are OOM */
	if (ret) {
		dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
				      false, MAX_SCHEDULE_TIMEOUT);
	}

	dma_resv_unlock(&ghost->base._resv);
	ttm_bo_put(ghost);
	bo->ttm = ttm;
	return 0;

error_destroy_tt:
	ttm_tt_destroy(bo->bdev, ttm);
	return ret;
}

static bool ttm_lru_walk_trylock(struct ttm_bo_lru_cursor *curs,
				 struct ttm_buffer_object *bo)
{
	struct ttm_operation_ctx *ctx = curs->arg->ctx;

	curs->needs_unlock = false;

	if (dma_resv_trylock(bo->base.resv)) {
		curs->needs_unlock = true;
		return true;
	}

	if (bo->base.resv == ctx->resv && ctx->allow_res_evict) {
		dma_resv_assert_held(bo->base.resv);
		return true;
	}

	return false;
}

static int ttm_lru_walk_ticketlock(struct ttm_bo_lru_cursor *curs,
				   struct ttm_buffer_object *bo)
{
	struct ttm_lru_walk_arg *arg = curs->arg;
	struct dma_resv *resv = bo->base.resv;
	int ret;

	if (arg->ctx->interruptible)
		ret = dma_resv_lock_interruptible(resv, arg->ticket);
	else
		ret = dma_resv_lock(resv, arg->ticket);

	if (!ret) {
		curs->needs_unlock = true;
		/*
		 * Only a single ticketlock per loop. Ticketlocks are prone
		 * to return -EDEADLK causing the eviction to fail, so
		 * after waiting for the ticketlock, revert back to
		 * trylocking for this walk.
		 */
		arg->ticket = NULL;
	} else if (ret == -EDEADLK) {
		/* Caller needs to exit the ww transaction. */
		ret = -ENOSPC;
	}

	return ret;
}

/**
 * ttm_lru_walk_for_evict() - Perform a LRU list walk, with actions taken on
 * valid items.
 * @walk: describe the walks and actions taken
 * @bdev: The TTM device.
 * @man: The struct ttm_resource manager whose LRU lists we're walking.
 * @target: The end condition for the walk.
 *
 * The LRU lists of @man are walked, and for each struct ttm_resource
 * encountered, the corresponding ttm_buffer_object is locked and taken a
 * reference on, and the LRU lock is dropped. The LRU lock may be dropped
 * before locking and, in that case, it's verified that the item actually
 * remains on the LRU list after the lock, and that the buffer object didn't
 * switch resource in between.
 *
 * With a locked object, the actions indicated by @walk->process_bo are
 * performed, and after that, the bo is unlocked, the refcount dropped and the
 * next struct ttm_resource is processed. Here, the walker relies on
 * TTM's restartable LRU list implementation.
 *
 * Typically @walk->process_bo() would return the number of pages evicted,
 * swapped or shrunken, so that when the total exceeds @target, or when the
 * LRU list has been walked in full, iteration is terminated. It's also
 * terminated on error. Note that the definition of @target is done by the
 * caller, it could have a different meaning than the number of pages.
 *
 * Note that the way dma_resv individualization is done, locking needs to be
 * done either with the LRU lock held (trylocking only) or with a reference
 * on the object.
 *
 * Return: The progress made towards target or negative error code on error.
 */
s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
			   struct ttm_resource_manager *man, s64 target)
{
	struct ttm_bo_lru_cursor cursor;
	struct ttm_buffer_object *bo;
	s64 progress = 0;
	s64 lret;

	ttm_bo_lru_for_each_reserved_guarded(&cursor, man, &walk->arg, bo) {
		lret = walk->ops->process_bo(walk, bo);
		if (lret == -EBUSY || lret == -EALREADY)
			lret = 0;
		progress = (lret < 0) ? lret : progress + lret;
		if (progress < 0 || progress >= target)
			break;
	}
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return progress;
}
EXPORT_SYMBOL(ttm_lru_walk_for_evict);

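/*
 * Usage sketch (illustrative, not part of this file): a minimal eviction
 * walk, assuming the struct ttm_lru_walk_ops declaration from
 * drm/ttm/ttm_bo.h. The "my_*" names are hypothetical and the
 * process_bo() body is elided.
 *
 *	static s64 my_process_bo(struct ttm_lru_walk *walk,
 *				 struct ttm_buffer_object *bo)
 *	{
 *		... evict or shrink @bo, return pages processed ...
 *	}
 *
 *	static const struct ttm_lru_walk_ops my_walk_ops = {
 *		.process_bo = my_process_bo,
 *	};
 *
 *	struct ttm_lru_walk walk = {
 *		.ops = &my_walk_ops,
 *		.arg = {
 *			.ctx = ctx,
 *			.trylock_only = true,
 *		},
 *	};
 *	s64 progress = ttm_lru_walk_for_evict(&walk, bdev, man, target);
 */
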
static void ttm_bo_lru_cursor_cleanup_bo(struct ttm_bo_lru_cursor *curs)
{
	struct ttm_buffer_object *bo = curs->bo;

	if (bo) {
		if (curs->needs_unlock)
			dma_resv_unlock(bo->base.resv);
		ttm_bo_put(bo);
		curs->bo = NULL;
	}
}

/**
 * ttm_bo_lru_cursor_fini() - Stop using a struct ttm_bo_lru_cursor
 * and clean up any iteration it was used for.
 * @curs: The cursor.
 */
void ttm_bo_lru_cursor_fini(struct ttm_bo_lru_cursor *curs)
{
	spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;

	ttm_bo_lru_cursor_cleanup_bo(curs);
	spin_lock(lru_lock);
	ttm_resource_cursor_fini(&curs->res_curs);
	spin_unlock(lru_lock);
}
EXPORT_SYMBOL(ttm_bo_lru_cursor_fini);

/**
 * ttm_bo_lru_cursor_init() - Initialize a struct ttm_bo_lru_cursor
 * @curs: The ttm_bo_lru_cursor to initialize.
 * @man: The ttm resource_manager whose LRU lists to iterate over.
 * @arg: The ttm_lru_walk_arg to govern the walk.
 *
 * Initialize a struct ttm_bo_lru_cursor.
 *
 * Return: Pointer to @curs. The function does not fail.
 */
struct ttm_bo_lru_cursor *
ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs,
		       struct ttm_resource_manager *man,
		       struct ttm_lru_walk_arg *arg)
{
	memset(curs, 0, sizeof(*curs));
	ttm_resource_cursor_init(&curs->res_curs, man);
	curs->arg = arg;

	return curs;
}
EXPORT_SYMBOL(ttm_bo_lru_cursor_init);

static struct ttm_buffer_object *
__ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs)
{
	spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
	struct ttm_resource *res = NULL;
	struct ttm_buffer_object *bo;
	struct ttm_lru_walk_arg *arg = curs->arg;
	bool first = !curs->bo;

	ttm_bo_lru_cursor_cleanup_bo(curs);

	spin_lock(lru_lock);
	for (;;) {
		int mem_type, ret = 0;
		bool bo_locked = false;

		if (first) {
			res = ttm_resource_manager_first(&curs->res_curs);
			first = false;
		} else {
			res = ttm_resource_manager_next(&curs->res_curs);
		}
		if (!res)
			break;

		bo = res->bo;
		if (ttm_lru_walk_trylock(curs, bo))
			bo_locked = true;
		else if (!arg->ticket || arg->ctx->no_wait_gpu || arg->trylock_only)
			continue;

		if (!ttm_bo_get_unless_zero(bo)) {
			if (curs->needs_unlock)
				dma_resv_unlock(bo->base.resv);
			continue;
		}

		mem_type = res->mem_type;
		spin_unlock(lru_lock);
		if (!bo_locked)
			ret = ttm_lru_walk_ticketlock(curs, bo);

		/*
		 * Note that in between the release of the lru lock and the
		 * ticketlock, the bo may have switched resource,
		 * and also memory type, since the resource may have been
		 * freed and allocated again with a different memory type.
		 * In that case, just skip it.
		 */
		curs->bo = bo;
		if (!ret && bo->resource && bo->resource->mem_type == mem_type)
			return bo;

		ttm_bo_lru_cursor_cleanup_bo(curs);
		if (ret && ret != -EALREADY)
			return ERR_PTR(ret);

		spin_lock(lru_lock);
	}

	spin_unlock(lru_lock);
	return res ? bo : NULL;
}

/**
 * ttm_bo_lru_cursor_next() - Continue iterating a manager's LRU lists
 * to find and lock a buffer object.
 * @curs: The cursor initialized using ttm_bo_lru_cursor_init() and
 * ttm_bo_lru_cursor_first().
 *
 * Return: A pointer to a locked and reference-counted buffer object,
 * or NULL if none could be found and looping should be terminated.
 */
struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs)
{
	return __ttm_bo_lru_cursor_next(curs);
}
EXPORT_SYMBOL(ttm_bo_lru_cursor_next);

/**
 * ttm_bo_lru_cursor_first() - Start iterating a manager's LRU lists
 * to find and lock a buffer object.
 * @curs: The cursor initialized using ttm_bo_lru_cursor_init().
 *
 * Return: A pointer to a locked and reference-counted buffer object,
 * or NULL if none could be found and looping should be terminated.
 */
struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs)
{
	ttm_bo_lru_cursor_cleanup_bo(curs);
	return __ttm_bo_lru_cursor_next(curs);
}
EXPORT_SYMBOL(ttm_bo_lru_cursor_first);

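/*
 * Usage sketch (illustrative, not part of this file): open-coded
 * iteration over a manager's LRU lists. On error the first()/next()
 * calls may also return an ERR_PTR(), hence the IS_ERR_OR_NULL() check.
 *
 *	struct ttm_lru_walk_arg arg = { .ctx = ctx, .trylock_only = true };
 *	struct ttm_bo_lru_cursor curs;
 *	struct ttm_buffer_object *bo;
 *
 *	ttm_bo_lru_cursor_init(&curs, man, &arg);
 *	for (bo = ttm_bo_lru_cursor_first(&curs); !IS_ERR_OR_NULL(bo);
 *	     bo = ttm_bo_lru_cursor_next(&curs)) {
 *		... bo is locked and referenced here ...
 *	}
 *	ttm_bo_lru_cursor_fini(&curs);
 */
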
/**
 * ttm_bo_shrink() - Helper to shrink a ttm buffer object.
 * @ctx: The struct ttm_operation_ctx used for the shrinking operation.
 * @bo: The buffer object.
 * @flags: Flags governing the shrinking behaviour.
 *
 * The function uses the ttm_tt_backup() functionality to back up or
 * purge a struct ttm_tt. If the bo is not in system, it's first
 * moved there.
 *
 * Return: The number of pages shrunken or purged, or
 * negative error code on failure.
 */
long ttm_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
		   const struct ttm_bo_shrink_flags flags)
{
	static const struct ttm_place sys_placement_flags = {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = 0,
	};
	static struct ttm_placement sys_placement = {
		.num_placement = 1,
		.placement = &sys_placement_flags,
	};
	struct ttm_tt *tt = bo->ttm;
	long lret;

	dma_resv_assert_held(bo->base.resv);

	if (flags.allow_move && bo->resource->mem_type != TTM_PL_SYSTEM) {
		int ret = ttm_bo_validate(bo, &sys_placement, ctx);

		/* Consider -ENOMEM and -ENOSPC non-fatal. */
		if (ret) {
			if (ret == -ENOMEM || ret == -ENOSPC)
				ret = -EBUSY;
			return ret;
		}
	}

	ttm_bo_unmap_virtual(bo);
	lret = ttm_bo_wait_ctx(bo, ctx);
	if (lret < 0)
		return lret;

	if (bo->bulk_move) {
		spin_lock(&bo->bdev->lru_lock);
		ttm_resource_del_bulk_move(bo->resource, bo);
		spin_unlock(&bo->bdev->lru_lock);
	}

	lret = ttm_tt_backup(bo->bdev, tt, (struct ttm_backup_flags)
			     {.purge = flags.purge,
			      .writeback = flags.writeback});

	if (lret <= 0 && bo->bulk_move) {
		spin_lock(&bo->bdev->lru_lock);
		ttm_resource_add_bulk_move(bo->resource, bo);
		spin_unlock(&bo->bdev->lru_lock);
	}

	if (lret < 0 && lret != -EINTR)
		return -EBUSY;

	return lret;
}
EXPORT_SYMBOL(ttm_bo_shrink);

/**
 * ttm_bo_shrink_suitable() - Whether a bo is suitable for shrinking
 * @bo: The candidate for shrinking.
 * @ctx: The struct ttm_operation_ctx governing the shrinking.
 *
 * Check whether the object, given the information available to TTM,
 * is suitable for shrinking. This function can and should be used
 * before attempting to shrink an object.
 *
 * Return: true if suitable. false if not.
 */
bool ttm_bo_shrink_suitable(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
{
	return bo->ttm && ttm_tt_is_populated(bo->ttm) && !bo->pin_count &&
		(!ctx->no_wait_gpu ||
		 dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP));
}
EXPORT_SYMBOL(ttm_bo_shrink_suitable);

/**
 * ttm_bo_shrink_avoid_wait() - Whether to avoid waiting for GPU
 * during shrinking
 *
 * In some situations, like direct reclaim, waiting (in particular gpu waiting)
 * should be avoided since it may stall a system that could otherwise make
 * progress by shrinking something else that is less time-consuming.
 *
 * Return: true if gpu waiting should be avoided, false if not.
 */
bool ttm_bo_shrink_avoid_wait(void)
{
	return !current_is_kswapd();
}
EXPORT_SYMBOL(ttm_bo_shrink_avoid_wait);

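/*
 * Usage sketch (illustrative, not part of this file): the expected shape
 * of a driver shrinker scan path combining the three helpers above.
 * Locking of @bo is elided and the flag choices are illustrative only.
 *
 *	struct ttm_operation_ctx ctx = {
 *		.interruptible = false,
 *		.no_wait_gpu = ttm_bo_shrink_avoid_wait(),
 *	};
 *	long shrunk = 0;
 *
 *	if (ttm_bo_shrink_suitable(bo, &ctx))
 *		shrunk = ttm_bo_shrink(&ctx, bo,
 *				       (struct ttm_bo_shrink_flags) {
 *						.writeback = true,
 *						.allow_move = true,
 *				       });
 */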