// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

/* Pooling of allocated pages is necessary because changing the caching
 * attributes of the linear mapping on x86 requires a costly cross-CPU TLB
 * invalidation for those addresses.
 *
 * In addition, allocations from the DMA coherent API are pooled as well
 * because they are rather slow compared to alloc_pages+map.
 */

#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/ttm/ttm_pool.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_bo.h>

#include "ttm_module.h"

/**
 * struct ttm_pool_dma - Helper object for coherent DMA mappings
 *
 * @addr: original DMA address returned for the mapping
 * @vaddr: original vaddr returned for the mapping, with the allocation order
 * stored in the lower bits
 */
struct ttm_pool_dma {
	dma_addr_t addr;
	unsigned long vaddr;
};
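
/*
 * Illustrative sketch (not part of the driver): since the vaddr returned by
 * dma_alloc_attrs() for these allocations is at least PAGE_SIZE aligned, its
 * low bits are zero and can carry the allocation order, as done in
 * ttm_pool_alloc_page() and decoded again in ttm_pool_free_page() and
 * ttm_pool_page_order():
 *
 *	dma->vaddr = (unsigned long)vaddr | order;	// pack
 *	vaddr = (void *)(dma->vaddr & PAGE_MASK);	// unpack the address
 *	order = dma->vaddr & ~PAGE_MASK;		// unpack the order
 */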

/**
 * struct ttm_pool_alloc_state - Current state of the tt page allocation process
 * @pages: Pointer to the next tt page pointer to populate.
 * @caching_divide: Pointer to the first page pointer whose page has a staged but
 * not committed caching transition from write-back to @tt_caching.
 * @dma_addr: Pointer to the next tt dma_address entry to populate if any.
 * @remaining_pages: Remaining pages to populate.
 * @tt_caching: The requested cpu-caching for the pages allocated.
 */
struct ttm_pool_alloc_state {
	struct page **pages;
	struct page **caching_divide;
	dma_addr_t *dma_addr;
	pgoff_t remaining_pages;
	enum ttm_caching tt_caching;
};
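
/*
 * Illustrative sketch (not part of the driver) of how @caching_divide splits
 * the page vector while ttm_pool_alloc() is running:
 *
 *	tt->pages: [ treated as @tt_caching ][ still write-back, staged ]
 *	            ^tt->pages                ^caching_divide             ^pages
 *
 * On failure, ttm_pool_alloc() frees everything before @caching_divide with
 * the requested caching and everything after it as write-back cached; see the
 * ttm_pool_free_range() calls in its error path.
 */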

static unsigned long page_pool_size;

MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
module_param(page_pool_size, ulong, 0644);

static atomic_long_t allocated_pages;

static struct ttm_pool_type global_write_combined[NR_PAGE_ORDERS];
static struct ttm_pool_type global_uncached[NR_PAGE_ORDERS];

static struct ttm_pool_type global_dma32_write_combined[NR_PAGE_ORDERS];
static struct ttm_pool_type global_dma32_uncached[NR_PAGE_ORDERS];

static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker *mm_shrinker;
static DECLARE_RWSEM(pool_shrink_rwsem);

/* Allocate pages of size 1 << order with the given gfp_flags */
static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
					unsigned int order)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	struct page *p;
	void *vaddr;

	/* Don't set the __GFP_COMP flag for higher order allocations.
	 * Mapping pages directly into a userspace process and calling
	 * put_page() on a TTM allocated page is illegal.
	 */
	if (order)
		gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
			__GFP_THISNODE;

	if (!pool->use_dma_alloc) {
		p = alloc_pages_node(pool->nid, gfp_flags, order);
		if (p)
			p->private = order;
		return p;
	}

	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
				&dma->addr, gfp_flags, attr);
	if (!vaddr)
		goto error_free;

	/* TODO: This is an illegal abuse of the DMA API, but we need to rework
	 * TTM page fault handling and extend the DMA API to clean this up.
	 */
	if (is_vmalloc_addr(vaddr))
		p = vmalloc_to_page(vaddr);
	else
		p = virt_to_page(vaddr);

	dma->vaddr = (unsigned long)vaddr | order;
	p->private = (unsigned long)dma;
	return p;

error_free:
	kfree(dma);
	return NULL;
}

/* Reset the caching and free pages of size 1 << order */
static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
			       unsigned int order, struct page *p)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	void *vaddr;

#ifdef CONFIG_X86
	/* We don't care that set_pages_wb is inefficient here. This is only
	 * used when we have to shrink and CPU overhead is irrelevant then.
	 */
	if (caching != ttm_cached && !PageHighMem(p))
		set_pages_wb(p, 1 << order);
#endif

	if (!pool || !pool->use_dma_alloc) {
		__free_pages(p, order);
		return;
	}

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	dma = (void *)p->private;
	vaddr = (void *)(dma->vaddr & PAGE_MASK);
	dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
		       attr);
	kfree(dma);
}

/* Apply any cpu-caching deferred during page allocation */
static int ttm_pool_apply_caching(struct ttm_pool_alloc_state *alloc)
{
#ifdef CONFIG_X86
	unsigned int num_pages = alloc->pages - alloc->caching_divide;

	if (!num_pages)
		return 0;

	switch (alloc->tt_caching) {
	case ttm_cached:
		break;
	case ttm_write_combined:
		return set_pages_array_wc(alloc->caching_divide, num_pages);
	case ttm_uncached:
		return set_pages_array_uc(alloc->caching_divide, num_pages);
	}
#endif
	alloc->caching_divide = alloc->pages;
	return 0;
}

/* Map pages of 1 << order size and fill the DMA address array */
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
			struct page *p, dma_addr_t **dma_addr)
{
	dma_addr_t addr;
	unsigned int i;

	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		addr = dma->addr;
	} else {
		size_t size = (1ULL << order) * PAGE_SIZE;

		addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(pool->dev, addr))
			return -EFAULT;
	}

	for (i = 1 << order; i ; --i) {
		*(*dma_addr)++ = addr;
		addr += PAGE_SIZE;
	}

	return 0;
}
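
/*
 * Illustrative example (not from the source): for an order 2 page starting at
 * DMA address A, ttm_pool_map() writes one entry per PAGE_SIZE chunk and
 * advances the caller's cursor:
 *
 *	dma_addr[0] = A;
 *	dma_addr[1] = A + PAGE_SIZE;
 *	dma_addr[2] = A + 2 * PAGE_SIZE;
 *	dma_addr[3] = A + 3 * PAGE_SIZE;
 *
 * so tt->dma_address stays a flat, per-page array regardless of the
 * allocation order used internally.
 */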

/* Unmap pages of 1 << order size */
static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
			   unsigned int num_pages)
{
	/* Unmapped while freeing the page */
	if (pool->use_dma_alloc)
		return;

	dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
		       DMA_BIDIRECTIONAL);
}

/* Give pages into a specific pool_type */
static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
{
	unsigned int i, num_pages = 1 << pt->order;

	for (i = 0; i < num_pages; ++i) {
		if (PageHighMem(p))
			clear_highpage(p + i);
		else
			clear_page(page_address(p + i));
	}

	spin_lock(&pt->lock);
	list_add(&p->lru, &pt->pages);
	spin_unlock(&pt->lock);
	atomic_long_add(1 << pt->order, &allocated_pages);
}

/* Take pages from a specific pool_type, return NULL when nothing available */
static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
{
	struct page *p;

	spin_lock(&pt->lock);
	p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
	if (p) {
		atomic_long_sub(1 << pt->order, &allocated_pages);
		list_del(&p->lru);
	}
	spin_unlock(&pt->lock);

	return p;
}

/* Initialize and add a pool type to the global shrinker list */
static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
			       enum ttm_caching caching, unsigned int order)
{
	pt->pool = pool;
	pt->caching = caching;
	pt->order = order;
	spin_lock_init(&pt->lock);
	INIT_LIST_HEAD(&pt->pages);

	spin_lock(&shrinker_lock);
	list_add_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);
}

/* Remove a pool_type from the global shrinker list and free all pages */
static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
	struct page *p;

	spin_lock(&shrinker_lock);
	list_del(&pt->shrinker_list);
	spin_unlock(&shrinker_lock);

	while ((p = ttm_pool_type_take(pt)))
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
}

/* Return the pool_type to use for the given caching and order */
static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
						  enum ttm_caching caching,
						  unsigned int order)
{
	if (pool->use_dma_alloc)
		return &pool->caching[caching].orders[order];

#ifdef CONFIG_X86
	switch (caching) {
	case ttm_write_combined:
		if (pool->nid != NUMA_NO_NODE)
			return &pool->caching[caching].orders[order];

		if (pool->use_dma32)
			return &global_dma32_write_combined[order];

		return &global_write_combined[order];
	case ttm_uncached:
		if (pool->nid != NUMA_NO_NODE)
			return &pool->caching[caching].orders[order];

		if (pool->use_dma32)
			return &global_dma32_uncached[order];

		return &global_uncached[order];
	default:
		break;
	}
#endif

	return NULL;
}
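
/*
 * Illustrative examples (not from the source) of what the selection above
 * means in practice:
 *
 *	- Pools with use_dma_alloc always use their own pool->caching[][]
 *	  entries.
 *	- On x86, a pool without use_dma_alloc and without a dedicated NUMA
 *	  node shares the global_(dma32_)write_combined/uncached pools with
 *	  other devices.
 *	- ttm_cached pages of a pool without use_dma_alloc get no pool_type at
 *	  all (NULL), so callers free them straight back to the page allocator
 *	  instead of caching them.
 */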

/* Free pages using the global shrinker list */
static unsigned int ttm_pool_shrink(void)
{
	struct ttm_pool_type *pt;
	unsigned int num_pages;
	struct page *p;

	down_read(&pool_shrink_rwsem);
	spin_lock(&shrinker_lock);
	pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
	list_move_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);

	p = ttm_pool_type_take(pt);
	if (p) {
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
		num_pages = 1 << pt->order;
	} else {
		num_pages = 0;
	}
	up_read(&pool_shrink_rwsem);

	return num_pages;
}

/* Return the allocation order used for a page */
static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
{
	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		return dma->vaddr & ~PAGE_MASK;
	}

	return p->private;
}

/*
 * Called when we got a page, either from a pool or newly allocated.
 * If needed, DMA map the page and populate the DMA address array.
 * Populate the page address array.
 * If the caching is consistent, update any deferred caching. Otherwise
 * stage this page for an upcoming deferred caching update.
 */
static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
				   struct page *p, enum ttm_caching page_caching,
				   struct ttm_pool_alloc_state *alloc)
{
	pgoff_t i, nr = 1UL << order;
	bool caching_consistent;
	int r = 0;

	caching_consistent = (page_caching == alloc->tt_caching) || PageHighMem(p);

	if (caching_consistent) {
		r = ttm_pool_apply_caching(alloc);
		if (r)
			return r;
	}

	if (alloc->dma_addr) {
		r = ttm_pool_map(pool, order, p, &alloc->dma_addr);
		if (r)
			return r;
	}

	alloc->remaining_pages -= nr;
	for (i = 0; i < nr; ++i)
		*alloc->pages++ = p++;

	if (caching_consistent)
		alloc->caching_divide = alloc->pages;

	return 0;
}

/**
 * ttm_pool_free_range() - Free a range of TTM pages
 * @pool: The pool used for allocating.
 * @tt: The struct ttm_tt holding the page pointers.
 * @caching: The page caching mode used by the range.
 * @start_page: index of the first page to free.
 * @end_page: index of the last page to free + 1.
 *
 * During allocation the ttm_tt page-vector may be populated with ranges of
 * pages with different attributes if the allocation hit an error before it
 * could be completely fulfilled. This function can be used to free these
 * individual ranges.
 */
static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
				enum ttm_caching caching,
				pgoff_t start_page, pgoff_t end_page)
{
	struct page **pages = &tt->pages[start_page];
	unsigned int order;
	pgoff_t i, nr;

	for (i = start_page; i < end_page; i += nr, pages += nr) {
		struct ttm_pool_type *pt = NULL;

		order = ttm_pool_page_order(pool, *pages);
		nr = (1UL << order);
		if (tt->dma_address)
			ttm_pool_unmap(pool, tt->dma_address[i], nr);

		pt = ttm_pool_select_type(pool, caching, order);
		if (pt)
			ttm_pool_type_give(pt, *pages);
		else
			ttm_pool_free_page(pool, caching, order, *pages);
	}
}

static void ttm_pool_alloc_state_init(const struct ttm_tt *tt,
				      struct ttm_pool_alloc_state *alloc)
{
	alloc->pages = tt->pages;
	alloc->caching_divide = tt->pages;
	alloc->dma_addr = tt->dma_address;
	alloc->remaining_pages = tt->num_pages;
	alloc->tt_caching = tt->caching;
}

/*
 * Find a suitable allocation order based on highest desired order
 * and number of remaining pages
 */
static unsigned int ttm_pool_alloc_find_order(unsigned int highest,
					      const struct ttm_pool_alloc_state *alloc)
{
	return min_t(unsigned int, highest, __fls(alloc->remaining_pages));
}
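
/*
 * Worked example (not from the source): with MAX_PAGE_ORDER at its default of
 * 10 and 300 pages still to allocate, __fls(300) = 8, so the next attempt is
 * an order 8 (256 page) allocation; the remaining 44 pages are then covered
 * by order 5, order 3 and order 2 chunks on the following loop iterations,
 * assuming every allocation succeeds.
 */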

/**
 * ttm_pool_alloc - Fill a ttm_tt object
 *
 * @pool: ttm_pool to use
 * @tt: ttm_tt object to fill
 * @ctx: operation context
 *
 * Fill the ttm_tt object with pages and also make sure to DMA map them when
 * necessary.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
		   struct ttm_operation_ctx *ctx)
{
	struct ttm_pool_alloc_state alloc;
	enum ttm_caching page_caching;
	gfp_t gfp_flags = GFP_USER;
	pgoff_t caching_divide;
	unsigned int order;
	bool allow_pools;
	struct page *p;
	int r;

	ttm_pool_alloc_state_init(tt, &alloc);

	WARN_ON(!alloc.remaining_pages || ttm_tt_is_populated(tt));
	WARN_ON(alloc.dma_addr && !pool->dev);

	if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (ctx->gfp_retry_mayfail)
		gfp_flags |= __GFP_RETRY_MAYFAIL;

	if (pool->use_dma32)
		gfp_flags |= GFP_DMA32;
	else
		gfp_flags |= GFP_HIGHUSER;

	page_caching = tt->caching;
	allow_pools = true;
	for (order = ttm_pool_alloc_find_order(MAX_PAGE_ORDER, &alloc);
	     alloc.remaining_pages;
	     order = ttm_pool_alloc_find_order(order, &alloc)) {
		struct ttm_pool_type *pt;

		/* First, try to allocate a page from a pool if one exists. */
		p = NULL;
		pt = ttm_pool_select_type(pool, page_caching, order);
		if (pt && allow_pools)
			p = ttm_pool_type_take(pt);
		/*
		 * If that fails or previously failed, allocate from system.
		 * Note that this also disallows additional pool allocations using
		 * write-back cached pools of the same order. Consider removing
		 * that behaviour.
		 */
		if (!p) {
			page_caching = ttm_cached;
			allow_pools = false;
			p = ttm_pool_alloc_page(pool, gfp_flags, order);
		}
		/* If that fails, lower the order if possible and retry. */
		if (!p) {
			if (order) {
				--order;
				page_caching = tt->caching;
				allow_pools = true;
				continue;
			}
			r = -ENOMEM;
			goto error_free_all;
		}
		r = ttm_pool_page_allocated(pool, order, p, page_caching, &alloc);
		if (r)
			goto error_free_page;
	}

	r = ttm_pool_apply_caching(&alloc);
	if (r)
		goto error_free_all;

	return 0;

error_free_page:
	ttm_pool_free_page(pool, page_caching, order, p);

error_free_all:
	caching_divide = alloc.caching_divide - tt->pages;
	ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
	ttm_pool_free_range(pool, tt, ttm_cached, caching_divide,
			    tt->num_pages - alloc.remaining_pages);

	return r;
}
EXPORT_SYMBOL(ttm_pool_alloc);
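
/*
 * Usage sketch (not part of this file): ttm_pool_alloc() is normally reached
 * through a driver's ttm_device_funcs::ttm_tt_populate callback, operating on
 * the device-wide pool embedded in struct ttm_device. A minimal, hypothetical
 * callback could look like this:
 *
 *	static int mydrv_ttm_tt_populate(struct ttm_device *bdev,
 *					 struct ttm_tt *tt,
 *					 struct ttm_operation_ctx *ctx)
 *	{
 *		return ttm_pool_alloc(&bdev->pool, tt, ctx);
 *	}
 *
 * with the matching unpopulate callback handing the pages back via
 * ttm_pool_free(&bdev->pool, tt).
 */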

/**
 * ttm_pool_free - Free the backing pages from a ttm_tt object
 *
 * @pool: Pool to give pages back to.
 * @tt: ttm_tt object to unpopulate
 *
 * Give the backing pages back to a pool or free them
 */
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
{
	ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);

	while (atomic_long_read(&allocated_pages) > page_pool_size)
		ttm_pool_shrink();
}
EXPORT_SYMBOL(ttm_pool_free);

/**
 * ttm_pool_init - Initialize a pool
 *
 * @pool: the pool to initialize
 * @dev: device for DMA allocations and mappings
 * @nid: NUMA node to use for allocations
 * @use_dma_alloc: true if coherent DMA alloc should be used
 * @use_dma32: true if GFP_DMA32 should be used
 *
 * Initialize the pool and its pool types.
 */
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
		   int nid, bool use_dma_alloc, bool use_dma32)
{
	unsigned int i, j;

	WARN_ON(!dev && use_dma_alloc);

	pool->dev = dev;
	pool->nid = nid;
	pool->use_dma_alloc = use_dma_alloc;
	pool->use_dma32 = use_dma32;

	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		for (j = 0; j < NR_PAGE_ORDERS; ++j) {
			struct ttm_pool_type *pt;

			/* Initialize only pool types which are actually used */
			pt = ttm_pool_select_type(pool, i, j);
			if (pt != &pool->caching[i].orders[j])
				continue;

			ttm_pool_type_init(pt, pool, i, j);
		}
	}
}
EXPORT_SYMBOL(ttm_pool_init);

/**
 * ttm_pool_synchronize_shrinkers - Wait for all running shrinkers to complete.
 *
 * This is useful to guarantee that all shrinker invocations have seen an
 * update before freeing memory, similar to RCU.
 */
static void ttm_pool_synchronize_shrinkers(void)
{
	down_write(&pool_shrink_rwsem);
	up_write(&pool_shrink_rwsem);
}

/**
 * ttm_pool_fini - Cleanup a pool
 *
 * @pool: the pool to clean up
 *
 * Free all pages in the pool and unregister the types from the global
 * shrinker.
 */
void ttm_pool_fini(struct ttm_pool *pool)
{
	unsigned int i, j;

	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		for (j = 0; j < NR_PAGE_ORDERS; ++j) {
			struct ttm_pool_type *pt;

			pt = ttm_pool_select_type(pool, i, j);
			if (pt != &pool->caching[i].orders[j])
				continue;

			ttm_pool_type_fini(pt);
		}
	}

	/* We removed the pool types from the LRU, but we need to also make sure
	 * that no shrinker is concurrently freeing pages from the pool.
	 */
	ttm_pool_synchronize_shrinkers();
}
EXPORT_SYMBOL(ttm_pool_fini);
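
/*
 * Lifecycle sketch (illustrative, not taken from this file): a driver-owned
 * pool, as opposed to the per-device pool set up by ttm_device_init(), would
 * be used roughly like this; "mydev" and the NUMA node choice are
 * hypothetical:
 *
 *	struct ttm_pool pool;
 *
 *	ttm_pool_init(&pool, mydev, NUMA_NO_NODE, true, false);
 *	r = ttm_pool_alloc(&pool, tt, &ctx);	// populate a ttm_tt
 *	...
 *	ttm_pool_free(&pool, tt);		// give the pages back
 *	ttm_pool_fini(&pool);			// drain pools, sync shrinkers
 *
 * ttm_pool_fini() must only be called once no allocation from the pool is in
 * flight, since it frees everything the pool still caches.
 */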

/* As long as pages are available, make sure to release at least one */
static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	unsigned long num_freed = 0;

	do
		num_freed += ttm_pool_shrink();
	while (!num_freed && atomic_long_read(&allocated_pages));

	return num_freed;
}

/* Return the number of pages available or SHRINK_EMPTY if we have none */
static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	unsigned long num_pages = atomic_long_read(&allocated_pages);

	return num_pages ? num_pages : SHRINK_EMPTY;
}

#ifdef CONFIG_DEBUG_FS
/* Count the number of pages available in a pool_type */
static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
{
	unsigned int count = 0;
	struct page *p;

	spin_lock(&pt->lock);
	/* Only used for debugfs, the overhead doesn't matter */
	list_for_each_entry(p, &pt->pages, lru)
		++count;
	spin_unlock(&pt->lock);

	return count;
}

/* Print a nice header for the order */
static void ttm_pool_debugfs_header(struct seq_file *m)
{
	unsigned int i;

	seq_puts(m, "\t ");
	for (i = 0; i < NR_PAGE_ORDERS; ++i)
		seq_printf(m, " ---%2u---", i);
	seq_puts(m, "\n");
}

/* Dump information about the different pool types */
static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
				    struct seq_file *m)
{
	unsigned int i;

	for (i = 0; i < NR_PAGE_ORDERS; ++i)
		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
	seq_puts(m, "\n");
}

/* Dump the total amount of allocated pages */
static void ttm_pool_debugfs_footer(struct seq_file *m)
{
	seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
		   atomic_long_read(&allocated_pages), page_pool_size);
}

/* Dump the information for the global pools */
static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
{
	ttm_pool_debugfs_header(m);

	spin_lock(&shrinker_lock);
	seq_puts(m, "wc\t:");
	ttm_pool_debugfs_orders(global_write_combined, m);
	seq_puts(m, "uc\t:");
	ttm_pool_debugfs_orders(global_uncached, m);
	seq_puts(m, "wc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_write_combined, m);
	seq_puts(m, "uc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_uncached, m);
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_globals);

/**
 * ttm_pool_debugfs - Debugfs dump function for a pool
 *
 * @pool: the pool to dump the information for
 * @m: seq_file to dump to
 *
 * Make a debugfs dump with the per pool and global information.
 */
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
	unsigned int i;

	if (!pool->use_dma_alloc) {
		seq_puts(m, "unused\n");
		return 0;
	}

	ttm_pool_debugfs_header(m);

	spin_lock(&shrinker_lock);
	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		seq_puts(m, "DMA ");
		switch (i) {
		case ttm_cached:
			seq_puts(m, "\t:");
			break;
		case ttm_write_combined:
			seq_puts(m, "wc\t:");
			break;
		case ttm_uncached:
			seq_puts(m, "uc\t:");
			break;
		}
		ttm_pool_debugfs_orders(pool->caching[i].orders, m);
	}
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);
	return 0;
}
EXPORT_SYMBOL(ttm_pool_debugfs);

/* Test the shrinker functions and dump the result */
static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct shrink_control sc = { .gfp_mask = GFP_NOFS };

	fs_reclaim_acquire(GFP_KERNEL);
	seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(mm_shrinker, &sc),
		   ttm_pool_shrinker_scan(mm_shrinker, &sc));
	fs_reclaim_release(GFP_KERNEL);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink);

#endif

/**
 * ttm_pool_mgr_init - Initialize globals
 *
 * @num_pages: default number of pages
 *
 * Initialize the global locks and lists for the MM shrinker.
 */
int ttm_pool_mgr_init(unsigned long num_pages)
{
	unsigned int i;

	if (!page_pool_size)
		page_pool_size = num_pages;

	spin_lock_init(&shrinker_lock);
	INIT_LIST_HEAD(&shrinker_list);

	for (i = 0; i < NR_PAGE_ORDERS; ++i) {
		ttm_pool_type_init(&global_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);

		ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_dma32_uncached[i], NULL,
				   ttm_uncached, i);
	}

#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("page_pool", 0444, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_globals_fops);
	debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_shrink_fops);
#endif

	mm_shrinker = shrinker_alloc(0, "drm-ttm_pool");
	if (!mm_shrinker)
		return -ENOMEM;

	mm_shrinker->count_objects = ttm_pool_shrinker_count;
	mm_shrinker->scan_objects = ttm_pool_shrinker_scan;
	mm_shrinker->seeks = 1;

	shrinker_register(mm_shrinker);

	return 0;
}

/**
 * ttm_pool_mgr_fini - Finalize globals
 *
 * Cleanup the global pools and unregister the MM shrinker.
 */
void ttm_pool_mgr_fini(void)
{
	unsigned int i;

	for (i = 0; i < NR_PAGE_ORDERS; ++i) {
		ttm_pool_type_fini(&global_write_combined[i]);
		ttm_pool_type_fini(&global_uncached[i]);

		ttm_pool_type_fini(&global_dma32_write_combined[i]);
		ttm_pool_type_fini(&global_dma32_uncached[i]);
	}

	shrinker_free(mm_shrinker);
	WARN_ON(!list_empty(&shrinker_list));
}