/* xref: /freebsd/sys/dev/drm2/ttm/ttm_page_alloc_dma.c (revision 685dc743dc3b5645e34836464128e1c0558b404b) */
/*
 * Copyright 2011 (c) Oracle Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 */

/*
 * A simple DMA pool loosely based on dmapool.c. It has certain advantages
 * over the DMA pools:
 * - Pool collects recently freed pages for reuse (and hooks up to
 *   the shrinker).
 * - Tracks pages currently in use.
 * - Tracks whether the page is UC, WC or cached (and reverts to WB
 *   when freed).
 */

#include <sys/cdefs.h>
#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif

#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION		4
#define FREE_ALL_PAGES			(~0U)
#define IS_UNDEFINED			(0)
#define IS_WC				(1<<1)
#define IS_UC				(1<<2)
#define IS_CACHED			(1<<3)
#define IS_DMA32			(1<<4)
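
/*
 * Editor's note (worked example, assuming 4 KiB pages and 8-byte
 * pointers): NUM_PAGES_TO_ALLOC is sized so that one page can hold the
 * temporary 'struct page *' array used when batching frees, i.e.
 *
 *	NUM_PAGES_TO_ALLOC = PAGE_SIZE / sizeof(struct page *)
 *	                   = 4096 / 8 = 512 pages per batch.
 *
 * The IS_* values are distinct bits so a pool type can combine them,
 * e.g. IS_UC | IS_DMA32 == (1<<2) | (1<<4) == 0x14.
 */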

enum pool_type {
	POOL_IS_UNDEFINED,
	POOL_IS_WC = IS_WC,
	POOL_IS_UC = IS_UC,
	POOL_IS_CACHED = IS_CACHED,
	POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
	POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
	POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
};
/*
 * The pool structure. There are usually six pools:
 *  - generic (not restricted to DMA32):
 *      - write combined, uncached, cached.
 *  - dma32 (up to 2^32 - so up to 4GB):
 *      - write combined, uncached, cached.
 * for each 'struct device'. The 'cached' pool is for pages that are
 * actively used. The other ones can be shrunk by the shrinker API if
 * necessary.
 * @pools: The 'struct device->dma_pools' link.
 * @type: Type of the pool.
 * @lock: Protects the inuse_list and free_list from concurrent access. Must
 * be used with the irqsave/irqrestore variants because the pool allocator
 * may be called from delayed work.
 * @inuse_list: Pool of pages that are in use. The order is very important
 *   and it is in the order that the TTM pages that are put back are in.
 * @free_list: Pool of pages that are free to be used. No order requirements.
 * @dev: The device that is associated with these pools.
 * @size: Size used during DMA allocation.
 * @npages_free: Count of available pages for re-use.
 * @npages_in_use: Count of pages that are in use.
 * @nfrees: Stats when pool is shrinking.
 * @nrefills: Stats when the pool is grown.
 * @gfp_flags: Flags to pass for alloc_page.
 * @name: Name of the pool.
 * @dev_name: Name derived from dev - similar to how dev_info works.
 *   Used during shutdown as the dev_info during release is unavailable.
 */
struct dma_pool {
	struct list_head pools; /* The 'struct device->dma_pools' link */
	enum pool_type type;
	spinlock_t lock;
	struct list_head inuse_list;
	struct list_head free_list;
	struct device *dev;
	unsigned size;
	unsigned npages_free;
	unsigned npages_in_use;
	unsigned long nfrees; /* Stats when shrunk. */
	unsigned long nrefills; /* Stats when grown. */
	gfp_t gfp_flags;
	char name[13]; /* "cached dma32" */
	char dev_name[64]; /* Constructed from dev */
};

/*
 * The accounting page keeping track of the allocated page along with
 * the DMA address.
 * @page_list: The link to the 'page_list' in 'struct dma_pool'.
 * @vaddr: The virtual address of the page.
 * @p: The 'struct page' backing the allocation.
 * @dma: The bus address of the page. If the page is not allocated
 *   via the DMA API, it will be -1.
 */
struct dma_page {
	struct list_head page_list;
	void *vaddr;
	struct page *p;
	dma_addr_t dma;
};
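
/*
 * Editor's sketch (illustrative only): the invariants a dma_page
 * satisfies after a successful __ttm_dma_alloc_page() below, assuming a
 * coherent allocation of pool->size == PAGE_SIZE bytes:
 *
 *	struct dma_page *d = __ttm_dma_alloc_page(pool);
 *	if (d) {
 *		BUG_ON(d->p != virt_to_page(d->vaddr));
 *		__ttm_dma_free_page(pool, d);	// d->dma held the bus address
 *	}
 */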

/*
 * Limits for the pool. They are handled without locks because the only
 * place where they may change is the sysfs store. They won't have an
 * immediate effect anyway, so forcing serialization to access them is
 * pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

/*
 * Contains the list of all of the 'struct device' and their corresponding
 * DMA pools. Guarded by _manager->lock.
 * @pools: The link to 'struct ttm_pool_manager->pools'
 * @dev: The 'struct device' associated with the 'pool'
 * @pool: The 'struct dma_pool' associated with the 'dev'
 */
struct device_pools {
	struct list_head pools;
	struct device *dev;
	struct dma_pool *pool;
};

/*
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * @lock: Lock used when adding/removing from pools
 * @pools: List of 'struct device' and 'struct dma_pool' tuples.
 * @options: Limits for the pool.
 * @npools: Total number of pools in existence.
 * @mm_shrink: The structure used by [un]register_shrinker.
 * @kobj: The kobject through which the sysfs tunables are exposed.
 */
struct ttm_pool_manager {
	struct mutex		lock;
	struct list_head	pools;
	struct ttm_pool_opts	options;
	unsigned		npools;
	struct shrinker		mm_shrink;
	struct kobject		kobj;
};

static struct ttm_pool_manager *_manager;

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
			      const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;

	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}
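
/*
 * Editor's note (worked example, assuming PAGE_SIZE == 4096): the sysfs
 * files take kilobytes while the options are kept in pages, so the
 * store above divides by (PAGE_SIZE >> 10) == 4 KiB per page. Writing
 * "8192" (8 MiB) to pool_max_size therefore sets max_size to
 * 8192 / 4 == 2048 pages; ttm_pool_show() multiplies back, so reading
 * the file returns "8192".
 */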

static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
			     char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};

#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}
#endif /* for !CONFIG_X86 */

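/*
 * Editor's note: on x86 the arch-provided set_pages_array_{wb,wc,uc}()
 * change the actual page attributes; the stubs above are only compiled
 * when !CONFIG_X86, where (un)mapping the page into the AGP aperture is
 * the closest available equivalent. A hypothetical caller flipping a
 * batch of pages to write-combined ('page0'/'page1' are placeholders
 * for pages obtained elsewhere):
 *
 *	struct page *pages[2] = { page0, page1 };
 *
 *	if (set_pages_array_wc(pages, 2))
 *		pr_err("failed to set pages to wc\n");
 */
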
static int ttm_set_pages_caching(struct dma_pool *pool,
				 struct page **pages, unsigned cpages)
{
	int r = 0;

	/* Set page caching */
	if (pool->type & IS_UC) {
		r = set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to uc!\n",
			       pool->dev_name, cpages);
	}
	if (pool->type & IS_WC) {
		r = set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to wc!\n",
			       pool->dev_name, cpages);
	}
	return r;
}

static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
{
	dma_addr_t dma = d_page->dma;

	dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);
	kfree(d_page);
}

static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
{
	struct dma_page *d_page;

	d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
	if (!d_page)
		return NULL;

	d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
					   &d_page->dma,
					   pool->gfp_flags);
	if (d_page->vaddr)
		d_page->p = virt_to_page(d_page->vaddr);
	else {
		kfree(d_page);
		d_page = NULL;
	}
	return d_page;
}

static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
{
	enum pool_type type = IS_UNDEFINED;

	if (flags & TTM_PAGE_FLAG_DMA32)
		type |= IS_DMA32;
	if (cstate == tt_cached)
		type |= IS_CACHED;
	else if (cstate == tt_uncached)
		type |= IS_UC;
	else
		type |= IS_WC;

	return type;
}
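
/*
 * Editor's note (worked examples from the mapping above):
 *
 *	ttm_to_type(0, tt_cached)                  == POOL_IS_CACHED
 *	ttm_to_type(0, tt_uncached)                == POOL_IS_UC
 *	ttm_to_type(TTM_PAGE_FLAG_DMA32, tt_wc)    == POOL_IS_WC_DMA32
 *
 * i.e. any caching state other than cached/uncached falls through to
 * write-combined, and TTM_PAGE_FLAG_DMA32 simply ORs in IS_DMA32.
 */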

static void ttm_pool_update_free_locked(struct dma_pool *pool,
					unsigned freed_pages)
{
	pool->npages_free -= freed_pages;
	pool->nfrees += freed_pages;
}

/* Set memory back to wb and free the pages. */
static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
			      struct page *pages[], unsigned npages)
{
	struct dma_page *d_page, *tmp;

	/* Don't set WB on WB page pool. */
	if (npages && !(pool->type & IS_CACHED) &&
	    set_pages_array_wb(pages, npages))
		pr_err("%s: Failed to set %d pages to wb!\n",
		       pool->dev_name, npages);

	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
	}
}

static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
{
	/* Don't set WB on WB page pool. */
	if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
		pr_err("%s: Failed to set %d pages to wb!\n",
		       pool->dev_name, 1);

	list_del(&d_page->page_list);
	__ttm_dma_free_page(pool, d_page);
}

/*
 * Free pages from the pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: If set to FREE_ALL_PAGES, all pages in the pool are freed.
 **/
static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
{
	unsigned long irq_flags;
	struct dma_page *dma_p, *tmp;
	struct page **pages_to_free;
	struct list_head d_pages;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;
#if 0
	if (nr_free > 1) {
		pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
			 pool->dev_name, pool->name, current->pid,
			 npages_to_free, nr_free);
	}
#endif
	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
			GFP_KERNEL);

	if (!pages_to_free) {
		pr_err("%s: Failed to allocate memory for pool free operation\n",
		       pool->dev_name);
		return 0;
	}
	INIT_LIST_HEAD(&d_pages);
restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	/* We pick the oldest ones off the list. */
	list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
					 page_list) {
		if (freed_pages >= npages_to_free)
			break;

		/* Move the dma_page from one list to another. */
		list_move(&dma_p->page_list, &d_pages);

		pages_to_free[freed_pages++] = dma_p->p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			ttm_pool_update_free_locked(pool, freed_pages);
			/*
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_dma_pages_put(pool, &d_pages, pages_to_free,
					  freed_pages);

			INIT_LIST_HEAD(&d_pages);

			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;
		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
out:
	kfree(pages_to_free);
	return nr_free;
}
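
/*
 * Editor's note (worked example, assuming 4 KiB pages and therefore
 * NUM_PAGES_TO_ALLOC == 512): a call such as
 *
 *	ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
 *
 * walks the free list in batches of at most 512 pages, dropping the
 * pool lock around each set_pages_array_wb()/dma_free_coherent() batch
 * so concurrent allocators are not stalled while caching attributes
 * are being changed.
 */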

static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
{
	struct device_pools *p;
	struct dma_pool *pool;

	if (!dev)
		return;

	mutex_lock(&_manager->lock);
	list_for_each_entry_reverse(p, &_manager->pools, pools) {
		if (p->dev != dev)
			continue;
		pool = p->pool;
		if (pool->type != type)
			continue;

		list_del(&p->pools);
		kfree(p);
		_manager->npools--;
		break;
	}
	list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
		if (pool->type != type)
			continue;
		/* Takes a spinlock.. */
		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
		WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
		/* This code path is called after _all_ references to the
		 * struct device have been dropped - so nobody should be
		 * touching it. In case somebody is trying to _add_ we are
		 * guarded by the mutex. */
		list_del(&pool->pools);
		kfree(pool);
		break;
	}
	mutex_unlock(&_manager->lock);
}

/*
 * This destructor is run when the 'struct device' is freed, although the
 * pool might have already been freed earlier.
 */
static void ttm_dma_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	if (pool)
		ttm_dma_free_pool(dev, pool->type);
}

static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
					  enum pool_type type)
{
	char *n[] = {"wc", "uc", "cached", " dma32", "unknown",};
	enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED};
	struct device_pools *sec_pool = NULL;
	struct dma_pool *pool = NULL, **ptr;
	unsigned i;
	int ret = -ENODEV;
	char *p;

	if (!dev)
		return NULL;

	ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	ret = -ENOMEM;

	pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
			    dev_to_node(dev));
	if (!pool)
		goto err_mem;

	sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
				dev_to_node(dev));
	if (!sec_pool)
		goto err_mem;

	INIT_LIST_HEAD(&sec_pool->pools);
	sec_pool->dev = dev;
	sec_pool->pool = pool;

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->inuse_list);
	INIT_LIST_HEAD(&pool->pools);
	spin_lock_init(&pool->lock);
	pool->dev = dev;
	pool->npages_free = pool->npages_in_use = 0;
	pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->size = PAGE_SIZE;
	pool->type = type;
	pool->nrefills = 0;
	p = pool->name;
	for (i = 0; i < 5; i++) {
		if (type & t[i]) {
			p += snprintf(p, sizeof(pool->name) - (p - pool->name),
				      "%s", n[i]);
		}
	}
	*p = 0;
	/* We copy the name for pr_* calls because by the time
	 * dma_pool_destroy is called the kobj->name has already been
	 * deallocated. */
	snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
		 dev_driver_string(dev), dev_name(dev));
	mutex_lock(&_manager->lock);
	/* You can get the dma_pool from either the global: */
	list_add(&sec_pool->pools, &_manager->pools);
	_manager->npools++;
	/* or from 'struct device': */
	list_add(&pool->pools, &dev->dma_pools);
	mutex_unlock(&_manager->lock);

	*ptr = pool;
	devres_add(dev, ptr);

	return pool;
err_mem:
	devres_free(ptr);
	kfree(sec_pool);
	kfree(pool);
	return ERR_PTR(ret);
}

static struct dma_pool *ttm_dma_find_pool(struct device *dev,
					  enum pool_type type)
{
	struct dma_pool *pool, *tmp, *found = NULL;

	if (type == IS_UNDEFINED)
		return found;

	/* NB: We iterate on the 'struct dev' which has no spinlock, but
	 * it does have a kref which we have taken. The kref is taken during
	 * graphic driver loading - in drm_pci_init it calls either
	 * pci_dev_get or pci_register_driver, both of which end up taking a
	 * kref on 'struct device'.
	 *
	 * On teardown, the graphic drivers end up quiescing the TTM
	 * (put_pages) and calling the dev_res destructors:
	 * ttm_dma_pool_release. The nice thing is that at that point in
	 * time there are no pages associated with the driver, so this
	 * function will not be called.
	 */
	list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) {
		if (pool->type != type)
			continue;
		found = pool;
		break;
	}
	return found;
}
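
/*
 * Editor's sketch (illustrative only, mirroring what ttm_dma_populate()
 * below does): callers look a pool up by type and lazily create it on
 * first use.
 *
 *	enum pool_type type = ttm_to_type(ttm->page_flags, ttm->caching_state);
 *	struct dma_pool *pool = ttm_dma_find_pool(dev, type);
 *
 *	if (!pool)
 *		pool = ttm_dma_pool_init(dev, gfp_flags, type);
 */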

/*
 * Free the pages that failed to change their caching state. Pages whose
 * caching state was already changed remain on the list and are put back
 * into the pool by the caller.
 */
static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
						 struct list_head *d_pages,
						 struct page **failed_pages,
						 unsigned cpages)
{
	struct dma_page *d_page, *tmp;
	struct page *p;
	unsigned i = 0;

	p = failed_pages[0];
	if (!p)
		return;
	/* Find the failed page. */
	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		if (d_page->p != p)
			continue;
		/* .. and then progress over the full list. */
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
		if (++i < cpages)
			p = failed_pages[i];
		else
			break;
	}
}

/*
 * Allocate 'count' pages, putting every allocated page on the 'd_pages'
 * list and setting the caching state for them. We return zero for
 * success, and negative numbers as errors.
 */
static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
					struct list_head *d_pages,
					unsigned count)
{
	struct page **caching_array;
	struct dma_page *dma_p;
	struct page *p;
	int r = 0;
	unsigned i, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

	if (!caching_array) {
		pr_err("%s: Unable to allocate table for new pages\n",
		       pool->dev_name);
		return -ENOMEM;
	}

	if (count > 1) {
		pr_debug("%s: (%s:%d) Getting %d pages\n",
			 pool->dev_name, pool->name, current->pid, count);
	}

	for (i = 0, cpages = 0; i < count; ++i) {
		dma_p = __ttm_dma_alloc_page(pool);
		if (!dma_p) {
			pr_err("%s: Unable to get page %u\n",
			       pool->dev_name, i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(pool, caching_array,
							  cpages);
				if (r)
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
			}
			r = -ENOMEM;
			goto out;
		}
		p = dma_p->p;
#ifdef CONFIG_HIGHMEM
		/* gfp flags of a highmem page should never be dma32, so
		 * we should be fine in such a case
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {
				/* Note: Cannot hold the spinlock */
				r = ttm_set_pages_caching(pool, caching_array,
						 cpages);
				if (r) {
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
					goto out;
				}
				cpages = 0;
			}
		}
		list_add(&dma_p->page_list, d_pages);
	}

	if (cpages) {
		r = ttm_set_pages_caching(pool, caching_array, cpages);
		if (r)
			ttm_dma_handle_caching_state_failure(pool, d_pages,
					caching_array, cpages);
	}
out:
	kfree(caching_array);
	return r;
}

/*
 * @return count of pages in the pool's free list that are available to
 * fulfill the request (refilling the pool first if needed).
 */
static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
					 unsigned long *irq_flags)
{
	unsigned count = _manager->options.small;
	int r = pool->npages_free;

	if (count > pool->npages_free) {
		struct list_head d_pages;

		INIT_LIST_HEAD(&d_pages);

		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		/* Returns zero on success, or a negative error code. */
		r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);

		spin_lock_irqsave(&pool->lock, *irq_flags);
		if (!r) {
			/* Add the fresh pages to the end.. */
			list_splice(&d_pages, &pool->free_list);
			++pool->nrefills;
			pool->npages_free += count;
			r = count;
		} else {
			struct dma_page *d_page;
			unsigned cpages = 0;

			pr_err("%s: Failed to fill %s pool (r:%d)!\n",
			       pool->dev_name, pool->name, r);

			list_for_each_entry(d_page, &d_pages, page_list) {
				cpages++;
			}
			list_splice_tail(&d_pages, &pool->free_list);
			pool->npages_free += cpages;
			r = cpages;
		}
	}
	return r;
}

/*
 * @return zero on success, or a negative error if no page could be
 * obtained. The populate list is actually a stack (not that it matters,
 * as TTM allocates one page at a time).
 */
static int ttm_dma_pool_get_pages(struct dma_pool *pool,
				  struct ttm_dma_tt *ttm_dma,
				  unsigned index)
{
	struct dma_page *d_page;
	struct ttm_tt *ttm = &ttm_dma->ttm;
	unsigned long irq_flags;
	int count, r = -ENOMEM;

	spin_lock_irqsave(&pool->lock, irq_flags);
	count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
	if (count) {
		d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
		ttm->pages[index] = d_page->p;
		ttm_dma->dma_address[index] = d_page->dma;
		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
		r = 0;
		pool->npages_in_use += 1;
		pool->npages_free -= 1;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return r;
}

/*
 * On success the pages list will hold ttm->num_pages correctly cached
 * pages. Returns zero on success, a negative value (-ENOMEM, etc.) on
 * failure.
 */
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	struct dma_pool *pool;
	enum pool_type type;
	unsigned i;
	gfp_t gfp_flags;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags = GFP_USER | GFP_DMA32;
	else
		gfp_flags = GFP_HIGHUSER;
	if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	pool = ttm_dma_find_pool(dev, type);
	if (!pool) {
		pool = ttm_dma_pool_init(dev, gfp_flags, type);
		if (IS_ERR_OR_NULL(pool))
			return -ENOMEM;
	}

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
		if (ret != 0) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						false, false);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_populate);
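
/*
 * Editor's sketch (illustrative only; the 'mydrv' names are
 * hypothetical): how a driver's ttm_tt backend might pair these calls,
 * similar in spirit to what in-tree GPU drivers do. 'dev' is the
 * driver's 'struct device'.
 *
 *	static int mydrv_ttm_populate(struct ttm_tt *ttm)
 *	{
 *		struct ttm_dma_tt *dma_tt =
 *			container_of(ttm, struct ttm_dma_tt, ttm);
 *
 *		return ttm_dma_populate(dma_tt, dev);
 *	}
 *
 *	static void mydrv_ttm_unpopulate(struct ttm_tt *ttm)
 *	{
 *		struct ttm_dma_tt *dma_tt =
 *			container_of(ttm, struct ttm_dma_tt, ttm);
 *
 *		ttm_dma_unpopulate(dma_tt, dev);
 *	}
 */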

/* Get a good estimate of how many pages are free in the pools */
static int ttm_dma_pool_get_num_unused_pages(void)
{
	struct device_pools *p;
	unsigned total = 0;

	mutex_lock(&_manager->lock);
	list_for_each_entry(p, &_manager->pools, pools)
		total += p->pool->npages_free;
	mutex_unlock(&_manager->lock);
	return total;
}

/* Put all pages in the pages list into the correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct dma_pool *pool;
	struct dma_page *d_page, *next;
	enum pool_type type;
	bool is_cached = false;
	unsigned count = 0, i, npages = 0;
	unsigned long irq_flags;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
	pool = ttm_dma_find_pool(dev, type);
	if (!pool)
		return;

	is_cached = (ttm_dma_find_pool(pool->dev,
		     ttm_to_type(ttm->page_flags, tt_cached)) == pool);

	/* make sure the pages array matches the list and count the pages */
	list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
		ttm->pages[count] = d_page->p;
		count++;
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	pool->npages_in_use -= count;
	if (is_cached) {
		pool->nfrees += count;
	} else {
		pool->npages_free += count;
		list_splice(&ttm_dma->pages_list, &pool->free_list);
		npages = count;
		if (pool->npages_free > _manager->options.max_size) {
			npages = pool->npages_free - _manager->options.max_size;
			/* free at least NUM_PAGES_TO_ALLOC pages
			 * to reduce calls to set_memory_wb */
			if (npages < NUM_PAGES_TO_ALLOC)
				npages = NUM_PAGES_TO_ALLOC;
		}
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (is_cached) {
		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 d_page->p);
			ttm_dma_page_put(pool, d_page);
		}
	} else {
		for (i = 0; i < count; i++) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 ttm->pages[i]);
		}
	}

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; i++) {
		ttm->pages[i] = NULL;
		ttm_dma->dma_address[i] = 0;
	}

	/* shrink the pool if necessary (only on !is_cached pools) */
	if (npages)
		ttm_dma_page_pool_free(pool, npages);
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);

/*
 * Callback for mm to request that the pool reduce the number of pages held.
 */
static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	static atomic_t start_pool = ATOMIC_INIT(0);
	unsigned idx = 0;
	unsigned pool_offset = atomic_add_return(1, &start_pool);
	unsigned shrink_pages = sc->nr_to_scan;
	struct device_pools *p;

	if (list_empty(&_manager->pools))
		return 0;

	mutex_lock(&_manager->lock);
	pool_offset = pool_offset % _manager->npools;
	list_for_each_entry(p, &_manager->pools, pools) {
		unsigned nr_free;

		if (!p->dev)
			continue;
		if (shrink_pages == 0)
			break;
		/* Do it in round-robin fashion. */
		if (++idx < pool_offset)
			continue;
		nr_free = shrink_pages;
		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
			 p->pool->dev_name, p->pool->name, current->pid,
			 nr_free, shrink_pages);
	}
	mutex_unlock(&_manager->lock);
	/* return estimated number of unused pages in pool */
	return ttm_dma_pool_get_num_unused_pages();
}
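
/*
 * Editor's note: with the legacy shrinker interface used here, the
 * callback both does the work and reports state: sc->nr_to_scan tells
 * it how many pages to try to release (0 means "just query"), and the
 * return value is an estimate of how many unused pages remain. A
 * sketch of how the VM might probe it through the registered hook:
 *
 *	struct shrink_control sc = { .gfp_mask = GFP_KERNEL, .nr_to_scan = 0 };
 *
 *	unused = ttm_dma_pool_mm_shrink(&_manager->mm_shrink, &sc);
 */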

static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
	manager->mm_shrink.seeks = 1;
	register_shrinker(&manager->mm_shrink);
}

static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}


int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret = -ENOMEM;

	WARN_ON(_manager);

	pr_info("Initializing DMA pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		goto err;

	mutex_init(&_manager->lock);
	INIT_LIST_HEAD(&_manager->pools);

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	/* This takes care of auto-freeing the _manager */
	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "dma_pool");
	if (unlikely(ret != 0)) {
		kobject_put(&_manager->kobj);
		goto err;
	}
	ttm_dma_pool_mm_shrink_init(_manager);
	return 0;
err:
	return ret;
}

void ttm_dma_page_alloc_fini(void)
{
	struct device_pools *p, *t;

	pr_info("Finalizing DMA pool allocator\n");
	ttm_dma_pool_mm_shrink_fini(_manager);

	list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
		dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
			current->pid);
		WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
			ttm_dma_pool_match, p->pool));
		ttm_dma_free_pool(p->dev, p->pool->type);
	}
	kobject_put(&_manager->kobj);
	_manager = NULL;
}

int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct device_pools *p;
	struct dma_pool *pool = NULL;
	char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
		     "name", "virt", "busaddr"};

	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%13s %12s %13s %8s %8s %8s\n",
		   h[0], h[1], h[2], h[3], h[4], h[5]);
	mutex_lock(&_manager->lock);
	list_for_each_entry(p, &_manager->pools, pools) {
		struct device *dev = p->dev;
		if (!dev)
			continue;
		pool = p->pool;
		seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
				pool->name, pool->nrefills,
				pool->nfrees, pool->npages_in_use,
				pool->npages_free,
				pool->dev_name);
	}
	mutex_unlock(&_manager->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
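
/*
 * Editor's note (illustrative output; the row values and device name
 * are hypothetical): the debugfs file prints one line per device pool,
 * roughly:
 *
 *	         pool      refills   pages freed    inuse available     name
 *	     uc dma32           12           512       64      448 drmn drm0
 *
 * "virt" and "busaddr" in h[] above are leftovers that are never
 * printed by the current format strings.
 */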
1132*592ffb21SWarner Losh EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
1133