// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2020 Google LLC
 */
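/*
 * Non-blocking ("atomic") DMA coherent pools: pre-allocated, pre-flushed and,
 * where relevant, pre-decrypted memory that dma_alloc_from_pool() can hand
 * out without sleeping.  One pool exists per zone (DMA, DMA32, normal) so
 * that device addressing limits can still be honoured.
 */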
#include <linux/cma.h>
#include <linux/debugfs.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-direct.h>
#include <linux/init.h>
#include <linux/genalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct gen_pool *atomic_pool_dma __ro_after_init;
static unsigned long pool_size_dma;
static struct gen_pool *atomic_pool_dma32 __ro_after_init;
static unsigned long pool_size_dma32;
static struct gen_pool *atomic_pool_kernel __ro_after_init;
static unsigned long pool_size_kernel;

/* Size can be overridden with the "coherent_pool=" command-line parameter */
static size_t atomic_pool_size;

/* Dynamic background expansion when the atomic pool is near capacity */
static struct work_struct atomic_pool_work;

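/*
 * Parse the "coherent_pool=" early parameter, e.g. "coherent_pool=2M" on the
 * kernel command line; memparse() accepts the usual K/M/G suffixes.
 */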
static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

static void __init dma_atomic_pool_debugfs_init(void)
{
	struct dentry *root;

	root = debugfs_create_dir("dma_pools", NULL);
	debugfs_create_ulong("pool_size_dma", 0400, root, &pool_size_dma);
	debugfs_create_ulong("pool_size_dma32", 0400, root, &pool_size_dma32);
	debugfs_create_ulong("pool_size_kernel", 0400, root, &pool_size_kernel);
}

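/* Attribute an expansion to the pool that matches the zone flags in @gfp. */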
static void dma_atomic_pool_size_add(gfp_t gfp, size_t size)
{
	if (gfp & __GFP_DMA)
		pool_size_dma += size;
	else if (gfp & __GFP_DMA32)
		pool_size_dma32 += size;
	else
		pool_size_kernel += size;
}

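/*
 * Return true if the default CMA area is guaranteed to lie within the zone
 * requested by @gfp, so pages taken from it satisfy the zone restriction.
 */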
static bool cma_in_zone(gfp_t gfp)
{
	unsigned long size;
	phys_addr_t end;
	struct cma *cma;

	cma = dev_get_cma_area(NULL);
	if (!cma)
		return false;

	size = cma_get_size(cma);
	if (!size)
		return false;

	/* CMA can't cross zone boundaries, see cma_activate_area() */
	end = cma_get_base(cma) + size - 1;
	if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
		return end <= zone_dma_limit;
	if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
		return end <= max(DMA_BIT_MASK(32), zone_dma_limit);
	return true;
}

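/*
 * Grow @pool by roughly @pool_size bytes: try the CMA area first when it is
 * usable for @gfp, fall back to alloc_pages(), and halve the order on each
 * failure until a single page is reached.  The new chunk is flushed, remapped
 * where CONFIG_DMA_DIRECT_REMAP requires it, decrypted, and handed to the
 * genpool.
 */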
static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
			      gfp_t gfp)
{
	unsigned int order;
	struct page *page = NULL;
	void *addr;
	int ret = -ENOMEM;

	/* Cannot allocate larger than MAX_PAGE_ORDER */
	order = min(get_order(pool_size), MAX_PAGE_ORDER);

	do {
		pool_size = 1 << (PAGE_SHIFT + order);
		if (cma_in_zone(gfp))
			page = dma_alloc_from_contiguous(NULL, 1 << order,
							 order, false);
		if (!page)
			page = alloc_pages(gfp | __GFP_NOWARN, order);
	} while (!page && order-- > 0);
	if (!page)
		goto out;

	arch_dma_prep_coherent(page, pool_size);

#ifdef CONFIG_DMA_DIRECT_REMAP
	addr = dma_common_contiguous_remap(page, pool_size,
			pgprot_decrypted(pgprot_dmacoherent(PAGE_KERNEL)),
			__builtin_return_address(0));
	if (!addr)
		goto free_page;
#else
	addr = page_to_virt(page);
#endif
	/*
	 * Memory in the atomic DMA pools must be unencrypted; the pools never
	 * shrink, so no re-encryption needs to happen in dma_direct_free().
	 */
	ret = set_memory_decrypted((unsigned long)page_to_virt(page),
				   1 << order);
	if (ret)
		goto remove_mapping;
	ret = gen_pool_add_virt(pool, (unsigned long)addr, page_to_phys(page),
				pool_size, NUMA_NO_NODE);
	if (ret)
		goto encrypt_mapping;

	dma_atomic_pool_size_add(gfp, pool_size);
	return 0;

encrypt_mapping:
	ret = set_memory_encrypted((unsigned long)page_to_virt(page),
				   1 << order);
	if (WARN_ON_ONCE(ret)) {
		/* Decrypt succeeded but encrypt failed, purposely leak */
		goto out;
	}
remove_mapping:
#ifdef CONFIG_DMA_DIRECT_REMAP
	dma_common_free_remap(addr, pool_size);
free_page: __maybe_unused
#endif
	/* Free the page in both configurations, or the chunk would leak */
	__free_pages(page, order);
out:
	return ret;
}

static void atomic_pool_resize(struct gen_pool *pool, gfp_t gfp)
{
	if (pool && gen_pool_avail(pool) < atomic_pool_size)
		atomic_pool_expand(pool, gen_pool_size(pool), gfp);
}

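/*
 * Deferred work, scheduled from the allocation path, that tops up every pool
 * whose available space has dropped below atomic_pool_size.
 */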
static void atomic_pool_work_fn(struct work_struct *work)
{
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		atomic_pool_resize(atomic_pool_dma,
				   GFP_KERNEL | GFP_DMA);
	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		atomic_pool_resize(atomic_pool_dma32,
				   GFP_KERNEL | GFP_DMA32);
	atomic_pool_resize(atomic_pool_kernel, GFP_KERNEL);
}

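/*
 * Create one gen_pool and give it its initial chunk.  The order-aligned
 * first-fit algorithm keeps allocations naturally aligned to their size,
 * which DMA users commonly rely on.
 */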
static __init struct gen_pool *__dma_atomic_pool_init(size_t pool_size,
						      gfp_t gfp)
{
	struct gen_pool *pool;
	int ret;

	pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);
	if (!pool)
		return NULL;

	gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);

	ret = atomic_pool_expand(pool, pool_size, gfp);
	if (ret) {
		gen_pool_destroy(pool);
		pr_err("DMA: failed to allocate %zu KiB %pGg pool for atomic allocation\n",
		       pool_size >> 10, &gfp);
		return NULL;
	}

	pr_info("DMA: preallocated %zu KiB %pGg pool for atomic allocations\n",
		gen_pool_size(pool) >> 10, &gfp);
	return pool;
}

#ifdef CONFIG_ZONE_DMA32
#define has_managed_dma32 has_managed_zone(ZONE_DMA32)
#else
#define has_managed_dma32 false
#endif

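/* Set up the pools at boot, one per zone that actually has managed pages. */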
static int __init dma_atomic_pool_init(void)
{
	int ret = 0;

	/*
	 * If coherent_pool was not used on the command line, default the pool
	 * sizes to 128KB per 1GB of memory, min 128KB, max the size of a
	 * single MAX_PAGE_ORDER allocation.
	 */
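	/* e.g. roughly 4 GiB of RAM yields 512 KiB per pool by default. */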
	if (!atomic_pool_size) {
		unsigned long pages = totalram_pages() / (SZ_1G / SZ_128K);
		pages = min_t(unsigned long, pages, MAX_ORDER_NR_PAGES);
		atomic_pool_size = max_t(size_t, pages << PAGE_SHIFT, SZ_128K);
	}
	INIT_WORK(&atomic_pool_work, atomic_pool_work_fn);

	/* All memory might be in the DMA zone(s) to begin with */
	if (has_managed_zone(ZONE_NORMAL)) {
		atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
							    GFP_KERNEL);
		if (!atomic_pool_kernel)
			ret = -ENOMEM;
	}
	if (has_managed_dma()) {
		atomic_pool_dma = __dma_atomic_pool_init(atomic_pool_size,
							 GFP_KERNEL | GFP_DMA);
		if (!atomic_pool_dma)
			ret = -ENOMEM;
	}
	if (has_managed_dma32) {
		atomic_pool_dma32 = __dma_atomic_pool_init(atomic_pool_size,
							   GFP_KERNEL | GFP_DMA32);
		if (!atomic_pool_dma32)
			ret = -ENOMEM;
	}

	dma_atomic_pool_debugfs_init();
	return ret;
}
postcore_initcall(dma_atomic_pool_init);

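/*
 * Pick the next pool to try for @gfp: start with the best zone match and fall
 * back to the remaining pools in order, returning NULL once every candidate
 * has been tried.  @prev is the pool the caller tried last, or NULL on the
 * first call.
 */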
static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)
{
	if (prev == NULL) {
		if (gfp & GFP_DMA)
			return atomic_pool_dma ?: atomic_pool_dma32 ?: atomic_pool_kernel;
		if (gfp & GFP_DMA32)
			return atomic_pool_dma32 ?: atomic_pool_dma ?: atomic_pool_kernel;
		return atomic_pool_kernel ?: atomic_pool_dma32 ?: atomic_pool_dma;
	}
	if (prev == atomic_pool_kernel)
		return atomic_pool_dma32 ? atomic_pool_dma32 : atomic_pool_dma;
	if (prev == atomic_pool_dma32)
		return atomic_pool_dma;
	return NULL;
}

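/*
 * Carve @size bytes out of @pool.  The optional @phys_addr_ok callback lets
 * the caller reject chunks the device cannot address; a background refill is
 * kicked off once the pool runs below atomic_pool_size.
 */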
static struct page *__dma_alloc_from_pool(struct device *dev, size_t size,
		struct gen_pool *pool, void **cpu_addr,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
{
	unsigned long addr;
	phys_addr_t phys;

	addr = gen_pool_alloc(pool, size);
	if (!addr)
		return NULL;

	phys = gen_pool_virt_to_phys(pool, addr);
	if (phys_addr_ok && !phys_addr_ok(dev, phys, size)) {
		gen_pool_free(pool, addr, size);
		return NULL;
	}

	if (gen_pool_avail(pool) < atomic_pool_size)
		schedule_work(&atomic_pool_work);

	*cpu_addr = (void *)addr;
	memset(*cpu_addr, 0, size);
	return pfn_to_page(__phys_to_pfn(phys));
}

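/*
 * Walk the candidate pools until one yields usable memory.  A sketch of a
 * typical non-blocking caller (dma-direct passes its dma_coherent_ok() helper
 * as the addressability check):
 *
 *	void *vaddr;
 *	struct page *page;
 *
 *	page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &vaddr, GFP_ATOMIC,
 *				   dma_coherent_ok);
 *	if (!page)
 *		return NULL;
 */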
struct page *dma_alloc_from_pool(struct device *dev, size_t size,
		void **cpu_addr, gfp_t gfp,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
{
	struct gen_pool *pool = NULL;
	struct page *page;

	while ((pool = dma_guess_pool(pool, gfp))) {
		page = __dma_alloc_from_pool(dev, size, pool, cpu_addr,
					     phys_addr_ok);
		if (page)
			return page;
	}

	WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev));
	return NULL;
}

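/*
 * Return @start to whichever pool it came from.  Returns false if the address
 * does not belong to any atomic pool, in which case the caller must free the
 * memory through its regular path.
 */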
bool dma_free_from_pool(struct device *dev, void *start, size_t size)
{
	struct gen_pool *pool = NULL;

	while ((pool = dma_guess_pool(pool, 0))) {
		if (!gen_pool_has_addr(pool, (unsigned long)start, size))
			continue;
		gen_pool_free(pool, (unsigned long)start, size);
		return true;
	}

	return false;
}