xref: /linux/drivers/firmware/qcom/qcom_tzmem.c (revision 297d9111e9fcf47dd1dcc6f79bba915f35378d01)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory allocator for buffers shared with the TrustZone.
 *
 * Copyright (C) 2023-2024 Linaro Ltd.
 */

#include <linux/bug.h>
#include <linux/cleanup.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include "qcom_tzmem.h"

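/*
 * A single DMA-coherent area backing a pool: its kernel virtual and DMA
 * addresses, its page-aligned size and a private pointer used by the
 * mode-specific backend (the SHM bridge handle when that mode is enabled).
 */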
struct qcom_tzmem_area {
	struct list_head list;
	void *vaddr;
	dma_addr_t paddr;
	size_t size;
	void *priv;
};

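/*
 * A TZ memory pool: a genalloc pool fed by one or more coherent areas,
 * plus the growth policy, increment and size cap taken from the caller's
 * configuration. The spinlock protects the list of areas.
 */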
struct qcom_tzmem_pool {
	struct gen_pool *genpool;
	struct list_head areas;
	enum qcom_tzmem_policy policy;
	size_t increment;
	size_t max_size;
	spinlock_t lock;
};

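/*
 * Per-allocation bookkeeping: the page-aligned size of the buffer and the
 * pool it came from, so that qcom_tzmem_free() can return it to the right
 * genalloc pool.
 */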
struct qcom_tzmem_chunk {
	size_t size;
	struct qcom_tzmem_pool *owner;
};

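/*
 * Outstanding allocations are tracked in a global radix tree indexed by
 * the buffer's virtual address. qcom_tzmem_dev is the device used for the
 * coherent allocations and is set once by qcom_tzmem_enable().
 */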
static struct device *qcom_tzmem_dev;
static RADIX_TREE(qcom_tzmem_chunks, GFP_ATOMIC);
static DEFINE_SPINLOCK(qcom_tzmem_chunks_lock);

#if IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_GENERIC)

static int qcom_tzmem_init(void)
{
	return 0;
}

static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
{
	return 0;
}

static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area)
{

}

#elif IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE)

#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/of.h>

#define QCOM_SHM_BRIDGE_NUM_VM_SHIFT 9

static bool qcom_tzmem_using_shm_bridge;

/* List of machines that are known to not support SHM bridge correctly. */
static const char *const qcom_tzmem_blacklist[] = {
	"qcom,sc8180x",
	"qcom,sdm670", /* failure in GPU firmware loading */
	"qcom,sdm845", /* reset in rmtfs memory assignment */
	"qcom,sm7150", /* reset in rmtfs memory assignment */
	"qcom,sm8150", /* reset in rmtfs memory assignment */
	NULL
};

static int qcom_tzmem_init(void)
{
	const char *const *platform;
	int ret;

	for (platform = qcom_tzmem_blacklist; *platform; platform++) {
		if (of_machine_is_compatible(*platform))
			goto notsupp;
	}

	ret = qcom_scm_shm_bridge_enable();
	if (ret == -EOPNOTSUPP)
		goto notsupp;

	if (!ret)
		qcom_tzmem_using_shm_bridge = true;

	return ret;

notsupp:
	dev_info(qcom_tzmem_dev, "SHM Bridge not supported\n");
	return 0;
}

static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
{
	u64 pfn_and_ns_perm, ipfn_and_s_perm, size_and_flags;
	int ret;

	if (!qcom_tzmem_using_shm_bridge)
		return 0;

	pfn_and_ns_perm = (u64)area->paddr | QCOM_SCM_PERM_RW;
	ipfn_and_s_perm = (u64)area->paddr | QCOM_SCM_PERM_RW;
	size_and_flags = area->size | (1 << QCOM_SHM_BRIDGE_NUM_VM_SHIFT);

	u64 *handle __free(kfree) = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	ret = qcom_scm_shm_bridge_create(qcom_tzmem_dev, pfn_and_ns_perm,
					 ipfn_and_s_perm, size_and_flags,
					 QCOM_SCM_VMID_HLOS, handle);
	if (ret)
		return ret;

	area->priv = no_free_ptr(handle);

	return 0;
}

static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area)
{
	u64 *handle = area->priv;

	if (!qcom_tzmem_using_shm_bridge)
		return;

	qcom_scm_shm_bridge_delete(qcom_tzmem_dev, *handle);
	kfree(handle);
}

#endif /* CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE */

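/*
 * Back the pool with one more DMA-coherent area: allocate it, run the
 * mode-specific setup (SHM bridge registration when enabled), hand the
 * range over to the genalloc pool and track the area for later teardown.
 */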
static int qcom_tzmem_pool_add_memory(struct qcom_tzmem_pool *pool,
				      size_t size, gfp_t gfp)
{
	int ret;

	struct qcom_tzmem_area *area __free(kfree) = kzalloc(sizeof(*area),
							     gfp);
	if (!area)
		return -ENOMEM;

	area->size = PAGE_ALIGN(size);

	area->vaddr = dma_alloc_coherent(qcom_tzmem_dev, area->size,
					 &area->paddr, gfp);
	if (!area->vaddr)
		return -ENOMEM;

	ret = qcom_tzmem_init_area(area);
	if (ret) {
		dma_free_coherent(qcom_tzmem_dev, area->size,
				  area->vaddr, area->paddr);
		return ret;
	}

	ret = gen_pool_add_virt(pool->genpool, (unsigned long)area->vaddr,
				(phys_addr_t)area->paddr, size, -1);
	if (ret) {
		dma_free_coherent(qcom_tzmem_dev, area->size,
				  area->vaddr, area->paddr);
		return ret;
	}

	scoped_guard(spinlock_irqsave, &pool->lock)
		list_add_tail(&area->list, &pool->areas);

	area = NULL;
	return 0;
}

/**
 * qcom_tzmem_pool_new() - Create a new TZ memory pool.
 * @config: Pool configuration.
 *
 * Create a new pool of memory suitable for sharing with the TrustZone.
 *
 * Must not be used in atomic context.
 *
 * Return: New memory pool address or ERR_PTR() on error.
 */
struct qcom_tzmem_pool *
qcom_tzmem_pool_new(const struct qcom_tzmem_pool_config *config)
{
	int ret = -ENOMEM;

	might_sleep();

	switch (config->policy) {
	case QCOM_TZMEM_POLICY_STATIC:
		if (!config->initial_size)
			return ERR_PTR(-EINVAL);
		break;
	case QCOM_TZMEM_POLICY_MULTIPLIER:
		if (!config->increment)
			return ERR_PTR(-EINVAL);
		break;
	case QCOM_TZMEM_POLICY_ON_DEMAND:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	struct qcom_tzmem_pool *pool __free(kfree) = kzalloc(sizeof(*pool),
							     GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->genpool = gen_pool_create(PAGE_SHIFT, -1);
	if (!pool->genpool)
		return ERR_PTR(-ENOMEM);

	gen_pool_set_algo(pool->genpool, gen_pool_best_fit, NULL);

	pool->policy = config->policy;
	pool->increment = config->increment;
	pool->max_size = config->max_size;
	INIT_LIST_HEAD(&pool->areas);
	spin_lock_init(&pool->lock);

	if (config->initial_size) {
		ret = qcom_tzmem_pool_add_memory(pool, config->initial_size,
						 GFP_KERNEL);
		if (ret) {
			gen_pool_destroy(pool->genpool);
			return ERR_PTR(ret);
		}
	}

	return_ptr(pool);
}
EXPORT_SYMBOL_GPL(qcom_tzmem_pool_new);
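
/*
 * A minimal usage sketch (not part of this driver, the caller context is
 * hypothetical): create an on-demand pool capped at 256 KiB that grows
 * lazily as buffers are requested. The SZ_* constants come from
 * linux/sizes.h.
 *
 *	struct qcom_tzmem_pool_config config = {
 *		.policy = QCOM_TZMEM_POLICY_ON_DEMAND,
 *		.initial_size = SZ_4K,
 *		.max_size = SZ_256K,
 *	};
 *	struct qcom_tzmem_pool *pool;
 *
 *	pool = qcom_tzmem_pool_new(&config);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */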

/**
 * qcom_tzmem_pool_free() - Destroy a TZ memory pool and free all resources.
 * @pool: Memory pool to free.
 *
 * Must not be called if any of the allocated chunks has not been freed.
 * Must not be used in atomic context.
 */
void qcom_tzmem_pool_free(struct qcom_tzmem_pool *pool)
{
	struct qcom_tzmem_area *area, *next;
	struct qcom_tzmem_chunk *chunk;
	struct radix_tree_iter iter;
	bool non_empty = false;
	void __rcu **slot;

	might_sleep();

	if (!pool)
		return;

	scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) {
		radix_tree_for_each_slot(slot, &qcom_tzmem_chunks, &iter, 0) {
			chunk = radix_tree_deref_slot_protected(slot,
						&qcom_tzmem_chunks_lock);

			if (chunk->owner == pool)
				non_empty = true;
		}
	}

	WARN(non_empty, "Freeing TZ memory pool with memory still allocated");

	list_for_each_entry_safe(area, next, &pool->areas, list) {
		list_del(&area->list);
		qcom_tzmem_cleanup_area(area);
		dma_free_coherent(qcom_tzmem_dev, area->size,
				  area->vaddr, area->paddr);
		kfree(area);
	}

	gen_pool_destroy(pool->genpool);
	kfree(pool);
}
EXPORT_SYMBOL_GPL(qcom_tzmem_pool_free);

static void devm_qcom_tzmem_pool_free(void *data)
{
	struct qcom_tzmem_pool *pool = data;

	qcom_tzmem_pool_free(pool);
}

/**
 * devm_qcom_tzmem_pool_new() - Managed variant of qcom_tzmem_pool_new().
 * @dev: Device managing this resource.
 * @config: Pool configuration.
 *
 * Must not be used in atomic context.
 *
 * Return: Address of the managed pool or ERR_PTR() on failure.
 */
struct qcom_tzmem_pool *
devm_qcom_tzmem_pool_new(struct device *dev,
			 const struct qcom_tzmem_pool_config *config)
{
	struct qcom_tzmem_pool *pool;
	int ret;

	pool = qcom_tzmem_pool_new(config);
	if (IS_ERR(pool))
		return pool;

	ret = devm_add_action_or_reset(dev, devm_qcom_tzmem_pool_free, pool);
	if (ret)
		return ERR_PTR(ret);

	return pool;
}
EXPORT_SYMBOL_GPL(devm_qcom_tzmem_pool_new);

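/*
 * A minimal sketch of the managed variant (foo_probe() is hypothetical):
 * the pool is torn down automatically when the device is unbound, so no
 * explicit qcom_tzmem_pool_free() call is needed.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct qcom_tzmem_pool_config config = {
 *			.policy = QCOM_TZMEM_POLICY_STATIC,
 *			.initial_size = SZ_64K,
 *		};
 *		struct qcom_tzmem_pool *pool;
 *
 *		pool = devm_qcom_tzmem_pool_new(&pdev->dev, &config);
 *		if (IS_ERR(pool))
 *			return PTR_ERR(pool);
 *
 *		... use the pool for the lifetime of the device ...
 *		return 0;
 *	}
 */

/*
 * Decide whether a failed allocation may grow the pool: static pools
 * never grow, multiplier pools scale the current size by the configured
 * increment, and on-demand pools grow by exactly the requested amount.
 * Growth is refused up front if adding the requested size would exceed
 * the optional max_size cap.
 */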
static bool qcom_tzmem_try_grow_pool(struct qcom_tzmem_pool *pool,
				     size_t requested, gfp_t gfp)
{
	size_t current_size = gen_pool_size(pool->genpool);

	if (pool->max_size && (current_size + requested) > pool->max_size)
		return false;

	switch (pool->policy) {
	case QCOM_TZMEM_POLICY_STATIC:
		return false;
	case QCOM_TZMEM_POLICY_MULTIPLIER:
		requested = current_size * pool->increment;
		break;
	case QCOM_TZMEM_POLICY_ON_DEMAND:
		break;
	}

	return !qcom_tzmem_pool_add_memory(pool, requested, gfp);
}

/**
 * qcom_tzmem_alloc() - Allocate a memory chunk suitable for sharing with TZ.
 * @pool: TZ memory pool from which to allocate memory.
 * @size: Number of bytes to allocate.
 * @gfp: GFP flags.
 *
 * Can be used in any context.
 *
 * Return:
 * Address of the allocated buffer or NULL if no more memory can be allocated.
 * The buffer must be released using qcom_tzmem_free().
 */
void *qcom_tzmem_alloc(struct qcom_tzmem_pool *pool, size_t size, gfp_t gfp)
{
	unsigned long vaddr;
	int ret;

	if (!size)
		return NULL;

	size = PAGE_ALIGN(size);

	struct qcom_tzmem_chunk *chunk __free(kfree) = kzalloc(sizeof(*chunk),
							       gfp);
	if (!chunk)
		return NULL;

again:
	vaddr = gen_pool_alloc(pool->genpool, size);
	if (!vaddr) {
		if (qcom_tzmem_try_grow_pool(pool, size, gfp))
			goto again;

		return NULL;
	}

	chunk->size = size;
	chunk->owner = pool;

	scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) {
		ret = radix_tree_insert(&qcom_tzmem_chunks, vaddr, chunk);
		if (ret) {
			gen_pool_free(pool->genpool, vaddr, size);
			return NULL;
		}

		chunk = NULL;
	}

	return (void *)vaddr;
}
EXPORT_SYMBOL_GPL(qcom_tzmem_alloc);
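
/*
 * A minimal allocation sketch (hypothetical caller, 'pool' created as
 * shown above): grab a page-sized buffer for a call into the secure
 * world and release it afterwards. GFP_KERNEL is fine in process
 * context; atomic callers would pass GFP_ATOMIC instead.
 *
 *	void *buf;
 *
 *	buf = qcom_tzmem_alloc(pool, SZ_4K, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *
 *	... fill the buffer and pass it to the firmware ...
 *
 *	qcom_tzmem_free(buf);
 */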

/**
 * qcom_tzmem_free() - Release a buffer allocated from a TZ memory pool.
 * @vaddr: Virtual address of the buffer.
 *
 * Can be used in any context.
 */
void qcom_tzmem_free(void *vaddr)
{
	struct qcom_tzmem_chunk *chunk;

	scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock)
		chunk = radix_tree_delete_item(&qcom_tzmem_chunks,
					       (unsigned long)vaddr, NULL);

	if (!chunk) {
		WARN(1, "Virtual address %p not owned by TZ memory allocator",
		     vaddr);
		return;
	}

	scoped_guard(spinlock_irqsave, &chunk->owner->lock)
		gen_pool_free(chunk->owner->genpool, (unsigned long)vaddr,
			      chunk->size);
	kfree(chunk);
}
EXPORT_SYMBOL_GPL(qcom_tzmem_free);

/**
 * qcom_tzmem_to_phys() - Map the virtual address of TZ memory to physical.
 * @vaddr: Virtual address of memory allocated from a TZ memory pool.
 *
 * Can be used in any context. The address must point to memory allocated
 * using qcom_tzmem_alloc().
 *
 * Returns:
 * Physical address mapped from the virtual or 0 if the mapping failed.
 */
phys_addr_t qcom_tzmem_to_phys(void *vaddr)
{
	struct qcom_tzmem_chunk *chunk;
	struct radix_tree_iter iter;
	void __rcu **slot;
	phys_addr_t ret;

	guard(spinlock_irqsave)(&qcom_tzmem_chunks_lock);

	radix_tree_for_each_slot(slot, &qcom_tzmem_chunks, &iter, 0) {
		chunk = radix_tree_deref_slot_protected(slot,
						&qcom_tzmem_chunks_lock);

		ret = gen_pool_virt_to_phys(chunk->owner->genpool,
					    (unsigned long)vaddr);
		if (ret == -1)
			continue;

		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_tzmem_to_phys);

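/*
 * A minimal sketch (hypothetical caller): the usual pattern is to pass
 * the physical address of a TZ buffer to the firmware, e.g. as part of
 * an SCM call.
 *
 *	void *buf = qcom_tzmem_alloc(pool, SZ_4K, GFP_KERNEL);
 *	phys_addr_t phys;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *
 *	phys = qcom_tzmem_to_phys(buf);
 *	... hand phys to the secure world, then free the buffer ...
 *	qcom_tzmem_free(buf);
 */

/*
 * Register the device used for the coherent allocations and run the
 * mode-specific setup. May only be called once; subsequent calls return
 * -EBUSY.
 */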
int qcom_tzmem_enable(struct device *dev)
{
	if (qcom_tzmem_dev)
		return -EBUSY;

	qcom_tzmem_dev = dev;

	return qcom_tzmem_init();
}
EXPORT_SYMBOL_GPL(qcom_tzmem_enable);

MODULE_DESCRIPTION("TrustZone memory allocator for Qualcomm firmware drivers");
MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>");
MODULE_LICENSE("GPL");