xref: /linux/drivers/firmware/qcom/qcom_tzmem.c (revision 8aa1e3a6f0ffbcfdf3bd7d87feb9090f96c54bc4)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory allocator for buffers shared with the TrustZone.
 *
 * Copyright (C) 2023-2024 Linaro Ltd.
 */

#include <linux/bug.h>
#include <linux/cleanup.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include "qcom_scm.h"
#include "qcom_tzmem.h"

struct qcom_tzmem_area {
	struct list_head list;
	void *vaddr;
	dma_addr_t paddr;
	size_t size;
	void *priv;
};

struct qcom_tzmem_pool {
	struct gen_pool *genpool;
	struct list_head areas;
	enum qcom_tzmem_policy policy;
	size_t increment;
	size_t max_size;
	spinlock_t lock;
};

struct qcom_tzmem_chunk {
	size_t size;
	struct qcom_tzmem_pool *owner;
};

static struct device *qcom_tzmem_dev;
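/*
 * Every buffer handed out by qcom_tzmem_alloc() is tracked in a global
 * radix tree, keyed by its virtual address, so that qcom_tzmem_free()
 * and qcom_tzmem_to_phys() can recover the owning pool from the bare
 * pointer returned to the caller.
 */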
static RADIX_TREE(qcom_tzmem_chunks, GFP_ATOMIC);
static DEFINE_SPINLOCK(qcom_tzmem_chunks_lock);

#if IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_GENERIC)

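/*
 * In generic mode, memory obtained from dma_alloc_coherent() can be
 * shared with TrustZone as-is, so no additional per-area setup or
 * teardown is required and these hooks are no-ops.
 */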
static int qcom_tzmem_init(void)
{
	return 0;
}

static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
{
	return 0;
}

static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area)
{

}

#elif IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE)

#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/of.h>

#define QCOM_SHM_BRIDGE_NUM_VM_SHIFT 9

static bool qcom_tzmem_using_shm_bridge;

/* List of machines that are known not to support SHM bridge correctly. */
static const char *const qcom_tzmem_blacklist[] = {
	"qcom,sc8180x",
	"qcom,sdm670", /* failure in GPU firmware loading */
	"qcom,sdm845", /* reset in rmtfs memory assignment */
	"qcom,sm7150", /* reset in rmtfs memory assignment */
	"qcom,sm8150", /* reset in rmtfs memory assignment */
	NULL
};

static int qcom_tzmem_init(void)
{
	const char *const *platform;
	int ret;

	for (platform = qcom_tzmem_blacklist; *platform; platform++) {
		if (of_machine_is_compatible(*platform))
			goto notsupp;
	}

	ret = qcom_scm_shm_bridge_enable(qcom_tzmem_dev);
	if (ret == -EOPNOTSUPP)
		goto notsupp;

	if (!ret)
		qcom_tzmem_using_shm_bridge = true;

	return ret;

notsupp:
	dev_info(qcom_tzmem_dev, "SHM Bridge not supported\n");
	return 0;
}

/**
 * qcom_tzmem_shm_bridge_create() - Create a SHM bridge.
 * @paddr: Physical address of the memory to share.
 * @size: Size of the memory to share.
 * @handle: Handle to the SHM bridge.
 *
 * On platforms that support SHM bridge, this function creates a SHM bridge
 * for the given memory region with QTEE. The handle returned by this function
 * must be passed to qcom_tzmem_shm_bridge_delete() to free the SHM bridge.
 *
 * Return: 0 on success, negative error number on failure.
 */
int qcom_tzmem_shm_bridge_create(phys_addr_t paddr, size_t size, u64 *handle)
{
	u64 pfn_and_ns_perm, ipfn_and_s_perm, size_and_flags;
	int ret;

	if (!qcom_tzmem_using_shm_bridge)
		return 0;

	pfn_and_ns_perm = paddr | QCOM_SCM_PERM_RW;
	ipfn_and_s_perm = paddr | QCOM_SCM_PERM_RW;
	size_and_flags = size | (1 << QCOM_SHM_BRIDGE_NUM_VM_SHIFT);

	ret = qcom_scm_shm_bridge_create(pfn_and_ns_perm, ipfn_and_s_perm,
					 size_and_flags, QCOM_SCM_VMID_HLOS,
					 handle);
	if (ret) {
		dev_err(qcom_tzmem_dev,
			"SHM Bridge failed: ret %d, paddr %pa, size %zu\n",
			ret, &paddr, size);

		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_tzmem_shm_bridge_create);

/**
 * qcom_tzmem_shm_bridge_delete() - Delete a SHM bridge.
 * @handle: Handle to the SHM bridge.
 *
 * On platforms that support SHM bridge, this function deletes the SHM bridge
 * for the given memory region. The handle must be the same as the one
 * returned by qcom_tzmem_shm_bridge_create().
 */
void qcom_tzmem_shm_bridge_delete(u64 handle)
{
	if (qcom_tzmem_using_shm_bridge)
		qcom_scm_shm_bridge_delete(handle);
}
EXPORT_SYMBOL_GPL(qcom_tzmem_shm_bridge_delete);

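/*
 * Example (illustrative sketch, not part of this driver): a caller that
 * owns a DMA buffer described by the hypothetical buf_paddr and buf_size
 * could bracket its use with an SHM bridge like so:
 *
 *	u64 bridge_handle;
 *	int ret;
 *
 *	ret = qcom_tzmem_shm_bridge_create(buf_paddr, buf_size,
 *					   &bridge_handle);
 *	if (ret)
 *		return ret;
 *
 *	...use the buffer in calls into QTEE...
 *
 *	qcom_tzmem_shm_bridge_delete(bridge_handle);
 */
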
static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
{
	int ret;

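	/*
	 * The bridge handle is heap-allocated because it must outlive this
	 * function: it is stashed in area->priv and released only when the
	 * area is torn down. __free(kfree) (from linux/cleanup.h) frees it
	 * automatically on the early error returns; no_free_ptr() disarms
	 * that cleanup once ownership is transferred.
	 */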
	u64 *handle __free(kfree) = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	ret = qcom_tzmem_shm_bridge_create(area->paddr, area->size, handle);
	if (ret)
		return ret;

	area->priv = no_free_ptr(handle);

	return 0;
}

static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area)
{
	u64 *handle = area->priv;

	qcom_tzmem_shm_bridge_delete(*handle);
	kfree(handle);
}

#endif /* CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE */

static int qcom_tzmem_pool_add_memory(struct qcom_tzmem_pool *pool,
				      size_t size, gfp_t gfp)
{
	int ret;

	struct qcom_tzmem_area *area __free(kfree) = kzalloc(sizeof(*area),
							     gfp);
	if (!area)
		return -ENOMEM;

	area->size = PAGE_ALIGN(size);

	area->vaddr = dma_alloc_coherent(qcom_tzmem_dev, area->size,
					 &area->paddr, gfp);
	if (!area->vaddr)
		return -ENOMEM;

	ret = qcom_tzmem_init_area(area);
	if (ret) {
		dma_free_coherent(qcom_tzmem_dev, area->size,
				  area->vaddr, area->paddr);
		return ret;
	}

	ret = gen_pool_add_virt(pool->genpool, (unsigned long)area->vaddr,
				(phys_addr_t)area->paddr, size, -1);
	if (ret) {
		dma_free_coherent(qcom_tzmem_dev, area->size,
				  area->vaddr, area->paddr);
		return ret;
	}

	scoped_guard(spinlock_irqsave, &pool->lock)
		list_add_tail(&area->list, &pool->areas);

	area = NULL;
	return 0;
}

/**
 * qcom_tzmem_pool_new() - Create a new TZ memory pool.
 * @config: Pool configuration.
 *
 * Create a new pool of memory suitable for sharing with the TrustZone.
 *
 * Must not be used in atomic context.
 *
 * Return: New memory pool address or ERR_PTR() on error.
 */
struct qcom_tzmem_pool *
qcom_tzmem_pool_new(const struct qcom_tzmem_pool_config *config)
{
	int ret = -ENOMEM;

	might_sleep();

	switch (config->policy) {
	case QCOM_TZMEM_POLICY_STATIC:
		if (!config->initial_size)
			return ERR_PTR(-EINVAL);
		break;
	case QCOM_TZMEM_POLICY_MULTIPLIER:
		if (!config->increment)
			return ERR_PTR(-EINVAL);
		break;
	case QCOM_TZMEM_POLICY_ON_DEMAND:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	struct qcom_tzmem_pool *pool __free(kfree) = kzalloc(sizeof(*pool),
							     GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->genpool = gen_pool_create(PAGE_SHIFT, -1);
	if (!pool->genpool)
		return ERR_PTR(-ENOMEM);

	gen_pool_set_algo(pool->genpool, gen_pool_best_fit, NULL);

	pool->policy = config->policy;
	pool->increment = config->increment;
	pool->max_size = config->max_size;
	INIT_LIST_HEAD(&pool->areas);
	spin_lock_init(&pool->lock);

	if (config->initial_size) {
		ret = qcom_tzmem_pool_add_memory(pool, config->initial_size,
						 GFP_KERNEL);
		if (ret) {
			gen_pool_destroy(pool->genpool);
			return ERR_PTR(ret);
		}
	}

	return_ptr(pool);
}
EXPORT_SYMBOL_GPL(qcom_tzmem_pool_new);
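
/*
 * Example (illustrative sketch, not part of this driver): creating a
 * pool that starts at one page and may grow on demand up to 256K; the
 * values are arbitrary assumptions of the example.
 *
 *	struct qcom_tzmem_pool_config config = {
 *		.policy = QCOM_TZMEM_POLICY_ON_DEMAND,
 *		.initial_size = SZ_4K,
 *		.max_size = SZ_256K,
 *	};
 *	struct qcom_tzmem_pool *pool;
 *
 *	pool = qcom_tzmem_pool_new(&config);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */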

/**
 * qcom_tzmem_pool_free() - Destroy a TZ memory pool and free all resources.
 * @pool: Memory pool to free.
 *
 * Must not be called while any chunk allocated from this pool is still
 * outstanding. Must not be used in atomic context.
 */
void qcom_tzmem_pool_free(struct qcom_tzmem_pool *pool)
{
	struct qcom_tzmem_area *area, *next;
	struct qcom_tzmem_chunk *chunk;
	struct radix_tree_iter iter;
	bool non_empty = false;
	void __rcu **slot;

	might_sleep();

	if (!pool)
		return;

	scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) {
		radix_tree_for_each_slot(slot, &qcom_tzmem_chunks, &iter, 0) {
			chunk = radix_tree_deref_slot_protected(slot,
						&qcom_tzmem_chunks_lock);

			if (chunk->owner == pool)
				non_empty = true;
		}
	}

	WARN(non_empty, "Freeing TZ memory pool with memory still allocated");

	list_for_each_entry_safe(area, next, &pool->areas, list) {
		list_del(&area->list);
		qcom_tzmem_cleanup_area(area);
		dma_free_coherent(qcom_tzmem_dev, area->size,
				  area->vaddr, area->paddr);
		kfree(area);
	}

	gen_pool_destroy(pool->genpool);
	kfree(pool);
}
EXPORT_SYMBOL_GPL(qcom_tzmem_pool_free);

static void devm_qcom_tzmem_pool_free(void *data)
{
	struct qcom_tzmem_pool *pool = data;

	qcom_tzmem_pool_free(pool);
}

/**
 * devm_qcom_tzmem_pool_new() - Managed variant of qcom_tzmem_pool_new().
 * @dev: Device managing this resource.
 * @config: Pool configuration.
 *
 * Must not be used in atomic context.
 *
 * Return: Address of the managed pool or ERR_PTR() on failure.
 */
struct qcom_tzmem_pool *
devm_qcom_tzmem_pool_new(struct device *dev,
			 const struct qcom_tzmem_pool_config *config)
{
	struct qcom_tzmem_pool *pool;
	int ret;

	pool = qcom_tzmem_pool_new(config);
	if (IS_ERR(pool))
		return pool;

	ret = devm_add_action_or_reset(dev, devm_qcom_tzmem_pool_free, pool);
	if (ret)
		return ERR_PTR(ret);

	return pool;
}
EXPORT_SYMBOL_GPL(devm_qcom_tzmem_pool_new);
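
/*
 * Example (illustrative sketch, not part of this driver): the same kind
 * of pool tied to a device's lifetime from a probe callback, with config
 * assumed to be set up as in the previous example:
 *
 *	pool = devm_qcom_tzmem_pool_new(&pdev->dev, &config);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */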

static bool qcom_tzmem_try_grow_pool(struct qcom_tzmem_pool *pool,
				     size_t requested, gfp_t gfp)
{
	size_t current_size = gen_pool_size(pool->genpool);

	if (pool->max_size && (current_size + requested) > pool->max_size)
		return false;

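	/*
	 * STATIC pools never grow, MULTIPLIER pools grow by current_size *
	 * increment bytes at a time, and ON_DEMAND pools grow by exactly
	 * the amount requested.
	 */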
	switch (pool->policy) {
	case QCOM_TZMEM_POLICY_STATIC:
		return false;
	case QCOM_TZMEM_POLICY_MULTIPLIER:
		requested = current_size * pool->increment;
		break;
	case QCOM_TZMEM_POLICY_ON_DEMAND:
		break;
	}

	return !qcom_tzmem_pool_add_memory(pool, requested, gfp);
}

/**
 * qcom_tzmem_alloc() - Allocate a memory chunk suitable for sharing with TZ.
 * @pool: TZ memory pool from which to allocate memory.
 * @size: Number of bytes to allocate.
 * @gfp: GFP flags.
 *
 * Can be used in any context.
 *
 * Return:
 * Address of the allocated buffer or NULL if no more memory can be allocated.
 * The buffer must be released using qcom_tzmem_free().
 */
void *qcom_tzmem_alloc(struct qcom_tzmem_pool *pool, size_t size, gfp_t gfp)
{
	unsigned long vaddr;
	int ret;

	if (!size)
		return NULL;

	size = PAGE_ALIGN(size);

	struct qcom_tzmem_chunk *chunk __free(kfree) = kzalloc(sizeof(*chunk),
							       gfp);
	if (!chunk)
		return NULL;

again:
	vaddr = gen_pool_alloc(pool->genpool, size);
	if (!vaddr) {
		if (qcom_tzmem_try_grow_pool(pool, size, gfp))
			goto again;

		return NULL;
	}

	chunk->size = size;
	chunk->owner = pool;

	scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) {
		ret = radix_tree_insert(&qcom_tzmem_chunks, vaddr, chunk);
		if (ret) {
			gen_pool_free(pool->genpool, vaddr, size);
			return NULL;
		}

		chunk = NULL;
	}

	return (void *)vaddr;
}
EXPORT_SYMBOL_GPL(qcom_tzmem_alloc);
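
/*
 * Example (illustrative sketch, not part of this driver): a typical
 * round-trip against a previously created pool; the names pool and tzbuf
 * are assumptions of the example.
 *
 *	void *tzbuf;
 *
 *	tzbuf = qcom_tzmem_alloc(pool, SZ_1K, GFP_KERNEL);
 *	if (!tzbuf)
 *		return -ENOMEM;
 *
 *	...fill the buffer and pass it to the firmware...
 *
 *	qcom_tzmem_free(tzbuf);
 */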

/**
 * qcom_tzmem_free() - Release a buffer allocated from a TZ memory pool.
 * @vaddr: Virtual address of the buffer.
 *
 * Can be used in any context.
 */
void qcom_tzmem_free(void *vaddr)
{
	struct qcom_tzmem_chunk *chunk;

	scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock)
		chunk = radix_tree_delete_item(&qcom_tzmem_chunks,
					       (unsigned long)vaddr, NULL);

	if (!chunk) {
		WARN(1, "Virtual address %p not owned by TZ memory allocator",
		     vaddr);
		return;
	}

	scoped_guard(spinlock_irqsave, &chunk->owner->lock)
		gen_pool_free(chunk->owner->genpool, (unsigned long)vaddr,
			      chunk->size);
	kfree(chunk);
}
EXPORT_SYMBOL_GPL(qcom_tzmem_free);

/**
 * qcom_tzmem_to_phys() - Map the virtual address of TZ memory to physical.
 * @vaddr: Virtual address of memory allocated from a TZ memory pool.
 *
 * Can be used in any context. The address must point to memory allocated
 * using qcom_tzmem_alloc().
 *
 * Return:
 * Physical address corresponding to @vaddr or 0 if the mapping failed.
 */
phys_addr_t qcom_tzmem_to_phys(void *vaddr)
{
	struct qcom_tzmem_chunk *chunk;
	struct radix_tree_iter iter;
	void __rcu **slot;
	phys_addr_t ret;

	guard(spinlock_irqsave)(&qcom_tzmem_chunks_lock);

	radix_tree_for_each_slot(slot, &qcom_tzmem_chunks, &iter, 0) {
		chunk = radix_tree_deref_slot_protected(slot,
						&qcom_tzmem_chunks_lock);

		ret = gen_pool_virt_to_phys(chunk->owner->genpool,
					    (unsigned long)vaddr);
		if (ret == -1)
			continue;

		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_tzmem_to_phys);
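
/*
 * Example (illustrative sketch, not part of this driver): firmware calls
 * take physical addresses, so a buffer from qcom_tzmem_alloc() is
 * usually translated just before the SCM call; tzbuf is a hypothetical
 * allocation from one of this driver's pools.
 *
 *	phys_addr_t paddr = qcom_tzmem_to_phys(tzbuf);
 *
 *	if (!paddr)
 *		return -EINVAL;
 */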
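/**
 * qcom_tzmem_enable() - Initialize the TZ memory allocator backend.
 * @dev: Device that will back the DMA allocations for TZ buffers.
 *
 * Must be called exactly once, before any pool is created. In practice
 * this is expected to happen from the core SCM driver's probe path.
 *
 * Return: 0 on success, negative error number on failure (-EBUSY if the
 * allocator was already enabled).
 */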
int qcom_tzmem_enable(struct device *dev)
{
	if (qcom_tzmem_dev)
		return -EBUSY;

	qcom_tzmem_dev = dev;

	return qcom_tzmem_init();
}
EXPORT_SYMBOL_GPL(qcom_tzmem_enable);

MODULE_DESCRIPTION("TrustZone memory allocator for Qualcomm firmware drivers");
MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>");
MODULE_LICENSE("GPL");