1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Memory allocator for buffers shared with the TrustZone.
4 *
5 * Copyright (C) 2023-2024 Linaro Ltd.
6 */
7
8 #include <linux/bug.h>
9 #include <linux/cleanup.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/err.h>
12 #include <linux/firmware/qcom/qcom_tzmem.h>
13 #include <linux/genalloc.h>
14 #include <linux/gfp.h>
15 #include <linux/kernel.h>
16 #include <linux/list.h>
17 #include <linux/mm.h>
18 #include <linux/radix-tree.h>
19 #include <linux/slab.h>
20 #include <linux/spinlock.h>
21 #include <linux/types.h>
22
23 #include "qcom_scm.h"
24 #include "qcom_tzmem.h"
25
/**
 * struct qcom_tzmem_area - A contiguous DMA-coherent region backing a pool.
 * @list: Entry in the owning pool's areas list (protected by pool->lock).
 * @vaddr: Kernel virtual address returned by dma_alloc_coherent().
 * @paddr: DMA address of the area as returned by dma_alloc_coherent().
 * @size: Page-aligned size of the area in bytes.
 * @priv: Mode-private data; in SHM-bridge mode this holds a kmalloc'ed
 *        u64 bridge handle, in generic mode it is unused.
 */
struct qcom_tzmem_area {
	struct list_head list;
	void *vaddr;
	dma_addr_t paddr;
	size_t size;
	void *priv;
};
33
/**
 * struct qcom_tzmem_pool - A pool of memory shareable with the TrustZone.
 * @genpool: genalloc pool from which individual chunks are carved.
 * @areas: List of qcom_tzmem_area regions feeding @genpool.
 * @policy: Growth policy: static, multiplier or on-demand.
 * @increment: Growth factor used with QCOM_TZMEM_POLICY_MULTIPLIER.
 * @max_size: Upper bound on the total pool size; 0 means no limit.
 * @lock: Protects @areas.
 */
struct qcom_tzmem_pool {
	struct gen_pool *genpool;
	struct list_head areas;
	enum qcom_tzmem_policy policy;
	size_t increment;
	size_t max_size;
	spinlock_t lock;
};
42
/**
 * struct qcom_tzmem_chunk - Bookkeeping for one buffer handed out by
 *                           qcom_tzmem_alloc().
 * @size: Page-aligned size of the chunk in bytes.
 * @owner: Pool the chunk was carved from; needed to return the memory
 *         to the right genpool in qcom_tzmem_free().
 */
struct qcom_tzmem_chunk {
	size_t size;
	struct qcom_tzmem_pool *owner;
};
47
48 static struct device *qcom_tzmem_dev;
49 static RADIX_TREE(qcom_tzmem_chunks, GFP_ATOMIC);
50 static DEFINE_SPINLOCK(qcom_tzmem_chunks_lock);
51
52 #if IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_GENERIC)
53
/* Generic mode: no firmware handshake is required, always succeeds. */
static int qcom_tzmem_init(void)
{
	return 0;
}
58
/* Generic mode: areas need no per-region setup with the firmware. */
static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
{
	return 0;
}
63
/* Generic mode: nothing to undo, counterpart of the no-op init above. */
static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area)
{

}
68
69 #elif IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE)
70
71 #include <linux/firmware/qcom/qcom_scm.h>
72 #include <linux/of.h>
73
74 #define QCOM_SHM_BRIDGE_NUM_VM_SHIFT 9
75
76 static bool qcom_tzmem_using_shm_bridge;
77
78 /* List of machines that are known to not support SHM bridge correctly. */
/* List of machines that are known to not support SHM bridge correctly. */
static const char *const qcom_tzmem_blacklist[] = {
	"qcom,sc7180",	/* hang in rmtfs memory assignment */
	"qcom,sc8180x",	/* NOTE(review): reason unrecorded — confirm before removing */
	"qcom,sdm670",	/* failure in GPU firmware loading */
	"qcom,sdm845",	/* reset in rmtfs memory assignment */
	"qcom,sm7150",	/* reset in rmtfs memory assignment */
	"qcom,sm8150",	/* reset in rmtfs memory assignment */
	NULL
};
88
/*
 * SHM-bridge mode: probe whether the firmware supports SHM bridge and
 * enable it. Falls back to plain DMA-coherent buffers (returns 0 with
 * qcom_tzmem_using_shm_bridge left false) on unsupported machines.
 */
static int qcom_tzmem_init(void)
{
	const char *const *platform;
	int ret;

	/* Skip machines known to misbehave with SHM bridge enabled. */
	for (platform = qcom_tzmem_blacklist; *platform; platform++) {
		if (of_machine_is_compatible(*platform))
			goto notsupp;
	}

	ret = qcom_scm_shm_bridge_enable(qcom_tzmem_dev);
	if (ret == -EOPNOTSUPP)
		goto notsupp;

	/* Any other error is propagated; success arms the bridge paths. */
	if (!ret)
		qcom_tzmem_using_shm_bridge = true;

	return ret;

notsupp:
	dev_info(qcom_tzmem_dev, "SHM Bridge not supported\n");
	return 0;
}
112
113 /**
114 * qcom_tzmem_shm_bridge_create() - Create a SHM bridge.
115 * @paddr: Physical address of the memory to share.
116 * @size: Size of the memory to share.
117 * @handle: Handle to the SHM bridge.
118 *
119 * On platforms that support SHM bridge, this function creates a SHM bridge
120 * for the given memory region with QTEE. The handle returned by this function
121 * must be passed to qcom_tzmem_shm_bridge_delete() to free the SHM bridge.
122 *
123 * Return: On success, returns 0; on failure, returns < 0.
124 */
int qcom_tzmem_shm_bridge_create(phys_addr_t paddr, size_t size, u64 *handle)
{
	u64 pfn_and_ns_perm, ipfn_and_s_perm, size_and_flags;
	int ret;

	/* No-op (success) when the firmware runs without SHM bridge. */
	if (!qcom_tzmem_using_shm_bridge)
		return 0;

	/*
	 * Pack the SCM call arguments: the address is page-aligned so its
	 * low bits are free to carry the RW permission flags, and the size
	 * word carries the number of VMs (one) in its upper bits.
	 */
	pfn_and_ns_perm = paddr | QCOM_SCM_PERM_RW;
	ipfn_and_s_perm = paddr | QCOM_SCM_PERM_RW;
	size_and_flags = size | (1 << QCOM_SHM_BRIDGE_NUM_VM_SHIFT);

	ret = qcom_scm_shm_bridge_create(pfn_and_ns_perm, ipfn_and_s_perm,
					 size_and_flags, QCOM_SCM_VMID_HLOS,
					 handle);
	if (ret) {
		dev_err(qcom_tzmem_dev,
			"SHM Bridge failed: ret %d paddr 0x%pa, size %zu\n",
			ret, &paddr, size);

		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_tzmem_shm_bridge_create);
151
152 /**
153 * qcom_tzmem_shm_bridge_delete() - Delete a SHM bridge.
154 * @handle: Handle to the SHM bridge.
155 *
156 * On platforms that support SHM bridge, this function deletes the SHM bridge
157 * for the given memory region. The handle must be the same as the one
158 * returned by qcom_tzmem_shm_bridge_create().
159 */
void qcom_tzmem_shm_bridge_delete(u64 handle)
{
	/* Nothing to tear down when SHM bridge was never enabled. */
	if (!qcom_tzmem_using_shm_bridge)
		return;

	qcom_scm_shm_bridge_delete(handle);
}
EXPORT_SYMBOL_GPL(qcom_tzmem_shm_bridge_delete);
166
/*
 * SHM-bridge mode: register the area with QTEE and stash the returned
 * bridge handle in area->priv for qcom_tzmem_cleanup_area().
 */
static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
{
	int ret;

	/* Auto-freed on early return; ownership moves to area->priv below. */
	u64 *handle __free(kfree) = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	ret = qcom_tzmem_shm_bridge_create(area->paddr, area->size, handle);
	if (ret)
		return ret;

	/* Success: disarm the __free() cleanup and transfer ownership. */
	area->priv = no_free_ptr(handle);

	return 0;
}
183
/*
 * SHM-bridge mode: tear down the bridge created in qcom_tzmem_init_area()
 * and release the handle storage held in area->priv.
 */
static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area)
{
	u64 *handle = area->priv;

	qcom_tzmem_shm_bridge_delete(*handle);
	kfree(handle);
}
191
192 #endif /* CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE */
193
/*
 * Allocate a new DMA-coherent area of at least @size bytes, register it
 * with the firmware (SHM bridge, where enabled) and add it to @pool's
 * genpool and area list.
 *
 * Returns 0 on success or a negative errno. On failure all intermediate
 * resources (area struct, DMA buffer, SHM bridge) are released.
 *
 * Fix vs. previous version: if gen_pool_add_virt() failed, the SHM bridge
 * created by qcom_tzmem_init_area() was never deleted and its handle
 * allocation leaked; the error paths now unwind in reverse order of
 * acquisition.
 */
static int qcom_tzmem_pool_add_memory(struct qcom_tzmem_pool *pool,
				      size_t size, gfp_t gfp)
{
	int ret;

	/* Auto-freed unless ownership is transferred to the pool below. */
	struct qcom_tzmem_area *area __free(kfree) = kzalloc(sizeof(*area),
							     gfp);
	if (!area)
		return -ENOMEM;

	area->size = PAGE_ALIGN(size);

	area->vaddr = dma_alloc_coherent(qcom_tzmem_dev, area->size,
					 &area->paddr, gfp);
	if (!area->vaddr)
		return -ENOMEM;

	ret = qcom_tzmem_init_area(area);
	if (ret)
		goto err_free_dma;

	/*
	 * Note: only the requested @size (not the page-aligned area->size)
	 * is made available to the genpool — preserved historical behavior.
	 */
	ret = gen_pool_add_virt(pool->genpool, (unsigned long)area->vaddr,
				(phys_addr_t)area->paddr, size, -1);
	if (ret)
		goto err_cleanup_area;

	scoped_guard(spinlock_irqsave, &pool->lock)
		list_add_tail(&area->list, &pool->areas);

	/* The pool owns the area now — disarm the __free() cleanup. */
	area = NULL;
	return 0;

err_cleanup_area:
	qcom_tzmem_cleanup_area(area);
err_free_dma:
	dma_free_coherent(qcom_tzmem_dev, area->size,
			  area->vaddr, area->paddr);
	return ret;
}
232
233 /**
234 * qcom_tzmem_pool_new() - Create a new TZ memory pool.
235 * @config: Pool configuration.
236 *
237 * Create a new pool of memory suitable for sharing with the TrustZone.
238 *
239 * Must not be used in atomic context.
240 *
241 * Return: New memory pool address or ERR_PTR() on error.
242 */
struct qcom_tzmem_pool *
qcom_tzmem_pool_new(const struct qcom_tzmem_pool_config *config)
{
	int ret = -ENOMEM;

	might_sleep();

	/* Validate the policy-specific configuration up front. */
	switch (config->policy) {
	case QCOM_TZMEM_POLICY_STATIC:
		/* A static pool can never grow, so it must start non-empty. */
		if (!config->initial_size)
			return ERR_PTR(-EINVAL);
		break;
	case QCOM_TZMEM_POLICY_MULTIPLIER:
		/* The multiplier policy needs a growth factor. */
		if (!config->increment)
			return ERR_PTR(-EINVAL);
		break;
	case QCOM_TZMEM_POLICY_ON_DEMAND:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	/* Auto-freed on error paths; released via return_ptr() on success. */
	struct qcom_tzmem_pool *pool __free(kfree) = kzalloc_obj(*pool);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	/* Page-granularity allocations, any NUMA node. */
	pool->genpool = gen_pool_create(PAGE_SHIFT, -1);
	if (!pool->genpool)
		return ERR_PTR(-ENOMEM);

	gen_pool_set_algo(pool->genpool, gen_pool_best_fit, NULL);

	pool->policy = config->policy;
	pool->increment = config->increment;
	pool->max_size = config->max_size;
	INIT_LIST_HEAD(&pool->areas);
	spin_lock_init(&pool->lock);

	if (config->initial_size) {
		ret = qcom_tzmem_pool_add_memory(pool, config->initial_size,
						 GFP_KERNEL);
		if (ret) {
			gen_pool_destroy(pool->genpool);
			return ERR_PTR(ret);
		}
	}

	return_ptr(pool);
}
292 EXPORT_SYMBOL_GPL(qcom_tzmem_pool_new);
293
294 /**
295 * qcom_tzmem_pool_free() - Destroy a TZ memory pool and free all resources.
296 * @pool: Memory pool to free.
297 *
298 * Must not be called if any of the allocated chunks has not been freed.
299 * Must not be used in atomic context.
300 */
void qcom_tzmem_pool_free(struct qcom_tzmem_pool *pool)
{
	struct qcom_tzmem_area *area, *next;
	struct qcom_tzmem_chunk *chunk;
	struct radix_tree_iter iter;
	bool non_empty = false;
	void __rcu **slot;

	might_sleep();

	if (!pool)
		return;

	/*
	 * Sanity check: walk the global chunk tree looking for outstanding
	 * allocations that still belong to this pool. Destroying the pool
	 * anyway would leave those chunks dangling, hence the WARN below.
	 */
	scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) {
		radix_tree_for_each_slot(slot, &qcom_tzmem_chunks, &iter, 0) {
			chunk = radix_tree_deref_slot_protected(slot,
						&qcom_tzmem_chunks_lock);

			if (chunk->owner == pool)
				non_empty = true;
		}
	}

	WARN(non_empty, "Freeing TZ memory pool with memory still allocated");

	/* Unwind each backing area: firmware teardown, then DMA buffer. */
	list_for_each_entry_safe(area, next, &pool->areas, list) {
		list_del(&area->list);
		qcom_tzmem_cleanup_area(area);
		dma_free_coherent(qcom_tzmem_dev, area->size,
				  area->vaddr, area->paddr);
		kfree(area);
	}

	gen_pool_destroy(pool->genpool);
	kfree(pool);
}
337 EXPORT_SYMBOL_GPL(qcom_tzmem_pool_free);
338
/* devres action callback: release a pool created by devm_qcom_tzmem_pool_new(). */
static void devm_qcom_tzmem_pool_free(void *data)
{
	qcom_tzmem_pool_free(data);
}
345
346 /**
347 * devm_qcom_tzmem_pool_new() - Managed variant of qcom_tzmem_pool_new().
348 * @dev: Device managing this resource.
349 * @config: Pool configuration.
350 *
351 * Must not be used in atomic context.
352 *
353 * Return: Address of the managed pool or ERR_PTR() on failure.
354 */
struct qcom_tzmem_pool *
devm_qcom_tzmem_pool_new(struct device *dev,
			 const struct qcom_tzmem_pool_config *config)
{
	struct qcom_tzmem_pool *pool;
	int ret;

	pool = qcom_tzmem_pool_new(config);
	if (IS_ERR(pool))
		return pool;

	/* On action-registration failure the pool is freed by the reset. */
	ret = devm_add_action_or_reset(dev, devm_qcom_tzmem_pool_free, pool);
	if (ret)
		return ERR_PTR(ret);

	return pool;
}
372 EXPORT_SYMBOL_GPL(devm_qcom_tzmem_pool_new);
373
/*
 * Try to extend @pool by at least @requested bytes according to its growth
 * policy. Returns true if new memory was added (the caller may retry its
 * allocation), false if the pool cannot or must not grow.
 */
static bool qcom_tzmem_try_grow_pool(struct qcom_tzmem_pool *pool,
				     size_t requested, gfp_t gfp)
{
	size_t current_size = gen_pool_size(pool->genpool);

	/* Respect the configured size cap, if any. */
	if (pool->max_size && (current_size + requested) > pool->max_size)
		return false;

	switch (pool->policy) {
	case QCOM_TZMEM_POLICY_STATIC:
		return false;
	case QCOM_TZMEM_POLICY_MULTIPLIER:
		/*
		 * Grow by a multiple of the current size. NOTE(review): this
		 * overrides @requested — presumably the result is always at
		 * least as large as the original request; confirm for
		 * increment == 1 on a fragmented pool.
		 */
		requested = current_size * pool->increment;
		break;
	case QCOM_TZMEM_POLICY_ON_DEMAND:
		break;
	}

	return !qcom_tzmem_pool_add_memory(pool, requested, gfp);
}
394
395 /**
396 * qcom_tzmem_alloc() - Allocate a memory chunk suitable for sharing with TZ.
397 * @pool: TZ memory pool from which to allocate memory.
398 * @size: Number of bytes to allocate.
399 * @gfp: GFP flags.
400 *
401 * Can be used in any context.
402 *
403 * Return:
404 * Address of the allocated buffer or NULL if no more memory can be allocated.
405 * The buffer must be released using qcom_tzmem_free().
406 */
void *qcom_tzmem_alloc(struct qcom_tzmem_pool *pool, size_t size, gfp_t gfp)
{
	unsigned long vaddr;
	int ret;

	if (!size)
		return NULL;

	/* All chunks are handed out in whole pages. */
	size = PAGE_ALIGN(size);

	/* Auto-freed unless inserted into the chunk tree below. */
	struct qcom_tzmem_chunk *chunk __free(kfree) = kzalloc(sizeof(*chunk),
							       gfp);
	if (!chunk)
		return NULL;

again:
	vaddr = gen_pool_alloc(pool->genpool, size);
	if (!vaddr) {
		/* Pool exhausted — grow it per policy and retry once more. */
		if (qcom_tzmem_try_grow_pool(pool, size, gfp))
			goto again;

		return NULL;
	}

	chunk->size = size;
	chunk->owner = pool;

	/* Record the chunk keyed by its vaddr for free()/to_phys() lookup. */
	scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) {
		ret = radix_tree_insert(&qcom_tzmem_chunks, vaddr, chunk);
		if (ret) {
			gen_pool_free(pool->genpool, vaddr, size);
			return NULL;
		}

		/* Tree owns the chunk now — disarm the __free() cleanup. */
		chunk = NULL;
	}

	return (void *)vaddr;
}
446 EXPORT_SYMBOL_GPL(qcom_tzmem_alloc);
447
448 /**
449 * qcom_tzmem_free() - Release a buffer allocated from a TZ memory pool.
450 * @vaddr: Virtual address of the buffer.
451 *
452 * Can be used in any context.
453 */
void qcom_tzmem_free(void *vaddr)
{
	struct qcom_tzmem_chunk *chunk;

	/* Atomically remove the bookkeeping entry for this address. */
	scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock)
		chunk = radix_tree_delete_item(&qcom_tzmem_chunks,
					       (unsigned long)vaddr, NULL);

	if (!chunk) {
		WARN(1, "Virtual address %p not owned by TZ memory allocator",
		     vaddr);
		return;
	}

	/* Return the memory to the owning pool's genpool. */
	scoped_guard(spinlock_irqsave, &chunk->owner->lock)
		gen_pool_free(chunk->owner->genpool, (unsigned long)vaddr,
			      chunk->size);
	kfree(chunk);
}
473 EXPORT_SYMBOL_GPL(qcom_tzmem_free);
474
475 /**
476 * qcom_tzmem_to_phys() - Map the virtual address of TZ memory to physical.
477 * @vaddr: Virtual address of memory allocated from a TZ memory pool.
478 *
479 * Can be used in any context. The address must point to memory allocated
480 * using qcom_tzmem_alloc().
481 *
482 * Returns:
483 * Physical address mapped from the virtual or 0 if the mapping failed.
484 */
qcom_tzmem_to_phys(void * vaddr)485 phys_addr_t qcom_tzmem_to_phys(void *vaddr)
486 {
487 struct qcom_tzmem_chunk *chunk;
488 struct radix_tree_iter iter;
489 void __rcu **slot;
490 phys_addr_t ret;
491
492 guard(spinlock_irqsave)(&qcom_tzmem_chunks_lock);
493
494 radix_tree_for_each_slot(slot, &qcom_tzmem_chunks, &iter, 0) {
495 chunk = radix_tree_deref_slot_protected(slot,
496 &qcom_tzmem_chunks_lock);
497
498 ret = gen_pool_virt_to_phys(chunk->owner->genpool,
499 (unsigned long)vaddr);
500 if (ret == -1)
501 continue;
502
503 return ret;
504 }
505
506 return 0;
507 }
508 EXPORT_SYMBOL_GPL(qcom_tzmem_to_phys);
509
/*
 * Bind the allocator to its backing device and run mode-specific init.
 * Only one device may own the allocator; subsequent calls fail with
 * -EBUSY. Returns 0 on success or a negative errno from init.
 */
int qcom_tzmem_enable(struct device *dev)
{
	if (qcom_tzmem_dev)
		return -EBUSY;

	qcom_tzmem_dev = dev;

	return qcom_tzmem_init();
}
EXPORT_SYMBOL_GPL(qcom_tzmem_enable);
519 EXPORT_SYMBOL_GPL(qcom_tzmem_enable);
520
521 MODULE_DESCRIPTION("TrustZone memory allocator for Qualcomm firmware drivers");
522 MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>");
523 MODULE_LICENSE("GPL");
524