xref: /linux/drivers/tee/qcomtee/shm.c (revision 38057e323657695ec8f814aff0cdd1c7e00d3e9b)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/mm.h>

#include "qcomtee.h"

/**
 * define MAX_OUTBOUND_BUFFER_SIZE - Maximum size of the outbound buffer.
 *
 * The size of the outbound buffer depends on QTEE callback requests.
 */
#define MAX_OUTBOUND_BUFFER_SIZE SZ_4K

/**
 * define MAX_INBOUND_BUFFER_SIZE - Maximum size of the inbound buffer.
 *
 * The size of the inbound buffer depends on the user's requests,
 * specifically the number and sizes of IB and OB arguments. If an
 * invocation requires a size larger than %MAX_INBOUND_BUFFER_SIZE, the
 * user should consider using another form of shared memory with QTEE.
 */
#define MAX_INBOUND_BUFFER_SIZE SZ_4M

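/*
 * Illustrative sketch, not part of the driver: it mirrors the sizing logic
 * of qcomtee_msg_buffers_alloc() below, showing how a caller could check up
 * front whether an argument array fits in a single inbound buffer before
 * falling back to another form of shared memory with QTEE. The helper name
 * is hypothetical; only identifiers already used in this file are relied on.
 */
static bool __maybe_unused qcomtee_msg_args_fit_example(struct qcomtee_arg *u)
{
	size_t size;
	int i;

	/* Header and offsets for the buffer arguments. */
	size = qcomtee_msg_buffer_args(struct qcomtee_msg_object_invoke,
				       qcomtee_args_len(u));

	/* Aligned payload of IB and OB arguments; size_add() saturates. */
	qcomtee_arg_for_each_input_buffer(i, u)
		size = size_add(size, qcomtee_msg_offset_align(u[i].b.size));

	qcomtee_arg_for_each_output_buffer(i, u)
		size = size_add(size, qcomtee_msg_offset_align(u[i].b.size));

	return size <= MAX_INBOUND_BUFFER_SIZE;
}
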
/**
 * qcomtee_msg_buffers_alloc() - Allocate the inbound and outbound buffers.
 * @oic: context to use for the current invocation.
 * @u: array of arguments for the current invocation.
 *
 * Calculate the size of the inbound buffer from the arguments in @u and
 * allocate both the inbound and outbound buffers from the teedev pool.
 *
 * Return: On success, returns 0. On error, returns < 0.
 */
int qcomtee_msg_buffers_alloc(struct qcomtee_object_invoke_ctx *oic,
			      struct qcomtee_arg *u)
{
	struct tee_context *ctx = oic->ctx;
	struct tee_shm *shm;
	size_t size;
	int i;

	/* Start offset in a message for buffer arguments. */
	size = qcomtee_msg_buffer_args(struct qcomtee_msg_object_invoke,
				       qcomtee_args_len(u));
	if (size > MAX_INBOUND_BUFFER_SIZE)
		return -EINVAL;

	/* Add the size of IB arguments. */
	qcomtee_arg_for_each_input_buffer(i, u) {
		size = size_add(size, qcomtee_msg_offset_align(u[i].b.size));
		if (size > MAX_INBOUND_BUFFER_SIZE)
			return -EINVAL;
	}

	/* Add the size of OB arguments. */
	qcomtee_arg_for_each_output_buffer(i, u) {
		size = size_add(size, qcomtee_msg_offset_align(u[i].b.size));
		if (size > MAX_INBOUND_BUFFER_SIZE)
			return -EINVAL;
	}

	/* Allocate the inbound buffer. */
	shm = tee_shm_alloc_priv_buf(ctx, size);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	oic->in_shm = shm;

	/* Allocate the outbound buffer. */
	shm = tee_shm_alloc_priv_buf(ctx, MAX_OUTBOUND_BUFFER_SIZE);
	if (IS_ERR(shm)) {
		tee_shm_free(oic->in_shm);

		return PTR_ERR(shm);
	}

	oic->out_shm = shm;

	oic->in_msg.addr = tee_shm_get_va(oic->in_shm, 0);
	oic->in_msg.size = tee_shm_get_size(oic->in_shm);
	oic->out_msg.addr = tee_shm_get_va(oic->out_shm, 0);
	oic->out_msg.size = tee_shm_get_size(oic->out_shm);
	/* QTEE assumes unused buffers are zeroed. */
	memzero_explicit(oic->in_msg.addr, oic->in_msg.size);
	memzero_explicit(oic->out_msg.addr, oic->out_msg.size);

	return 0;
}

/**
 * qcomtee_msg_buffers_free() - Free the inbound and outbound buffers.
 * @oic: context used for the current invocation.
 */
void qcomtee_msg_buffers_free(struct qcomtee_object_invoke_ctx *oic)
{
	tee_shm_free(oic->in_shm);
	tee_shm_free(oic->out_shm);
}

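/*
 * Illustrative sketch, not part of the driver: how a caller might pair
 * qcomtee_msg_buffers_alloc() with qcomtee_msg_buffers_free() around a QTEE
 * invocation. Marshalling the arguments and issuing the invocation are
 * elided; the function name is hypothetical.
 */
static int __maybe_unused
qcomtee_msg_buffers_usage_example(struct qcomtee_object_invoke_ctx *oic,
				  struct qcomtee_arg *u)
{
	int ret;

	ret = qcomtee_msg_buffers_alloc(oic, u);
	if (ret)
		return ret;

	/*
	 * Here oic->in_msg and oic->out_msg describe zeroed buffers that can
	 * be shared with QTEE: marshal @u into oic->in_msg.addr, invoke QTEE,
	 * and unmarshal the results (all elided).
	 */

	qcomtee_msg_buffers_free(oic);

	return 0;
}
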
/* Dynamic shared memory pool based on tee_dyn_shm_alloc_helper(). */

/* Create a SHM bridge for @shm so the buffer can be shared with QTEE. */
static int qcomtee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
				struct page **pages, size_t num_pages,
				unsigned long start)
{
	return qcom_tzmem_shm_bridge_create(shm->paddr, shm->size,
					    &shm->sec_world_id);
}

/* Delete the SHM bridge created by qcomtee_shm_register(). */
static int qcomtee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
{
	qcom_tzmem_shm_bridge_delete(shm->sec_world_id);

	return 0;
}

static int pool_op_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
			 size_t size, size_t align)
{
	return tee_dyn_shm_alloc_helper(shm, size, align, qcomtee_shm_register);
}

static void pool_op_free(struct tee_shm_pool *pool, struct tee_shm *shm)
{
	tee_dyn_shm_free_helper(shm, qcomtee_shm_unregister);
}

static void pool_op_destroy_pool(struct tee_shm_pool *pool)
{
	kfree(pool);
}

static const struct tee_shm_pool_ops pool_ops = {
	.alloc = pool_op_alloc,
	.free = pool_op_free,
	.destroy_pool = pool_op_destroy_pool,
};

/**
 * qcomtee_shm_pool_alloc() - Allocate a dynamic shared memory pool.
 *
 * Return: On success, returns the pool. On error, returns ERR_PTR.
 */
struct tee_shm_pool *qcomtee_shm_pool_alloc(void)
{
	struct tee_shm_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->ops = &pool_ops;

	return pool;
}
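
/*
 * Illustrative sketch, not part of the driver: how a probe path might consume
 * the pool returned by qcomtee_shm_pool_alloc(). The tee_desc and surrounding
 * probe code are placeholders; tee_device_alloc(), tee_device_register(),
 * tee_device_unregister() and tee_shm_pool_free() are the standard TEE
 * subsystem interfaces.
 */
static int __maybe_unused qcomtee_shm_pool_usage_example(struct device *dev,
							  const struct tee_desc *desc)
{
	struct tee_shm_pool *pool;
	struct tee_device *teedev;
	int ret;

	pool = qcomtee_shm_pool_alloc();
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	/* No driver data in this sketch. */
	teedev = tee_device_alloc(desc, dev, pool, NULL);
	if (IS_ERR(teedev)) {
		tee_shm_pool_free(pool);
		return PTR_ERR(teedev);
	}

	ret = tee_device_register(teedev);
	if (ret) {
		tee_device_unregister(teedev);
		tee_shm_pool_free(pool);
		return ret;
	}

	return 0;
}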