/* xref: /linux/drivers/tee/optee/call.c (revision 0a94608f0f7de9b1135ffea3546afe68eafef57f) */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2021, Linaro Limited
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include "optee_private.h"

void optee_cq_wait_init(struct optee_call_queue *cq,
			struct optee_call_waiter *w)
{
	/*
	 * We're preparing to make a call to secure world. In case we can't
	 * allocate a thread in secure world we'll end up waiting in
	 * optee_cq_wait_for_completion().
	 *
	 * Normally if there's no contention in secure world the call will
	 * complete and we can clean up directly with optee_cq_wait_final().
	 */
	mutex_lock(&cq->mutex);

	/*
	 * We add ourselves to the queue, but we don't wait. This
	 * guarantees that we don't lose a completion if secure world
	 * returns busy and another thread just exited and tries to
	 * complete someone.
	 */
	init_completion(&w->c);
	list_add_tail(&w->list_node, &cq->waiters);

	mutex_unlock(&cq->mutex);
}

void optee_cq_wait_for_completion(struct optee_call_queue *cq,
				  struct optee_call_waiter *w)
{
	wait_for_completion(&w->c);

	mutex_lock(&cq->mutex);

	/* Move to the end of the list to get out of the way of other waiters */
	list_del(&w->list_node);
	reinit_completion(&w->c);
	list_add_tail(&w->list_node, &cq->waiters);

	mutex_unlock(&cq->mutex);
}

/* Complete the first waiter in the queue that hasn't been completed yet */
static void optee_cq_complete_one(struct optee_call_queue *cq)
{
	struct optee_call_waiter *w;

	list_for_each_entry(w, &cq->waiters, list_node) {
		if (!completion_done(&w->c)) {
			complete(&w->c);
			break;
		}
	}
}

void optee_cq_wait_final(struct optee_call_queue *cq,
			 struct optee_call_waiter *w)
{
	/*
	 * We're done with the call to secure world. The thread in secure
	 * world that was used for this call is now available for some
	 * other task to use.
	 */
	mutex_lock(&cq->mutex);

	/* Get out of the list */
	list_del(&w->list_node);

	/* Wake up one waiting task, if any */
	optee_cq_complete_one(cq);

	/*
	 * If our completion has been signalled, another task was just
	 * done with its call to secure world. Since yet another thread
	 * is now available in secure world, wake up one more waiting
	 * task, if any.
	 */
	if (completion_done(&w->c))
		optee_cq_complete_one(cq);

	mutex_unlock(&cq->mutex);
}

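/*
 * Usage sketch (illustrative, not part of this file): the three helpers
 * above are meant to bracket the actual call into secure world made by a
 * transport backend behind optee->ops->do_call_with_arg(). invoke_fn()
 * and RETURN_ETHREAD_LIMIT below are hypothetical stand-ins for the real
 * SMC/FF-A invocation and its "all secure threads busy" status code:
 *
 *	struct optee_call_waiter w;
 *
 *	// Claim a slot in the waiter queue before the first attempt so
 *	// no completion can be lost if secure world reports busy.
 *	optee_cq_wait_init(&optee->call_queue, &w);
 *	while (invoke_fn(...) == RETURN_ETHREAD_LIMIT) {
 *		// Out of secure threads; sleep until a returning call
 *		// completes us, then retry.
 *		optee_cq_wait_for_completion(&optee->call_queue, &w);
 *	}
 *	// Leave the queue and pass the freed thread on to a waiter.
 *	optee_cq_wait_final(&optee->call_queue, &w);
 */
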
/* Requires ctxdata->mutex to be held */
static struct optee_session *find_session(struct optee_context_data *ctxdata,
					  u32 session_id)
{
	struct optee_session *sess;

	list_for_each_entry(sess, &ctxdata->sess_list, list_node)
		if (sess->session_id == session_id)
			return sess;

	return NULL;
}

struct tee_shm *optee_get_msg_arg(struct tee_context *ctx, size_t num_params,
				  struct optee_msg_arg **msg_arg)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	size_t sz = OPTEE_MSG_GET_ARG_SIZE(num_params);
	struct tee_shm *shm;
	struct optee_msg_arg *ma;

	/*
	 * rpc_arg_count is set to the number of allocated parameters in
	 * the RPC argument struct if a second MSG arg struct is expected.
	 * The second arg struct will then be used for RPC.
	 */
	if (optee->rpc_arg_count)
		sz += OPTEE_MSG_GET_ARG_SIZE(optee->rpc_arg_count);

	shm = tee_shm_alloc_priv_buf(ctx, sz);
	if (IS_ERR(shm))
		return shm;

	ma = tee_shm_get_va(shm, 0);
	if (IS_ERR(ma)) {
		tee_shm_free(shm);
		return (void *)ma;
	}

	memset(ma, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
	ma->num_params = num_params;
	*msg_arg = ma;

	return shm;
}

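/*
 * Usage sketch (illustrative): every request below follows the same
 * shape around optee_get_msg_arg(). For a parameter-less command it
 * reduces to roughly the following (compare optee_close_session_helper()
 * and optee_cancel_req() further down):
 *
 *	struct optee_msg_arg *msg_arg;
 *	struct tee_shm *shm;
 *
 *	shm = optee_get_msg_arg(ctx, 0, &msg_arg);
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	msg_arg->cmd = OPTEE_MSG_CMD_...;	// fill in the request
 *	optee->ops->do_call_with_arg(ctx, shm);
 *	tee_shm_free(shm);
 */
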
int optee_open_session(struct tee_context *ctx,
		       struct tee_ioctl_open_session_arg *arg,
		       struct tee_param *param)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_context_data *ctxdata = ctx->data;
	int rc;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	struct optee_session *sess = NULL;
	uuid_t client_uuid;

	/* +2 for the meta parameters added below */
	shm = optee_get_msg_arg(ctx, arg->num_params + 2, &msg_arg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
	msg_arg->cancel_id = arg->cancel_id;

	/*
	 * Initialize and add the meta parameters needed when opening a
	 * session.
	 */
	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
				  OPTEE_MSG_ATTR_META;
	msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
				  OPTEE_MSG_ATTR_META;
	memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
	msg_arg->params[1].u.value.c = arg->clnt_login;

	rc = tee_session_calc_client_uuid(&client_uuid, arg->clnt_login,
					  arg->clnt_uuid);
	if (rc)
		goto out;
	export_uuid(msg_arg->params[1].u.octets, &client_uuid);

	rc = optee->ops->to_msg_param(optee, msg_arg->params + 2,
				      arg->num_params, param);
	if (rc)
		goto out;

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		rc = -ENOMEM;
		goto out;
	}

	if (optee->ops->do_call_with_arg(ctx, shm)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	if (msg_arg->ret == TEEC_SUCCESS) {
		/* A new session has been created; add it to the list. */
		sess->session_id = msg_arg->session;
		mutex_lock(&ctxdata->mutex);
		list_add(&sess->list_node, &ctxdata->sess_list);
		mutex_unlock(&ctxdata->mutex);
	} else {
		kfree(sess);
	}

	if (optee->ops->from_msg_param(optee, param, arg->num_params,
				       msg_arg->params + 2)) {
		arg->ret = TEEC_ERROR_COMMUNICATION;
		arg->ret_origin = TEEC_ORIGIN_COMMS;
		/* Close the session again to avoid leakage */
		optee_close_session(ctx, msg_arg->session);
	} else {
		arg->session = msg_arg->session;
		arg->ret = msg_arg->ret;
		arg->ret_origin = msg_arg->ret_origin;
	}
out:
	tee_shm_free(shm);

	return rc;
}

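/*
 * For reference, the open-session message built above is laid out like
 * this; params[0] and params[1] are the OPTEE_MSG_ATTR_META parameters
 * that secure world expects in front of the caller's own parameters:
 *
 *	params[0].u.value	TA UUID (copied from arg->uuid)
 *	params[1].u.octets	client UUID derived from arg->clnt_uuid
 *	params[1].u.value.c	client login class (arg->clnt_login)
 *	params[2..]		arg->num_params caller parameters
 */
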
int optee_close_session_helper(struct tee_context *ctx, u32 session)
{
	struct tee_shm *shm;
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_msg_arg *msg_arg;

	shm = optee_get_msg_arg(ctx, 0, &msg_arg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
	msg_arg->session = session;
	optee->ops->do_call_with_arg(ctx, shm);

	tee_shm_free(shm);

	return 0;
}

int optee_close_session(struct tee_context *ctx, u32 session)
{
	struct optee_context_data *ctxdata = ctx->data;
	struct optee_session *sess;

	/* Check that the session is valid and remove it from the list */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, session);
	if (sess)
		list_del(&sess->list_node);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;
	kfree(sess);

	return optee_close_session_helper(ctx, session);
}

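/*
 * Usage sketch (illustrative): in-kernel clients don't call
 * optee_open_session()/optee_close_session() directly; they go through
 * the TEE core, which dispatches here via the driver ops. Roughly, with
 * error handling elided and ta_uuid a hypothetical uuid_t for the TA:
 *
 *	struct tee_ioctl_open_session_arg sess_arg = { };
 *
 *	export_uuid(sess_arg.uuid, &ta_uuid);
 *	sess_arg.clnt_login = TEE_IOCTL_LOGIN_REE_KERNEL;
 *	tee_client_open_session(ctx, &sess_arg, NULL);
 *	// ... use sess_arg.session ...
 *	tee_client_close_session(ctx, sess_arg.session);
 */
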
int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
		      struct tee_param *param)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_context_data *ctxdata = ctx->data;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	struct optee_session *sess;
	int rc;

	/* Check that the session is valid */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, arg->session);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;

	shm = optee_get_msg_arg(ctx, arg->num_params, &msg_arg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);
	msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
	msg_arg->func = arg->func;
	msg_arg->session = arg->session;
	msg_arg->cancel_id = arg->cancel_id;

	rc = optee->ops->to_msg_param(optee, msg_arg->params, arg->num_params,
				      param);
	if (rc)
		goto out;

	if (optee->ops->do_call_with_arg(ctx, shm)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	if (optee->ops->from_msg_param(optee, param, arg->num_params,
				       msg_arg->params)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	arg->ret = msg_arg->ret;
	arg->ret_origin = msg_arg->ret_origin;
out:
	tee_shm_free(shm);
	return rc;
}

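/*
 * Usage sketch (illustrative): a kernel client invocation, again routed
 * through the TEE core to optee_invoke_func(). The command ID and the
 * parameter value are hypothetical:
 *
 *	struct tee_ioctl_invoke_arg inv_arg = { };
 *	struct tee_param param[4] = { };
 *
 *	inv_arg.func = TA_CMD_EXAMPLE;		// hypothetical command ID
 *	inv_arg.session = sess_arg.session;
 *	inv_arg.num_params = 4;
 *	param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
 *	param[0].u.value.a = 42;
 *	tee_client_invoke_func(ctx, &inv_arg, param);
 *	// inv_arg.ret now holds the TEEC_* status from secure world
 */
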
int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_context_data *ctxdata = ctx->data;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	struct optee_session *sess;

	/* Check that the session is valid */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, session);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;

	shm = optee_get_msg_arg(ctx, 0, &msg_arg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = OPTEE_MSG_CMD_CANCEL;
	msg_arg->session = session;
	msg_arg->cancel_id = cancel_id;
	optee->ops->do_call_with_arg(ctx, shm);

	tee_shm_free(shm);
	return 0;
}

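/*
 * Usage note (illustrative): cancellation is asynchronous. A caller
 * blocked in optee_invoke_func() can be interrupted by another thread
 * issuing optee_cancel_req() with the cancel_id the invocation was
 * started with; my_cancel_id below is hypothetical:
 *
 *	// thread A
 *	arg->cancel_id = my_cancel_id;
 *	optee_invoke_func(ctx, arg, param);	// blocked in secure world
 *
 *	// thread B
 *	optee_cancel_req(ctx, my_cancel_id, session);
 *
 * Whether the request actually aborts the command is up to the TA; a TA
 * that never polls for cancellation simply runs to completion.
 */
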
/* Check that the pgprot of a mapping denotes normal (cacheable) memory */
static bool is_normal_memory(pgprot_t p)
{
#if defined(CONFIG_ARM)
	return (((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC) ||
		((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK));
#elif defined(CONFIG_ARM64)
	return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
#else
#error "Unsupported architecture"
#endif
}

static int __check_mem_type(struct vm_area_struct *vma, unsigned long end)
{
	/* Walk the VMAs; every mapping up to @end must be normal memory */
	while (vma && is_normal_memory(vma->vm_page_prot)) {
		if (vma->vm_end >= end)
			return 0;
		vma = vma->vm_next;
	}

	return -EINVAL;
}

int optee_check_mem_type(unsigned long start, size_t num_pages)
{
	struct mm_struct *mm = current->mm;
	int rc;

	/*
	 * Allow kernel addresses to be registered with OP-TEE since
	 * kernel pages are always configured as normal memory.
	 */
	if (virt_addr_valid(start))
		return 0;

	mmap_read_lock(mm);
	rc = __check_mem_type(find_vma(mm, start),
			      start + num_pages * PAGE_SIZE);
	mmap_read_unlock(mm);

	return rc;
}
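
/*
 * Usage sketch (illustrative): this check is intended for the shared
 * memory registration path, which must refuse to hand device or
 * strongly-ordered mappings to secure world since OP-TEE maps shared
 * buffers as normal cached memory on its side. A registration helper
 * would typically do something like:
 *
 *	rc = optee_check_mem_type(start, num_pages);
 *	if (rc)
 *		return rc;	// not normal memory, refuse to share
 */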