xref: /linux/drivers/tee/optee/supp.c (revision 1f2367a39f17bd553a75e179a747f9b257bc9478)
/*
 * Copyright (c) 2015, Linaro Limited
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "optee_private.h"

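/*
 * Requests to the supplicant flow through this file as follows: a thread
 * serving secure world calls optee_supp_thrd_req(), which queues a
 * struct optee_supp_req and sleeps on its completion. The user space
 * tee-supplicant picks the request up through optee_supp_recv() (reached
 * from the TEE subsystem's supplicant ioctls), handles it and posts the
 * result with optee_supp_send(), which completes the request and wakes
 * the waiting thread.
 */

/**
 * struct optee_supp_req - request sent to the supplicant
 * @link:	list element in supp->reqs while the request is queued
 * @in_queue:	true while @link is on supp->reqs
 * @func:	supplicant function to invoke
 * @ret:	result of the request, filled in before @c is completed
 * @num_params:	number of elements in @param
 * @param:	parameters for @func
 * @c:		completion the requesting thread waits on until the
 *		supplicant has answered or the request is aborted
 */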
struct optee_supp_req {
	struct list_head link;

	bool in_queue;
	u32 func;
	u32 ret;
	size_t num_params;
	struct tee_param *param;

	struct completion c;
};

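/**
 * optee_supp_init() - initialize supplicant state
 * @supp:	supplicant state to initialize
 *
 * Sets up the request queue, the idr tracking requests handed over to the
 * supplicant and the completion used to wait for new requests. req_id is
 * set to -1 to indicate that no synchronous request is currently being
 * processed.
 */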
void optee_supp_init(struct optee_supp *supp)
{
	memset(supp, 0, sizeof(*supp));
	mutex_init(&supp->mutex);
	init_completion(&supp->reqs_c);
	idr_init(&supp->idr);
	INIT_LIST_HEAD(&supp->reqs);
	supp->req_id = -1;
}

void optee_supp_uninit(struct optee_supp *supp)
{
	mutex_destroy(&supp->mutex);
	idr_destroy(&supp->idr);
}

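/**
 * optee_supp_release() - abort all outstanding supplicant requests
 * @supp:	supplicant state
 *
 * Called when the supplicant's context is released. Requests already
 * handed over to the supplicant (tracked in the idr) as well as requests
 * still waiting in the queue are completed with TEEC_ERROR_COMMUNICATION
 * so that the threads waiting in optee_supp_thrd_req() can return a
 * result to secure world.
 */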
void optee_supp_release(struct optee_supp *supp)
{
	int id;
	struct optee_supp_req *req;
	struct optee_supp_req *req_tmp;

	mutex_lock(&supp->mutex);

	/* Abort all requests retrieved by the supplicant */
	idr_for_each_entry(&supp->idr, req, id) {
		idr_remove(&supp->idr, id);
		req->ret = TEEC_ERROR_COMMUNICATION;
		complete(&req->c);
	}

	/* Abort all queued requests */
	list_for_each_entry_safe(req, req_tmp, &supp->reqs, link) {
		list_del(&req->link);
		req->in_queue = false;
		req->ret = TEEC_ERROR_COMMUNICATION;
		complete(&req->c);
	}

	supp->ctx = NULL;
	supp->req_id = -1;

	mutex_unlock(&supp->mutex);
}

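/*
 * The function below is called while the driver is servicing an RPC from
 * secure world (see the RPC handling in rpc.c), potentially from several
 * threads at once, which is why the request queue is protected by
 * supp->mutex.
 */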
/**
 * optee_supp_thrd_req() - request service from supplicant
 * @ctx:	context doing the request
 * @func:	function requested
 * @num_params:	number of elements in @param array
 * @param:	parameters for function
 *
 * Returns result of operation to be passed to secure world
 */
u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
			struct tee_param *param)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_supp *supp = &optee->supp;
	struct optee_supp_req *req;
	bool interruptable;
	u32 ret;

	/*
	 * Return early if there is no supplicant available and this is
	 * a non-blocking request.
	 */
	if (!supp->ctx && ctx->supp_nowait)
		return TEEC_ERROR_COMMUNICATION;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return TEEC_ERROR_OUT_OF_MEMORY;

	init_completion(&req->c);
	req->func = func;
	req->num_params = num_params;
	req->param = param;

	/* Insert the request in the request list */
	mutex_lock(&supp->mutex);
	list_add_tail(&req->link, &supp->reqs);
	req->in_queue = true;
	mutex_unlock(&supp->mutex);

	/* Tell any waiting supplicant thread there's a new request */
	complete(&supp->reqs_c);

	/*
	 * Wait for the supplicant to process the request and return the
	 * result. Once wait_for_completion(&req->c) has returned
	 * successfully we have exclusive access to the request again.
	 */
	while (wait_for_completion_interruptible(&req->c)) {
		mutex_lock(&supp->mutex);
		interruptable = !supp->ctx;
		if (interruptable) {
			/*
			 * There's no supplicant available and since
			 * supp->mutex currently is held none can become
			 * available until the mutex is released again.
			 *
			 * Interrupting an RPC to the supplicant is only
			 * allowed as a way of slightly improving the user
			 * experience in case the supplicant hasn't been
			 * started yet. During normal operation the supplicant
			 * will serve all requests in a timely manner and
			 * interrupting them wouldn't make sense.
			 */
			if (req->in_queue) {
				list_del(&req->link);
				req->in_queue = false;
			}
		}
		mutex_unlock(&supp->mutex);

		if (interruptable) {
			req->ret = TEEC_ERROR_COMMUNICATION;
			break;
		}
	}

	ret = req->ret;
	kfree(req);

	return ret;
}

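/*
 * Pop the oldest queued request, if any, and hand it over to the
 * supplicant: the request is moved from supp->reqs to the idr and *id is
 * set to the allocated (non-zero) id identifying it until the answer
 * arrives in optee_supp_send(). Returns NULL if the queue is empty and an
 * ERR_PTR() if synchronous and asynchronous requests are mixed or there
 * isn't enough room for the parameters.
 */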
static struct optee_supp_req *supp_pop_entry(struct optee_supp *supp,
					     int num_params, int *id)
{
	struct optee_supp_req *req;

	if (supp->req_id != -1) {
		/*
		 * Supplicant should not mix synchronous and asynchronous
		 * requests.
		 */
		return ERR_PTR(-EINVAL);
	}

	if (list_empty(&supp->reqs))
		return NULL;

	req = list_first_entry(&supp->reqs, struct optee_supp_req, link);

	if (num_params < req->num_params) {
		/* Not enough room for parameters */
		return ERR_PTR(-EINVAL);
	}

	*id = idr_alloc(&supp->idr, req, 1, 0, GFP_KERNEL);
	if (*id < 0)
		return ERR_PTR(-ENOMEM);

	list_del(&req->link);
	req->in_queue = false;

	return req;
}

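/*
 * Sanity check the parameters the supplicant supplied for receiving a
 * request: every slot must be of type NONE, with or without the META bit
 * set. Any shared memory references taken when the parameters were
 * resolved earlier are dropped again here since they aren't used.
 * *num_meta is set to 1 if the first parameter carries the META bit
 * (asynchronous operation), otherwise to 0.
 */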
static int supp_check_recv_params(size_t num_params, struct tee_param *params,
				  size_t *num_meta)
{
	size_t n;

	if (!num_params)
		return -EINVAL;

	/*
	 * If there are memrefs we need to decrease those references as
	 * they were increased earlier; below we'll even refuse to accept
	 * any memref parameters.
	 */
	for (n = 0; n < num_params; n++)
		if (tee_param_is_memref(params + n) && params[n].u.memref.shm)
			tee_shm_put(params[n].u.memref.shm);

	/*
	 * We only expect parameters of type TEE_IOCTL_PARAM_ATTR_TYPE_NONE,
	 * with or without the TEE_IOCTL_PARAM_ATTR_META bit set.
	 */
	for (n = 0; n < num_params; n++)
		if (params[n].attr &&
		    params[n].attr != TEE_IOCTL_PARAM_ATTR_META)
			return -EINVAL;

	/* At most we'll need one meta parameter so no need to check for more */
	if (params->attr == TEE_IOCTL_PARAM_ATTR_META)
		*num_meta = 1;
	else
		*num_meta = 0;

	return 0;
}

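/*
 * Layout of @param as handed to the supplicant by optee_supp_recv():
 *
 *   asynchronous (meta parameter in use):
 *     param[0]		meta VALUE_INOUT, value.a = request id
 *     param[1..]	the request's parameters
 *
 *   synchronous (no meta parameter):
 *     param[0..]	the request's parameters, the id is instead kept
 *			in supp->req_id
 */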
/**
 * optee_supp_recv() - receive request for supplicant
 * @ctx:	context receiving the request
 * @func:	requested function in supplicant
 * @num_params:	number of elements allocated in @param, updated with the
 *		number of used elements
 * @param:	space for parameters for @func
 *
 * Returns 0 on success or <0 on failure
 */
int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
		    struct tee_param *param)
{
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct optee_supp *supp = &optee->supp;
	struct optee_supp_req *req = NULL;
	int id;
	size_t num_meta;
	int rc;

	rc = supp_check_recv_params(*num_params, param, &num_meta);
	if (rc)
		return rc;

	while (true) {
		mutex_lock(&supp->mutex);
		req = supp_pop_entry(supp, *num_params - num_meta, &id);
		mutex_unlock(&supp->mutex);

		if (req) {
			if (IS_ERR(req))
				return PTR_ERR(req);
			break;
		}

		/*
		 * If we didn't get a request we'll block in
		 * wait_for_completion() to avoid needless spinning.
		 *
		 * This is where the supplicant will be hanging most of
		 * the time, let's make this interruptible so we can
		 * easily restart the supplicant if needed.
		 */
		if (wait_for_completion_interruptible(&supp->reqs_c))
			return -ERESTARTSYS;
	}

	if (num_meta) {
		/*
		 * tee-supplicant supports meta parameters -> requests can
		 * be processed asynchronously.
		 */
		param->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
			      TEE_IOCTL_PARAM_ATTR_META;
		param->u.value.a = id;
		param->u.value.b = 0;
		param->u.value.c = 0;
	} else {
		mutex_lock(&supp->mutex);
		supp->req_id = id;
		mutex_unlock(&supp->mutex);
	}

	*func = req->func;
	*num_params = req->num_params + num_meta;
	memcpy(param + num_meta, req->param,
	       sizeof(struct tee_param) * req->num_params);

	return 0;
}

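/*
 * Look up the request a supplicant answer belongs to and remove it from
 * the idr. For an asynchronous answer the id is taken from the meta
 * parameter (which must come first), for a synchronous answer it's the id
 * stored in supp->req_id by optee_supp_recv(). *num_meta tells the caller
 * how many leading parameters to skip when copying results back.
 */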
static struct optee_supp_req *supp_pop_req(struct optee_supp *supp,
					   size_t num_params,
					   struct tee_param *param,
					   size_t *num_meta)
{
	struct optee_supp_req *req;
	int id;
	size_t nm;
	const u32 attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
			 TEE_IOCTL_PARAM_ATTR_META;

	if (!num_params)
		return ERR_PTR(-EINVAL);

	if (supp->req_id == -1) {
		if (param->attr != attr)
			return ERR_PTR(-EINVAL);
		id = param->u.value.a;
		nm = 1;
	} else {
		id = supp->req_id;
		nm = 0;
	}

	req = idr_find(&supp->idr, id);
	if (!req)
		return ERR_PTR(-ENOENT);

	if ((num_params - nm) != req->num_params)
		return ERR_PTR(-EINVAL);

	idr_remove(&supp->idr, id);
	supp->req_id = -1;
	*num_meta = nm;

	return req;
}

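/*
 * When a result is sent back only output and in/out values, and the size
 * field of output and in/out memrefs, are copied into the original
 * request; the memref buffers are shared memory and aren't copied here.
 * Completing req->c hands the request back to optee_supp_thrd_req(),
 * which reads req->ret and frees the request.
 */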
/**
 * optee_supp_send() - send result of request from supplicant
 * @ctx:	context sending result
 * @ret:	return value of request
 * @num_params:	number of parameters returned
 * @param:	returned parameters
 *
 * Returns 0 on success or <0 on failure.
 */
int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
		    struct tee_param *param)
{
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct optee_supp *supp = &optee->supp;
	struct optee_supp_req *req;
	size_t n;
	size_t num_meta;

	mutex_lock(&supp->mutex);
	req = supp_pop_req(supp, num_params, param, &num_meta);
	mutex_unlock(&supp->mutex);

	if (IS_ERR(req)) {
		/* Something is wrong, let the supplicant restart. */
		return PTR_ERR(req);
	}

	/* Update out and in/out parameters */
	for (n = 0; n < req->num_params; n++) {
		struct tee_param *p = req->param + n;

		switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			p->u.value.a = param[n + num_meta].u.value.a;
			p->u.value.b = param[n + num_meta].u.value.b;
			p->u.value.c = param[n + num_meta].u.value.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			p->u.memref.size = param[n + num_meta].u.memref.size;
			break;
		default:
			break;
		}
	}
	req->ret = ret;

	/* Let the requesting thread continue */
	complete(&req->c);

	return 0;
}