// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2017, 2019-2021 Linaro Limited
 */
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include "tee_private.h"

static void shm_put_kernel_pages(struct page **pages, size_t page_count)
{
	size_t n;

	for (n = 0; n < page_count; n++)
		put_page(pages[n]);
}

static void shm_get_kernel_pages(struct page **pages, size_t page_count)
{
	size_t n;

	for (n = 0; n < page_count; n++)
		get_page(pages[n]);
}

static void release_registered_pages(struct tee_shm *shm)
{
	if (shm->pages) {
		if (shm->flags & TEE_SHM_USER_MAPPED)
			unpin_user_pages(shm->pages, shm->num_pages);
		else
			shm_put_kernel_pages(shm->pages, shm->num_pages);

		kfree(shm->pages);
	}
}

static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
{
	if (shm->flags & TEE_SHM_POOL) {
		teedev->pool->ops->free(teedev->pool, shm);
	} else if (shm->flags & TEE_SHM_DYNAMIC) {
		int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);

		if (rc)
			dev_err(teedev->dev.parent,
				"unregister shm %p failed: %d\n", shm, rc);

		release_registered_pages(shm);
	}

	teedev_ctx_put(shm->ctx);

	kfree(shm);

	tee_device_put(teedev);
}

static struct tee_shm *shm_alloc_helper(struct tee_context *ctx, size_t size,
					size_t align, u32 flags, int id)
{
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	void *ret;
	int rc;

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->pool) {
		/* teedev has been detached from driver */
		ret = ERR_PTR(-EINVAL);
		goto err_dev_put;
	}

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err_dev_put;
	}

	refcount_set(&shm->refcount, 1);
	shm->flags = flags;
	shm->id = id;

	/*
	 * We're assigning this as it is needed if the shm is to be
	 * registered. If this function returns OK then the caller is
	 * expected to call teedev_ctx_get() or to clear shm->ctx if it
	 * is not needed any longer.
	 */
	shm->ctx = ctx;

	rc = teedev->pool->ops->alloc(teedev->pool, shm, size, align);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_kfree;
	}

	teedev_ctx_get(ctx);
	return shm;
err_kfree:
	kfree(shm);
err_dev_put:
	tee_device_put(teedev);
	return ret;
}

/**
 * tee_shm_alloc_user_buf() - Allocate shared memory for user space
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * Memory allocated as user space shared memory is automatically freed when
 * the TEE file pointer is closed. The primary usage of this function is
 * when the TEE driver doesn't support registering ordinary user space
 * memory.
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size)
{
	u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL;
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	void *ret;
	int id;

	mutex_lock(&teedev->mutex);
	id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);
	if (id < 0)
		return ERR_PTR(id);

	shm = shm_alloc_helper(ctx, size, PAGE_SIZE, flags, id);
	if (IS_ERR(shm)) {
		mutex_lock(&teedev->mutex);
		idr_remove(&teedev->idr, id);
		mutex_unlock(&teedev->mutex);
		return shm;
	}

	mutex_lock(&teedev->mutex);
	ret = idr_replace(&teedev->idr, shm, id);
	mutex_unlock(&teedev->mutex);
	if (IS_ERR(ret)) {
		tee_shm_free(shm);
		return ret;
	}

	return shm;
}
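
/*
 * Illustrative sketch (not part of this file): user space typically
 * reaches tee_shm_alloc_user_buf() via the TEE_IOC_SHM_ALLOC ioctl and
 * then mmaps the returned file descriptor. Assuming an already open TEE
 * device fd "tee_fd":
 *
 *	struct tee_ioctl_shm_alloc_data data = { .size = 4096 };
 *	int shm_fd = ioctl(tee_fd, TEE_IOC_SHM_ALLOC, &data);
 *	void *p = mmap(NULL, data.size, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, shm_fd, 0);
 */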

/**
 * tee_shm_alloc_kernel_buf() - Allocate shared memory for kernel buffer
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * The returned memory is registered in secure world and is suitable to be
 * passed as a memory buffer in a parameter argument to
 * tee_client_invoke_func(). The memory allocated is later freed with a
 * call to tee_shm_free().
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
{
	u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL;

	return shm_alloc_helper(ctx, size, PAGE_SIZE, flags, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);
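
/*
 * Illustrative sketch (not part of this file): a kernel TEE client
 * might use the above roughly as follows, where "ctx" is assumed to
 * come from tee_client_open_context() and error handling is elided:
 *
 *	struct tee_shm *shm = tee_shm_alloc_kernel_buf(ctx, 4096);
 *	void *va;
 *
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	va = tee_shm_get_va(shm, 0);
 *	memcpy(va, data, len);
 *	(pass shm as a memref parameter to tee_client_invoke_func())
 *	tee_shm_free(shm);
 */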

/**
 * tee_shm_alloc_priv_buf() - Allocate shared memory for a privately shared
 *			      kernel buffer
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * This function returns shared memory similar to that returned by
 * tee_shm_alloc_kernel_buf(), with the difference that the memory might
 * not be registered in secure world, in case the driver supports passing
 * memory that has not been registered in advance.
 *
 * This function should normally only be used internally in the TEE
 * drivers.
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size)
{
	u32 flags = TEE_SHM_PRIV | TEE_SHM_POOL;

	return shm_alloc_helper(ctx, size, sizeof(long) * 2, flags, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_priv_buf);

static struct tee_shm *
register_shm_helper(struct tee_context *ctx, struct iov_iter *iter, u32 flags,
		    int id)
{
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	unsigned long start, addr;
	size_t num_pages, off;
	ssize_t len;
	void *ret;
	int rc;

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->desc->ops->shm_register ||
	    !teedev->desc->ops->shm_unregister) {
		ret = ERR_PTR(-ENOTSUPP);
		goto err_dev_put;
	}

	teedev_ctx_get(ctx);

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err_ctx_put;
	}

	refcount_set(&shm->refcount, 1);
	shm->flags = flags;
	shm->ctx = ctx;
	shm->id = id;
	addr = untagged_addr((unsigned long)iter_iov_addr(iter));
	start = rounddown(addr, PAGE_SIZE);
	num_pages = iov_iter_npages(iter, INT_MAX);
	if (!num_pages) {
		ret = ERR_PTR(-ENOMEM);
		goto err_ctx_put;
	}

	shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
	if (!shm->pages) {
		ret = ERR_PTR(-ENOMEM);
		goto err_free_shm;
	}

	len = iov_iter_extract_pages(iter, &shm->pages, LONG_MAX, num_pages, 0,
				     &off);
	if (unlikely(len <= 0)) {
		ret = len ? ERR_PTR(len) : ERR_PTR(-ENOMEM);
		goto err_free_shm_pages;
	}

	/*
	 * iov_iter_extract_kvec_pages() does not take a reference on the
	 * pages, so take a reference on them here.
	 */
	if (iov_iter_is_kvec(iter))
		shm_get_kernel_pages(shm->pages, num_pages);

	shm->offset = off;
	shm->size = len;
	shm->num_pages = num_pages;

	rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
					     shm->num_pages, start);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_put_shm_pages;
	}

	return shm;
err_put_shm_pages:
	if (!iov_iter_is_kvec(iter))
		unpin_user_pages(shm->pages, shm->num_pages);
	else
		shm_put_kernel_pages(shm->pages, shm->num_pages);
err_free_shm_pages:
	kfree(shm->pages);
err_free_shm:
	kfree(shm);
err_ctx_put:
	teedev_ctx_put(ctx);
err_dev_put:
	tee_device_put(teedev);
	return ret;
}

/**
 * tee_shm_register_user_buf() - Register a userspace shared memory buffer
 * @ctx:	Context that registers the shared memory
 * @addr:	The userspace address of the shared buffer
 * @length:	Length of the shared buffer
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
					  unsigned long addr, size_t length)
{
	u32 flags = TEE_SHM_USER_MAPPED | TEE_SHM_DYNAMIC;
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	struct iov_iter iter;
	void *ret;
	int id;

	if (!access_ok((void __user *)addr, length))
		return ERR_PTR(-EFAULT);

	mutex_lock(&teedev->mutex);
	id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);
	if (id < 0)
		return ERR_PTR(id);

	iov_iter_ubuf(&iter, ITER_DEST, (void __user *)addr, length);
	shm = register_shm_helper(ctx, &iter, flags, id);
	if (IS_ERR(shm)) {
		mutex_lock(&teedev->mutex);
		idr_remove(&teedev->idr, id);
		mutex_unlock(&teedev->mutex);
		return shm;
	}

	mutex_lock(&teedev->mutex);
	ret = idr_replace(&teedev->idr, shm, id);
	mutex_unlock(&teedev->mutex);
	if (IS_ERR(ret)) {
		tee_shm_free(shm);
		return ret;
	}

	return shm;
}
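
/*
 * Illustrative sketch (not part of this file): user space typically
 * reaches tee_shm_register_user_buf() via the TEE_IOC_SHM_REGISTER
 * ioctl, registering an existing buffer. Assuming an already open TEE
 * device fd "tee_fd" and a buffer "buf":
 *
 *	struct tee_ioctl_shm_register_data data = {
 *		.addr = (uintptr_t)buf,
 *		.length = sizeof(buf),
 *	};
 *	int shm_fd = ioctl(tee_fd, TEE_IOC_SHM_REGISTER, &data);
 */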

/**
 * tee_shm_register_kernel_buf() - Register kernel memory to be shared with
 *				   secure world
 * @ctx:	Context that registers the shared memory
 * @addr:	The buffer
 * @length:	Length of the buffer
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx,
					    void *addr, size_t length)
{
	u32 flags = TEE_SHM_DYNAMIC;
	struct kvec kvec;
	struct iov_iter iter;

	kvec.iov_base = addr;
	kvec.iov_len = length;
	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, length);

	return register_shm_helper(ctx, &iter, flags, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_register_kernel_buf);
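
/*
 * Illustrative sketch (not part of this file): a kernel client that
 * already owns a buffer can register it instead of allocating from the
 * pool; "ctx", "buf" and "buf_len" are assumed:
 *
 *	struct tee_shm *shm = tee_shm_register_kernel_buf(ctx, buf, buf_len);
 *
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	(use shm as a memref parameter, then release it)
 *	tee_shm_free(shm);
 */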

static int tee_shm_fop_release(struct inode *inode, struct file *filp)
{
	tee_shm_put(filp->private_data);
	return 0;
}

static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct tee_shm *shm = filp->private_data;
	size_t size = vma->vm_end - vma->vm_start;

	/* Refuse sharing shared memory provided by application */
	if (shm->flags & TEE_SHM_USER_MAPPED)
		return -EINVAL;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)
		return -EINVAL;

	return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}

static const struct file_operations tee_shm_fops = {
	.owner = THIS_MODULE,
	.release = tee_shm_fop_release,
	.mmap = tee_shm_fop_mmap,
};

/**
 * tee_shm_get_fd() - Increase reference count and return file descriptor
 * @shm:	Shared memory handle
 * @returns user space file descriptor to shared memory
 */
int tee_shm_get_fd(struct tee_shm *shm)
{
	int fd;

	if (shm->id < 0)
		return -EINVAL;

	/* matched by tee_shm_put() in tee_shm_fop_release() */
	refcount_inc(&shm->refcount);
	fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR);
	if (fd < 0)
		tee_shm_put(shm);
	return fd;
}

/**
 * tee_shm_free() - Free shared memory
 * @shm:	Handle to shared memory to free
 */
void tee_shm_free(struct tee_shm *shm)
{
	tee_shm_put(shm);
}
EXPORT_SYMBOL_GPL(tee_shm_free);

/**
 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @returns virtual address of the shared memory + offs if offs is within
 *	the bounds of this shared memory, else an ERR_PTR
 */
void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
{
	if (!shm->kaddr)
		return ERR_PTR(-EINVAL);
	if (offs >= shm->size)
		return ERR_PTR(-EINVAL);
	return (char *)shm->kaddr + offs;
}
EXPORT_SYMBOL_GPL(tee_shm_get_va);
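
/*
 * Illustrative sketch (not part of this file): tee_shm_get_va() is the
 * usual way to fill a pool-allocated shm before invoking the TEE;
 * "shm", "data" and "len" are assumed:
 *
 *	void *va = tee_shm_get_va(shm, 0);
 *
 *	if (IS_ERR(va))
 *		return PTR_ERR(va);
 *	memcpy(va, data, len);
 */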

/**
 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @pa:		Physical address to return
 * @returns 0 if offs is within the bounds of this shared memory, else an
 *	error code.
 */
int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
{
	if (offs >= shm->size)
		return -EINVAL;
	if (pa)
		*pa = shm->paddr + offs;
	return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_get_pa);
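
/*
 * Illustrative sketch (not part of this file): a driver that hands the
 * buffer to secure world by physical address might do, with "shm"
 * assumed:
 *
 *	phys_addr_t pa;
 *
 *	if (tee_shm_get_pa(shm, 0, &pa))
 *		return -EINVAL;
 */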

/**
 * tee_shm_get_from_id() - Find shared memory object and increase reference
 * count
 * @ctx:	Context owning the shared memory
 * @id:		Id of shared memory object
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
{
	struct tee_device *teedev;
	struct tee_shm *shm;

	if (!ctx)
		return ERR_PTR(-EINVAL);

	teedev = ctx->teedev;
	mutex_lock(&teedev->mutex);
	shm = idr_find(&teedev->idr, id);
	/*
	 * If the tee_shm was found in the IDR it must have a refcount
	 * larger than 0 due to the guarantee in tee_shm_put() below. So
	 * it's safe to use refcount_inc().
	 */
	if (!shm || shm->ctx != ctx)
		shm = ERR_PTR(-EINVAL);
	else
		refcount_inc(&shm->refcount);
	mutex_unlock(&teedev->mutex);
	return shm;
}
EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
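
/*
 * Illustrative sketch (not part of this file): a typical lookup, use,
 * and release sequence pairs tee_shm_get_from_id() with tee_shm_put()
 * below; "ctx" and "id" are assumed:
 *
 *	struct tee_shm *shm = tee_shm_get_from_id(ctx, id);
 *
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	(use the shared memory)
 *	tee_shm_put(shm);
 */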

/**
 * tee_shm_put() - Decrease reference count on a shared memory handle
 * @shm:	Shared memory handle
 */
void tee_shm_put(struct tee_shm *shm)
{
	struct tee_device *teedev = shm->ctx->teedev;
	bool do_release = false;

	mutex_lock(&teedev->mutex);
	if (refcount_dec_and_test(&shm->refcount)) {
		/*
		 * refcount has reached 0, we must now remove it from the
		 * IDR before releasing the mutex. This will guarantee that
		 * the refcount_inc() in tee_shm_get_from_id() never starts
		 * from 0.
		 */
		if (shm->id >= 0)
			idr_remove(&teedev->idr, shm->id);
		do_release = true;
	}
	mutex_unlock(&teedev->mutex);

	if (do_release)
		tee_shm_release(teedev, shm);
}
EXPORT_SYMBOL_GPL(tee_shm_put);
519