// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2024 Meta, Inc */
#include <linux/bpf.h>
#include <linux/bpf_crypto.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <crypto/skcipher.h>

struct bpf_crypto_type_list {
	const struct bpf_crypto_type *type;
	struct list_head list;
};

/* BPF crypto initialization parameters struct */
/**
 * struct bpf_crypto_params - BPF crypto initialization parameters structure
 * @type:	The string of crypto operation type.
 * @reserved:	Reserved member, will be reused for more options in future
 *		Values:
 *		  0
 * @algo:	The string of algorithm to initialize.
 * @key:	The cipher key used to init crypto algorithm.
 * @key_len:	The length of cipher key.
 * @authsize:	The length of authentication tag used by algorithm.
 */
struct bpf_crypto_params {
	char type[14];
	u8 reserved[2];
	char algo[128];
	u8 key[256];
	u32 key_len;
	u32 authsize;
};
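
/*
 * Usage sketch (illustration only, not part of this file): how a BPF
 * program might fill in struct bpf_crypto_params before calling
 * bpf_crypto_ctx_create(). The "skcipher" type string and "ecb(aes)"
 * algorithm are example values; any registered type/algorithm pair works.
 * "aes_key" is a hypothetical 16-byte key buffer.
 *
 *	struct bpf_crypto_params params = {
 *		.type	 = "skcipher",
 *		.algo	 = "ecb(aes)",
 *		.key_len = 16,
 *	};
 *
 *	__builtin_memcpy(params.key, aes_key, 16);
 *	// reserved[] must stay zero; authsize stays 0 for non-AEAD ciphers.
 */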

static LIST_HEAD(bpf_crypto_types);
static DECLARE_RWSEM(bpf_crypto_types_sem);

/**
 * struct bpf_crypto_ctx - refcounted BPF crypto context structure
 * @type:	The pointer to bpf crypto type
 * @tfm:	The pointer to instance of crypto API struct.
 * @siv_len:    Size of IV and state storage for cipher
 * @rcu:	The RCU head used to free the crypto context with RCU safety.
 * @usage:	Object reference counter. When the refcount drops to 0, the
 *		context is freed after an RCU grace period (see
 *		crypto_free_cb() below).
 */
struct bpf_crypto_ctx {
	const struct bpf_crypto_type *type;
	void *tfm;
	u32 siv_len;
	struct rcu_head rcu;
	refcount_t usage;
};
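
/*
 * Usage sketch (illustration only): a BPF map value that owns a crypto
 * context as a referenced kptr. A context created once from a sleepable
 * program can be stashed here and reused later from the datapath. The
 * map and value names are hypothetical.
 *
 *	struct crypto_ctx_value {
 *		struct bpf_crypto_ctx __kptr *ctx;
 *	};
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 1);
 *		__type(key, int);
 *		__type(value, struct crypto_ctx_value);
 *	} crypto_ctx_map SEC(".maps");
 */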

int bpf_crypto_register_type(const struct bpf_crypto_type *type)
{
	struct bpf_crypto_type_list *node;
	int err = -EEXIST;

	down_write(&bpf_crypto_types_sem);
	list_for_each_entry(node, &bpf_crypto_types, list) {
		if (!strcmp(node->type->name, type->name))
			goto unlock;
	}

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	err = -ENOMEM;
	if (!node)
		goto unlock;

	node->type = type;
	list_add(&node->list, &bpf_crypto_types);
	err = 0;

unlock:
	up_write(&bpf_crypto_types_sem);

	return err;
}
EXPORT_SYMBOL_GPL(bpf_crypto_register_type);

int bpf_crypto_unregister_type(const struct bpf_crypto_type *type)
{
	struct bpf_crypto_type_list *node;
	int err = -ENOENT;

	down_write(&bpf_crypto_types_sem);
	list_for_each_entry(node, &bpf_crypto_types, list) {
		if (strcmp(node->type->name, type->name))
			continue;

		list_del(&node->list);
		kfree(node);
		err = 0;
		break;
	}
	up_write(&bpf_crypto_types_sem);

	return err;
}
EXPORT_SYMBOL_GPL(bpf_crypto_unregister_type);
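
/*
 * Usage sketch (illustration only): how a cipher backend module might
 * register itself with this framework. Only the ops that this file
 * actually invokes are shown; struct bpf_crypto_type itself is declared
 * in <linux/bpf_crypto.h>. The "my_skcipher_*" callbacks are hypothetical.
 *
 *	static const struct bpf_crypto_type bpf_crypto_my_type = {
 *		.alloc_tfm	= my_skcipher_alloc_tfm,
 *		.free_tfm	= my_skcipher_free_tfm,
 *		.has_algo	= my_skcipher_has_algo,
 *		.setkey		= my_skcipher_setkey,
 *		.encrypt	= my_skcipher_encrypt,
 *		.decrypt	= my_skcipher_decrypt,
 *		.ivsize		= my_skcipher_ivsize,
 *		.statesize	= my_skcipher_statesize,
 *		.get_flags	= my_skcipher_get_flags,
 *		.owner		= THIS_MODULE,
 *		.name		= "skcipher",
 *	};
 *
 *	static int __init my_crypto_type_init(void)
 *	{
 *		return bpf_crypto_register_type(&bpf_crypto_my_type);
 *	}
 *
 *	static void __exit my_crypto_type_exit(void)
 *	{
 *		bpf_crypto_unregister_type(&bpf_crypto_my_type);
 *	}
 */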

static const struct bpf_crypto_type *bpf_crypto_get_type(const char *name)
{
	const struct bpf_crypto_type *type = ERR_PTR(-ENOENT);
	struct bpf_crypto_type_list *node;

	down_read(&bpf_crypto_types_sem);
	list_for_each_entry(node, &bpf_crypto_types, list) {
		if (strcmp(node->type->name, name))
			continue;

		if (try_module_get(node->type->owner))
			type = node->type;
		break;
	}
	up_read(&bpf_crypto_types_sem);

	return type;
}

__bpf_kfunc_start_defs();

/**
 * bpf_crypto_ctx_create() - Create a mutable BPF crypto context.
 *
 * Allocates a crypto context that can be used, acquired, and released by
 * a BPF program. The crypto context returned by this function must either
 * be embedded in a map as a kptr, or freed with bpf_crypto_ctx_release().
 * As crypto API functions use GFP_KERNEL allocations, this function can
 * only be used in sleepable BPF programs.
 *
 * bpf_crypto_ctx_create() allocates memory for crypto context.
 * It may return NULL if no memory is available.
 * @params:	pointer to struct bpf_crypto_params which contains all the
 *		details needed to initialise crypto context.
 * @params__sz:	size of struct bpf_crypto_params used by the BPF program
 * @err:	integer to store error code when NULL is returned.
 */
__bpf_kfunc struct bpf_crypto_ctx *
bpf_crypto_ctx_create(const struct bpf_crypto_params *params, u32 params__sz,
		      int *err)
{
	const struct bpf_crypto_type *type;
	struct bpf_crypto_ctx *ctx;

	if (!params || params->reserved[0] || params->reserved[1] ||
	    params__sz != sizeof(struct bpf_crypto_params)) {
		*err = -EINVAL;
		return NULL;
	}

	type = bpf_crypto_get_type(params->type);
	if (IS_ERR(type)) {
		*err = PTR_ERR(type);
		return NULL;
	}

	if (!type->has_algo(params->algo)) {
		*err = -EOPNOTSUPP;
		goto err_module_put;
	}

	if (!!params->authsize ^ !!type->setauthsize) {
		*err = -EOPNOTSUPP;
		goto err_module_put;
	}

	if (!params->key_len || params->key_len > sizeof(params->key)) {
		*err = -EINVAL;
		goto err_module_put;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		*err = -ENOMEM;
		goto err_module_put;
	}

	ctx->type = type;
	ctx->tfm = type->alloc_tfm(params->algo);
	if (IS_ERR(ctx->tfm)) {
		*err = PTR_ERR(ctx->tfm);
		goto err_free_ctx;
	}

	if (params->authsize) {
		*err = type->setauthsize(ctx->tfm, params->authsize);
		if (*err)
			goto err_free_tfm;
	}

	*err = type->setkey(ctx->tfm, params->key, params->key_len);
	if (*err)
		goto err_free_tfm;

	if (type->get_flags(ctx->tfm) & CRYPTO_TFM_NEED_KEY) {
		*err = -EINVAL;
		goto err_free_tfm;
	}

	ctx->siv_len = type->ivsize(ctx->tfm) + type->statesize(ctx->tfm);

	refcount_set(&ctx->usage, 1);

	return ctx;

err_free_tfm:
	type->free_tfm(ctx->tfm);
err_free_ctx:
	kfree(ctx);
err_module_put:
	module_put(type->owner);

	return NULL;
}
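
/*
 * Usage sketch (illustration only): creating a context from a sleepable
 * BPF program (e.g. a BPF_PROG_TYPE_SYSCALL program, for which this kfunc
 * is registered below) and stashing it in the map kptr sketched earlier.
 * "crypto_ctx_map" and "params" are the hypothetical names used above.
 *
 *	SEC("syscall")
 *	int setup_crypto_ctx(void *args)
 *	{
 *		struct bpf_crypto_ctx *cctx, *old;
 *		struct crypto_ctx_value *v;
 *		int key = 0, err = 0;
 *
 *		v = bpf_map_lookup_elem(&crypto_ctx_map, &key);
 *		if (!v)
 *			return -1;
 *
 *		cctx = bpf_crypto_ctx_create(&params, sizeof(params), &err);
 *		if (!cctx)
 *			return err;
 *
 *		old = bpf_kptr_xchg(&v->ctx, cctx);
 *		if (old)
 *			bpf_crypto_ctx_release(old);
 *		return 0;
 *	}
 */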

static void crypto_free_cb(struct rcu_head *head)
{
	struct bpf_crypto_ctx *ctx;

	ctx = container_of(head, struct bpf_crypto_ctx, rcu);
	ctx->type->free_tfm(ctx->tfm);
	module_put(ctx->type->owner);
	kfree(ctx);
}

/**
 * bpf_crypto_ctx_acquire() - Acquire a reference to a BPF crypto context.
 * @ctx: The BPF crypto context being acquired. The ctx must be a trusted
 *	     pointer.
 *
 * Acquires a reference to a BPF crypto context. The context returned by this function
 * must either be embedded in a map as a kptr, or freed with
 * bpf_crypto_ctx_release().
 */
__bpf_kfunc struct bpf_crypto_ctx *
bpf_crypto_ctx_acquire(struct bpf_crypto_ctx *ctx)
{
	if (!refcount_inc_not_zero(&ctx->usage))
		return NULL;
	return ctx;
}

/**
 * bpf_crypto_ctx_release() - Release a previously acquired BPF crypto context.
 * @ctx: The crypto context being released.
 *
 * Releases a previously acquired reference to a BPF crypto context. When the final
 * reference of the BPF crypto context has been released, its memory
 * will be released.
 */
__bpf_kfunc void bpf_crypto_ctx_release(struct bpf_crypto_ctx *ctx)
{
	if (refcount_dec_and_test(&ctx->usage))
		call_rcu(&ctx->rcu, crypto_free_cb);
}
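
/*
 * Usage sketch (illustration only): acquiring a long-lived reference to a
 * context stored in the hypothetical "crypto_ctx_map" sketched earlier.
 * Within an RCU read-side section the kptr can be used directly (the
 * crypt kfuncs below are KF_RCU); bpf_crypto_ctx_acquire() is only needed
 * to keep using the context after bpf_rcu_read_unlock().
 *
 *	struct crypto_ctx_value *v;
 *	struct bpf_crypto_ctx *cctx;
 *	int key = 0;
 *
 *	v = bpf_map_lookup_elem(&crypto_ctx_map, &key);
 *	if (!v)
 *		return 0;
 *
 *	bpf_rcu_read_lock();
 *	cctx = v->ctx;
 *	cctx = cctx ? bpf_crypto_ctx_acquire(cctx) : NULL;
 *	bpf_rcu_read_unlock();
 *	if (!cctx)
 *		return 0;
 *
 *	// ... use cctx with the encrypt/decrypt kfuncs below ...
 *
 *	bpf_crypto_ctx_release(cctx);
 */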

static int bpf_crypto_crypt(const struct bpf_crypto_ctx *ctx,
			    const struct bpf_dynptr_kern *src,
			    const struct bpf_dynptr_kern *dst,
			    const struct bpf_dynptr_kern *siv,
			    bool decrypt)
{
	u32 src_len, dst_len, siv_len;
	const u8 *psrc;
	u8 *pdst, *piv;
	int err;

	if (__bpf_dynptr_is_rdonly(dst))
		return -EINVAL;

	siv_len = siv ? __bpf_dynptr_size(siv) : 0;
	src_len = __bpf_dynptr_size(src);
	dst_len = __bpf_dynptr_size(dst);
	if (!src_len || !dst_len)
		return -EINVAL;

	if (siv_len != ctx->siv_len)
		return -EINVAL;

	psrc = __bpf_dynptr_data(src, src_len);
	if (!psrc)
		return -EINVAL;
	pdst = __bpf_dynptr_data_rw(dst, dst_len);
	if (!pdst)
		return -EINVAL;

	piv = siv_len ? __bpf_dynptr_data_rw(siv, siv_len) : NULL;
	if (siv_len && !piv)
		return -EINVAL;

	err = decrypt ? ctx->type->decrypt(ctx->tfm, psrc, pdst, src_len, piv)
		      : ctx->type->encrypt(ctx->tfm, psrc, pdst, src_len, piv);

	return err;
}

/**
 * bpf_crypto_decrypt() - Decrypt buffer using configured context and IV provided.
 * @ctx:		The crypto context being used. The ctx must be a trusted pointer.
 * @src:		bpf_dynptr to the encrypted data. Must be a trusted pointer.
 * @dst:		bpf_dynptr to the buffer where to store the result. Must be a trusted pointer.
 * @siv__nullable:	bpf_dynptr to IV data and state data to be used by decryptor. May be NULL.
 *
 * Decrypts provided buffer using IV data and the crypto context. Crypto context must be configured.
 */
__bpf_kfunc int bpf_crypto_decrypt(struct bpf_crypto_ctx *ctx,
				   const struct bpf_dynptr *src,
				   const struct bpf_dynptr *dst,
				   const struct bpf_dynptr *siv__nullable)
{
	const struct bpf_dynptr_kern *src_kern = (struct bpf_dynptr_kern *)src;
	const struct bpf_dynptr_kern *dst_kern = (struct bpf_dynptr_kern *)dst;
	const struct bpf_dynptr_kern *siv_kern = (struct bpf_dynptr_kern *)siv__nullable;

	return bpf_crypto_crypt(ctx, src_kern, dst_kern, siv_kern, true);
}

/**
 * bpf_crypto_encrypt() - Encrypt buffer using configured context and IV provided.
 * @ctx:		The crypto context being used. The ctx must be a trusted pointer.
 * @src:		bpf_dynptr to the plain data. Must be a trusted pointer.
 * @dst:		bpf_dynptr to the buffer where to store the result. Must be a trusted pointer.
 * @siv__nullable:	bpf_dynptr to IV data and state data to be used by encryptor. May be NULL.
 *
 * Encrypts provided buffer using IV data and the crypto context. Crypto context must be configured.
 */
__bpf_kfunc int bpf_crypto_encrypt(struct bpf_crypto_ctx *ctx,
				   const struct bpf_dynptr *src,
				   const struct bpf_dynptr *dst,
				   const struct bpf_dynptr *siv__nullable)
{
	const struct bpf_dynptr_kern *src_kern = (struct bpf_dynptr_kern *)src;
	const struct bpf_dynptr_kern *dst_kern = (struct bpf_dynptr_kern *)dst;
	const struct bpf_dynptr_kern *siv_kern = (struct bpf_dynptr_kern *)siv__nullable;

	return bpf_crypto_crypt(ctx, src_kern, dst_kern, siv_kern, false);
}
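
/*
 * Usage sketch (illustration only): a one-block encrypt/decrypt round trip
 * from a tc (BPF_PROG_TYPE_SCHED_CLS) program. "cctx" is a context
 * acquired as sketched above; "plain" and "cipher" are hypothetical 16-byte
 * global (map-backed) buffers so that bpf_dynptr_from_mem() accepts them.
 * NULL is passed for the siv dynptr, which matches ctx->siv_len == 0 for
 * an IV-less algorithm such as "ecb(aes)".
 *
 *	struct bpf_dynptr psrc, pdst;
 *	int err;
 *
 *	bpf_dynptr_from_mem(plain, sizeof(plain), 0, &psrc);
 *	bpf_dynptr_from_mem(cipher, sizeof(cipher), 0, &pdst);
 *
 *	err = bpf_crypto_encrypt(cctx, &psrc, &pdst, NULL);
 *	if (!err)
 *		err = bpf_crypto_decrypt(cctx, &pdst, &psrc, NULL);
 */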

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(crypt_init_kfunc_btf_ids)
BTF_ID_FLAGS(func, bpf_crypto_ctx_create, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_crypto_ctx_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_crypto_ctx_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
BTF_KFUNCS_END(crypt_init_kfunc_btf_ids)

static const struct btf_kfunc_id_set crypt_init_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &crypt_init_kfunc_btf_ids,
};

BTF_KFUNCS_START(crypt_kfunc_btf_ids)
BTF_ID_FLAGS(func, bpf_crypto_decrypt, KF_RCU)
BTF_ID_FLAGS(func, bpf_crypto_encrypt, KF_RCU)
BTF_KFUNCS_END(crypt_kfunc_btf_ids)

static const struct btf_kfunc_id_set crypt_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &crypt_kfunc_btf_ids,
};

BTF_ID_LIST(bpf_crypto_dtor_ids)
BTF_ID(struct, bpf_crypto_ctx)
BTF_ID(func, bpf_crypto_ctx_release)

static int __init crypto_kfunc_init(void)
{
	int ret;
	const struct btf_id_dtor_kfunc bpf_crypto_dtors[] = {
		{
			.btf_id	      = bpf_crypto_dtor_ids[0],
			.kfunc_btf_id = bpf_crypto_dtor_ids[1]
		},
	};

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &crypt_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_ACT, &crypt_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &crypt_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
					       &crypt_init_kfunc_set);
	return ret ?: register_btf_id_dtor_kfuncs(bpf_crypto_dtors,
						  ARRAY_SIZE(bpf_crypto_dtors),
						  THIS_MODULE);
}

late_initcall(crypto_kfunc_init);