xref: /linux/kernel/bpf/bpf_struct_ops.c (revision 8934827db5403eae57d4537114a9ff88b0a8460f)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2019 Facebook */
3 
4 #include <linux/bpf.h>
5 #include <linux/bpf_verifier.h>
6 #include <linux/btf.h>
7 #include <linux/filter.h>
8 #include <linux/slab.h>
9 #include <linux/numa.h>
10 #include <linux/seq_file.h>
11 #include <linux/refcount.h>
12 #include <linux/mutex.h>
13 #include <linux/btf_ids.h>
14 #include <linux/rcupdate_wait.h>
15 #include <linux/poll.h>
16 
17 struct bpf_struct_ops_value {
18 	struct bpf_struct_ops_common_value common;
19 	char data[] ____cacheline_aligned_in_smp;
20 };
21 
22 #define MAX_TRAMP_IMAGE_PAGES 8
23 
24 struct bpf_struct_ops_map {
25 	struct bpf_map map;
26 	const struct bpf_struct_ops_desc *st_ops_desc;
27 	/* protect map_update */
28 	struct mutex lock;
29 	/* links has all the bpf_links that are populated
30 	 * to the func ptrs of the kernel's struct
31 	 * (in kvalue.data).
32 	 */
33 	struct bpf_link **links;
34 	/* ksyms for bpf trampolines */
35 	struct bpf_ksym **ksyms;
36 	u32 funcs_cnt;
37 	u32 image_pages_cnt;
38 	/* image_pages is an array of pages holding all the trampolines
39 	 * that store the func args before calling the bpf_prog.
40 	 */
41 	void *image_pages[MAX_TRAMP_IMAGE_PAGES];
42 	/* The owner module's btf. */
43 	struct btf *btf;
44 	/* uvalue->data stores the kernel struct
45 	 * (e.g. tcp_congestion_ops) that is more useful
46 	 * to userspace than the kvalue.  For example,
47 	 * the bpf_prog's id is stored instead of the kernel
48 	 * address of a func ptr.
49 	 */
50 	struct bpf_struct_ops_value *uvalue;
51 	/* kvalue.data stores the actual kernel's struct
52 	 * (e.g. tcp_congestion_ops) that will be
53 	 * registered to the kernel subsystem.
54 	 */
55 	struct bpf_struct_ops_value kvalue;
56 };
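/* Editor's illustration (not part of the original file): for a map whose
 * type is tcp_congestion_ops, kvalue.data holds the struct the kernel
 * actually runs, while uvalue->data holds the userspace-facing mirror:
 *
 *	kvalue.data:  ->ssthresh points at a trampoline in image_pages
 *	uvalue->data: ->ssthresh field carries the bpf_prog's id instead
 *
 * so a tool reading the map sees prog ids, never raw kernel addresses.
 */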
57 
58 struct bpf_struct_ops_link {
59 	struct bpf_link link;
60 	struct bpf_map __rcu *map;
61 	wait_queue_head_t wait_hup;
62 };
63 
64 static DEFINE_MUTEX(update_mutex);
65 
66 #define VALUE_PREFIX "bpf_struct_ops_"
67 #define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1)
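/* E.g. for st_ops->name == "tcp_congestion_ops", the value type looked up
 * by bpf_struct_ops_desc_init() below is named
 * "bpf_struct_ops_tcp_congestion_ops" (VALUE_PREFIX + st_ops->name).
 */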
68 
69 const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = {
70 };
71 
72 const struct bpf_prog_ops bpf_struct_ops_prog_ops = {
73 #ifdef CONFIG_NET
74 	.test_run = bpf_struct_ops_test_run,
75 #endif
76 };
77 
78 BTF_ID_LIST(st_ops_ids)
79 BTF_ID(struct, module)
80 BTF_ID(struct, bpf_struct_ops_common_value)
81 
82 enum {
83 	IDX_MODULE_ID,
84 	IDX_ST_OPS_COMMON_VALUE_ID,
85 };
86 
87 extern struct btf *btf_vmlinux;
88 
89 static bool is_valid_value_type(struct btf *btf, s32 value_id,
90 				const struct btf_type *type,
91 				const char *value_name)
92 {
93 	const struct btf_type *common_value_type;
94 	const struct btf_member *member;
95 	const struct btf_type *vt, *mt;
96 
97 	vt = btf_type_by_id(btf, value_id);
98 	if (btf_vlen(vt) != 2) {
99 		pr_warn("The number of %s's members should be 2, but got %d\n",
100 			value_name, btf_vlen(vt));
101 		return false;
102 	}
103 	member = btf_type_member(vt);
104 	mt = btf_type_by_id(btf, member->type);
105 	common_value_type = btf_type_by_id(btf_vmlinux,
106 					   st_ops_ids[IDX_ST_OPS_COMMON_VALUE_ID]);
107 	if (mt != common_value_type) {
108 		pr_warn("The first member of %s should be bpf_struct_ops_common_value\n",
109 			value_name);
110 		return false;
111 	}
112 	member++;
113 	mt = btf_type_by_id(btf, member->type);
114 	if (mt != type) {
115 		pr_warn("The second member of %s should be %s\n",
116 			value_name, btf_name_by_offset(btf, type->name_off));
117 		return false;
118 	}
119 
120 	return true;
121 }
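/* A value type passing the checks above therefore has this shape (a
 * sketch; the concrete definitions are provided per subsystem):
 *
 *	struct bpf_struct_ops_tcp_congestion_ops {
 *		struct bpf_struct_ops_common_value common;
 *		struct tcp_congestion_ops data;
 *	};
 */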
122 
123 static void *bpf_struct_ops_image_alloc(void)
124 {
125 	void *image;
126 	int err;
127 
128 	err = bpf_jit_charge_modmem(PAGE_SIZE);
129 	if (err)
130 		return ERR_PTR(err);
131 	image = arch_alloc_bpf_trampoline(PAGE_SIZE);
132 	if (!image) {
133 		bpf_jit_uncharge_modmem(PAGE_SIZE);
134 		return ERR_PTR(-ENOMEM);
135 	}
136 
137 	return image;
138 }
139 
140 void bpf_struct_ops_image_free(void *image)
141 {
142 	if (image) {
143 		arch_free_bpf_trampoline(image, PAGE_SIZE);
144 		bpf_jit_uncharge_modmem(PAGE_SIZE);
145 	}
146 }
147 
148 #define MAYBE_NULL_SUFFIX "__nullable"
149 #define REFCOUNTED_SUFFIX "__ref"
150 
151 /* Prepare argument info for every nullable or refcounted argument of a
152  * member of a struct_ops type.
153  *
154  * Initialize a struct bpf_struct_ops_arg_info according to type info of
155  * the arguments of a stub function. (Check kCFI for more information about
156  * stub functions.)
157  *
158  * Each member in the struct_ops type has a struct bpf_struct_ops_arg_info
159  * to provide an array of struct bpf_ctx_arg_aux, which in turn provides
160  * the information used by the verifier to check the arguments of the
161  * BPF struct_ops program assigned to the member. Here, we only care about
162  * the arguments that are marked as __nullable or __ref.
163  *
164  * The array of struct bpf_ctx_arg_aux is eventually assigned to
165  * prog->aux->ctx_arg_info of BPF struct_ops programs and passed to the
166  * verifier. (See check_struct_ops_btf_id())
167  *
168  * arg_info->info will be the list of struct bpf_ctx_arg_aux on success.
169  * On failure, it is left untouched.
170  */
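/* For example (a hypothetical stub; all names here are illustrative), a
 * subsystem could declare
 *
 *	static int bpf_subsys_ops__handle(struct some_obj *o__nullable,
 *					  struct some_obj *held__ref)
 *	{
 *		return 0;
 *	}
 *
 * and prepare_arg_info() would then mark arg#0 PTR_MAYBE_NULL and arg#1
 * refcounted in the bpf_ctx_arg_aux array handed to the verifier.
 */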
171 static int prepare_arg_info(struct btf *btf,
172 			    const char *st_ops_name,
173 			    const char *member_name,
174 			    const struct btf_type *func_proto, void *stub_func_addr,
175 			    struct bpf_struct_ops_arg_info *arg_info)
176 {
177 	const struct btf_type *stub_func_proto, *pointed_type;
178 	bool is_nullable = false, is_refcounted = false;
179 	const struct btf_param *stub_args, *args;
180 	struct bpf_ctx_arg_aux *info, *info_buf;
181 	u32 nargs, arg_no, info_cnt = 0;
182 	char ksym[KSYM_SYMBOL_LEN];
183 	const char *stub_fname;
184 	const char *suffix;
185 	s32 stub_func_id;
186 	u32 arg_btf_id;
187 	int offset;
188 
189 	stub_fname = kallsyms_lookup((unsigned long)stub_func_addr, NULL, NULL, NULL, ksym);
190 	if (!stub_fname) {
191 		pr_warn("Cannot find the stub function name for the %s in struct %s\n",
192 			member_name, st_ops_name);
193 		return -ENOENT;
194 	}
195 
196 	stub_func_id = btf_find_by_name_kind(btf, stub_fname, BTF_KIND_FUNC);
197 	if (stub_func_id < 0) {
198 		pr_warn("Cannot find the stub function %s in btf\n", stub_fname);
199 		return -ENOENT;
200 	}
201 
202 	stub_func_proto = btf_type_by_id(btf, stub_func_id);
203 	stub_func_proto = btf_type_by_id(btf, stub_func_proto->type);
204 
205 	/* Check if the number of arguments of the stub function is the same
206 	 * as the number of arguments of the function pointer.
207 	 */
208 	nargs = btf_type_vlen(func_proto);
209 	if (nargs != btf_type_vlen(stub_func_proto)) {
210 		pr_warn("the number of arguments of the stub function %s does not match the number of arguments of the member %s of struct %s\n",
211 			stub_fname, member_name, st_ops_name);
212 		return -EINVAL;
213 	}
214 
215 	if (!nargs)
216 		return 0;
217 
218 	args = btf_params(func_proto);
219 	stub_args = btf_params(stub_func_proto);
220 
221 	info_buf = kzalloc_objs(*info_buf, nargs, GFP_KERNEL);
222 	if (!info_buf)
223 		return -ENOMEM;
224 
225 	/* Prepare info for every nullable argument */
226 	info = info_buf;
227 	for (arg_no = 0; arg_no < nargs; arg_no++) {
228 		/* Skip arguments that are not suffixed with
229 		 * "__nullable" or "__ref".
230 		 */
231 		is_nullable = btf_param_match_suffix(btf, &stub_args[arg_no],
232 						     MAYBE_NULL_SUFFIX);
233 		is_refcounted = btf_param_match_suffix(btf, &stub_args[arg_no],
234 						       REFCOUNTED_SUFFIX);
235 
236 		if (is_nullable)
237 			suffix = MAYBE_NULL_SUFFIX;
238 		else if (is_refcounted)
239 			suffix = REFCOUNTED_SUFFIX;
240 		else
241 			continue;
242 
243 		/* Should be a pointer to struct */
244 		pointed_type = btf_type_resolve_ptr(btf,
245 						    args[arg_no].type,
246 						    &arg_btf_id);
247 		if (!pointed_type ||
248 		    !btf_type_is_struct(pointed_type)) {
249 			pr_warn("stub function %s has %s tagging to an unsupported type\n",
250 				stub_fname, suffix);
251 			goto err_out;
252 		}
253 
254 		offset = btf_ctx_arg_offset(btf, func_proto, arg_no);
255 		if (offset < 0) {
256 			pr_warn("stub function %s has an invalid trampoline ctx offset for arg#%u\n",
257 				stub_fname, arg_no);
258 			goto err_out;
259 		}
260 
261 		if (args[arg_no].type != stub_args[arg_no].type) {
262 			pr_warn("arg#%u type in stub function %s does not match with its original func_proto\n",
263 				arg_no, stub_fname);
264 			goto err_out;
265 		}
266 
267 		/* Fill the information of the new argument */
268 		info->btf_id = arg_btf_id;
269 		info->btf = btf;
270 		info->offset = offset;
271 		if (is_nullable) {
272 			info->reg_type = PTR_TRUSTED | PTR_TO_BTF_ID | PTR_MAYBE_NULL;
273 		} else if (is_refcounted) {
274 			info->reg_type = PTR_TRUSTED | PTR_TO_BTF_ID;
275 			info->refcounted = true;
276 		}
277 
278 		info++;
279 		info_cnt++;
280 	}
281 
282 	if (info_cnt) {
283 		arg_info->info = info_buf;
284 		arg_info->cnt = info_cnt;
285 	} else {
286 		kfree(info_buf);
287 	}
288 
289 	return 0;
290 
291 err_out:
292 	kfree(info_buf);
293 
294 	return -EINVAL;
295 }
296 
297 /* Clean up the arg_info in a struct bpf_struct_ops_desc. */
298 void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc)
299 {
300 	struct bpf_struct_ops_arg_info *arg_info;
301 	int i;
302 
303 	arg_info = st_ops_desc->arg_info;
304 	for (i = 0; i < btf_type_vlen(st_ops_desc->type); i++)
305 		kfree(arg_info[i].info);
306 
307 	kfree(arg_info);
308 }
309 
310 static bool is_module_member(const struct btf *btf, u32 id)
311 {
312 	const struct btf_type *t;
313 
314 	t = btf_type_resolve_ptr(btf, id, NULL);
315 	if (!t)
316 		return false;
317 
318 	if (!__btf_type_is_struct(t) && !btf_type_is_fwd(t))
319 		return false;
320 
321 	return !strcmp(btf_name_by_offset(btf, t->name_off), "module");
322 }
323 
324 int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff)
325 {
326 	void *func_ptr = *(void **)(st_ops->cfi_stubs + moff);
327 
328 	return func_ptr ? 0 : -ENOTSUPP;
329 }
330 
331 int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
332 			     struct btf *btf,
333 			     struct bpf_verifier_log *log)
334 {
335 	struct bpf_struct_ops *st_ops = st_ops_desc->st_ops;
336 	struct bpf_struct_ops_arg_info *arg_info;
337 	const struct btf_member *member;
338 	const struct btf_type *t;
339 	s32 type_id, value_id;
340 	char value_name[128];
341 	const char *mname;
342 	int i, err;
343 
344 	if (strlen(st_ops->name) + VALUE_PREFIX_LEN >=
345 	    sizeof(value_name)) {
346 		pr_warn("struct_ops name %s is too long\n",
347 			st_ops->name);
348 		return -EINVAL;
349 	}
350 	sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name);
351 
352 	if (!st_ops->cfi_stubs) {
353 		pr_warn("struct_ops for %s has no cfi_stubs\n", st_ops->name);
354 		return -EINVAL;
355 	}
356 
357 	type_id = btf_find_by_name_kind(btf, st_ops->name,
358 					BTF_KIND_STRUCT);
359 	if (type_id < 0) {
360 		pr_warn("Cannot find struct %s in %s\n",
361 			st_ops->name, btf_get_name(btf));
362 		return -EINVAL;
363 	}
364 	t = btf_type_by_id(btf, type_id);
365 	if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) {
366 		pr_warn("Cannot support #%u members in struct %s\n",
367 			btf_type_vlen(t), st_ops->name);
368 		return -EINVAL;
369 	}
370 
371 	value_id = btf_find_by_name_kind(btf, value_name,
372 					 BTF_KIND_STRUCT);
373 	if (value_id < 0) {
374 		pr_warn("Cannot find struct %s in %s\n",
375 			value_name, btf_get_name(btf));
376 		return -EINVAL;
377 	}
378 	if (!is_valid_value_type(btf, value_id, t, value_name))
379 		return -EINVAL;
380 
381 	arg_info = kzalloc_objs(*arg_info, btf_type_vlen(t), GFP_KERNEL);
382 	if (!arg_info)
383 		return -ENOMEM;
384 
385 	st_ops_desc->arg_info = arg_info;
386 	st_ops_desc->type = t;
387 	st_ops_desc->type_id = type_id;
388 	st_ops_desc->value_id = value_id;
389 	st_ops_desc->value_type = btf_type_by_id(btf, value_id);
390 
391 	for_each_member(i, t, member) {
392 		const struct btf_type *func_proto, *ret_type;
393 		void **stub_func_addr;
394 		u32 moff;
395 
396 		moff = __btf_member_bit_offset(t, member) / 8;
397 		mname = btf_name_by_offset(btf, member->name_off);
398 		if (!*mname) {
399 			pr_warn("anon member in struct %s is not supported\n",
400 				st_ops->name);
401 			err = -EOPNOTSUPP;
402 			goto errout;
403 		}
404 
405 		if (__btf_member_bitfield_size(t, member)) {
406 			pr_warn("bit field member %s in struct %s is not supported\n",
407 				mname, st_ops->name);
408 			err = -EOPNOTSUPP;
409 			goto errout;
410 		}
411 
412 		if (!st_ops_ids[IDX_MODULE_ID] && is_module_member(btf, member->type)) {
413 			pr_warn("'struct module' btf id not found. Is CONFIG_MODULES enabled? bpf_struct_ops '%s' needs module support.\n",
414 				st_ops->name);
415 			err = -EOPNOTSUPP;
416 			goto errout;
417 		}
418 
419 		func_proto = btf_type_resolve_func_ptr(btf,
420 						       member->type,
421 						       NULL);
422 
423 		/* The member is not a function pointer or
424 		 * the function pointer is not supported.
425 		 */
426 		if (!func_proto || bpf_struct_ops_supported(st_ops, moff))
427 			continue;
428 
429 		if (func_proto->type) {
430 			ret_type = btf_type_resolve_ptr(btf, func_proto->type, NULL);
431 			if (ret_type && !__btf_type_is_struct(ret_type)) {
432 				pr_warn("func ptr %s in struct %s returns non-struct pointer, which is not supported\n",
433 					mname, st_ops->name);
434 				err = -EOPNOTSUPP;
435 				goto errout;
436 			}
437 		}
438 
439 		if (btf_distill_func_proto(log, btf,
440 					   func_proto, mname,
441 					   &st_ops->func_models[i])) {
442 			pr_warn("Error in parsing func ptr %s in struct %s\n",
443 				mname, st_ops->name);
444 			err = -EINVAL;
445 			goto errout;
446 		}
447 
448 		stub_func_addr = *(void **)(st_ops->cfi_stubs + moff);
449 		err = prepare_arg_info(btf, st_ops->name, mname,
450 				       func_proto, stub_func_addr,
451 				       arg_info + i);
452 		if (err)
453 			goto errout;
454 	}
455 
456 	if (st_ops->init(btf)) {
457 		pr_warn("Error in init bpf_struct_ops %s\n",
458 			st_ops->name);
459 		err = -EINVAL;
460 		goto errout;
461 	}
462 
463 	return 0;
464 
465 errout:
466 	bpf_struct_ops_desc_release(st_ops_desc);
467 
468 	return err;
469 }
470 
471 static int bpf_struct_ops_map_get_next_key(struct bpf_map *map, void *key,
472 					   void *next_key)
473 {
474 	if (key && *(u32 *)key == 0)
475 		return -ENOENT;
476 
477 	*(u32 *)next_key = 0;
478 	return 0;
479 }
480 
481 int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
482 				       void *value)
483 {
484 	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
485 	struct bpf_struct_ops_value *uvalue, *kvalue;
486 	enum bpf_struct_ops_state state;
487 	s64 refcnt;
488 
489 	if (unlikely(*(u32 *)key != 0))
490 		return -ENOENT;
491 
492 	kvalue = &st_map->kvalue;
493 	/* Pair with smp_store_release() during map_update */
494 	state = smp_load_acquire(&kvalue->common.state);
495 	if (state == BPF_STRUCT_OPS_STATE_INIT) {
496 		memset(value, 0, map->value_size);
497 		return 0;
498 	}
499 
500 	/* No lock is needed.  state and refcnt do not need
501 	 * to be updated together under atomic context.
502 	 */
503 	uvalue = value;
504 	memcpy(uvalue, st_map->uvalue, map->value_size);
505 	uvalue->common.state = state;
506 
507 	/* This value offers the user space a general estimate of how
508 	 * many sockets are still utilizing this struct_ops for TCP
509 	 * congestion control. The number might not be exact, but it
510 	 * should sufficiently meet our present goals.
511 	 */
512 	refcnt = atomic64_read(&map->refcnt) - atomic64_read(&map->usercnt);
513 	refcount_set(&uvalue->common.refcnt, max_t(s64, refcnt, 0));
514 
515 	return 0;
516 }
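/* Userspace sketch (assuming a libbpf-based tool; VALUE_SZ stands for the
 * map's value_size): the map has a single element at key 0, so reading the
 * state and the refcnt estimate looks like
 *
 *	__u32 key = 0;
 *	char val[VALUE_SZ];
 *
 *	if (!bpf_map_lookup_elem(map_fd, &key, val))
 *		;	// val begins with struct bpf_struct_ops_common_value
 */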
517 
518 static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key)
519 {
520 	return ERR_PTR(-EINVAL);
521 }
522 
523 static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
524 {
525 	u32 i;
526 
527 	for (i = 0; i < st_map->funcs_cnt; i++) {
528 		if (!st_map->links[i])
529 			break;
530 		bpf_link_put(st_map->links[i]);
531 		st_map->links[i] = NULL;
532 	}
533 }
534 
535 static void bpf_struct_ops_map_dissoc_progs(struct bpf_struct_ops_map *st_map)
536 {
537 	u32 i;
538 
539 	for (i = 0; i < st_map->funcs_cnt; i++) {
540 		if (!st_map->links[i])
541 			break;
542 		bpf_prog_disassoc_struct_ops(st_map->links[i]->prog);
543 	}
544 }
545 
546 static void bpf_struct_ops_map_free_image(struct bpf_struct_ops_map *st_map)
547 {
548 	int i;
549 
550 	for (i = 0; i < st_map->image_pages_cnt; i++)
551 		bpf_struct_ops_image_free(st_map->image_pages[i]);
552 	st_map->image_pages_cnt = 0;
553 }
554 
555 static int check_zero_holes(const struct btf *btf, const struct btf_type *t, void *data)
556 {
557 	const struct btf_member *member;
558 	u32 i, moff, msize, prev_mend = 0;
559 	const struct btf_type *mtype;
560 
561 	for_each_member(i, t, member) {
562 		moff = __btf_member_bit_offset(t, member) / 8;
563 		if (moff > prev_mend &&
564 		    memchr_inv(data + prev_mend, 0, moff - prev_mend))
565 			return -EINVAL;
566 
567 		mtype = btf_type_by_id(btf, member->type);
568 		mtype = btf_resolve_size(btf, mtype, &msize);
569 		if (IS_ERR(mtype))
570 			return PTR_ERR(mtype);
571 		prev_mend = moff + msize;
572 	}
573 
574 	if (t->size > prev_mend &&
575 	    memchr_inv(data + prev_mend, 0, t->size - prev_mend))
576 		return -EINVAL;
577 
578 	return 0;
579 }
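/* Illustration: for a value layout like { u8 a; <3 pad bytes>; u32 b; },
 * check_zero_holes() rejects the update if any of the three padding bytes
 * between 'a' and 'b', or any tail padding after the last member, is
 * non-zero, since those bytes would otherwise reach the kernel struct
 * unchecked.
 */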
580 
581 static void bpf_struct_ops_link_release(struct bpf_link *link)
582 {
583 }
584 
585 static void bpf_struct_ops_link_dealloc(struct bpf_link *link)
586 {
587 	struct bpf_tramp_link *tlink = container_of(link, struct bpf_tramp_link, link);
588 
589 	kfree(tlink);
590 }
591 
592 const struct bpf_link_ops bpf_struct_ops_link_lops = {
593 	.release = bpf_struct_ops_link_release,
594 	.dealloc = bpf_struct_ops_link_dealloc,
595 };
596 
597 int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
598 				      struct bpf_tramp_link *link,
599 				      const struct btf_func_model *model,
600 				      void *stub_func,
601 				      void **_image, u32 *_image_off,
602 				      bool allow_alloc)
603 {
604 	u32 image_off = *_image_off, flags = BPF_TRAMP_F_INDIRECT;
605 	void *image = *_image;
606 	int size;
607 
608 	tlinks[BPF_TRAMP_FENTRY].links[0] = link;
609 	tlinks[BPF_TRAMP_FENTRY].nr_links = 1;
610 
611 	if (model->ret_size > 0)
612 		flags |= BPF_TRAMP_F_RET_FENTRY_RET;
613 
614 	size = arch_bpf_trampoline_size(model, flags, tlinks, stub_func);
615 	if (size <= 0)
616 		return size ? : -EFAULT;
617 
618 	/* Allocate image buffer if necessary */
619 	if (!image || size > PAGE_SIZE - image_off) {
620 		if (!allow_alloc)
621 			return -E2BIG;
622 
623 		image = bpf_struct_ops_image_alloc();
624 		if (IS_ERR(image))
625 			return PTR_ERR(image);
626 		image_off = 0;
627 	}
628 
629 	size = arch_prepare_bpf_trampoline(NULL, image + image_off,
630 					   image + image_off + size,
631 					   model, flags, tlinks, stub_func);
632 	if (size <= 0) {
633 		if (image != *_image)
634 			bpf_struct_ops_image_free(image);
635 		return size ? : -EFAULT;
636 	}
637 
638 	*_image = image;
639 	*_image_off = image_off + size;
640 	return 0;
641 }
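/* Packing sketch: trampolines for successive members share a page until it
 * fills up. E.g. with image_off == 3900 and a new trampoline needing 300
 * bytes on a 4096-byte page, 300 > 4096 - 3900, so a fresh page is
 * allocated (when allow_alloc) and image_off restarts at 0.
 */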
642 
643 static void bpf_struct_ops_ksym_init(const char *tname, const char *mname,
644 				     void *image, unsigned int size,
645 				     struct bpf_ksym *ksym)
646 {
647 	snprintf(ksym->name, KSYM_NAME_LEN, "bpf__%s_%s", tname, mname);
648 	INIT_LIST_HEAD_RCU(&ksym->lnode);
649 	bpf_image_ksym_init(image, size, ksym);
650 }
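/* The resulting names show up in /proc/kallsyms, e.g. (illustrative)
 * "bpf__tcp_congestion_ops_ssthresh" for the ssthresh trampoline, which
 * keeps stack traces through struct_ops trampolines readable.
 */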
651 
652 static void bpf_struct_ops_map_add_ksyms(struct bpf_struct_ops_map *st_map)
653 {
654 	u32 i;
655 
656 	for (i = 0; i < st_map->funcs_cnt; i++) {
657 		if (!st_map->ksyms[i])
658 			break;
659 		bpf_image_ksym_add(st_map->ksyms[i]);
660 	}
661 }
662 
663 static void bpf_struct_ops_map_del_ksyms(struct bpf_struct_ops_map *st_map)
664 {
665 	u32 i;
666 
667 	for (i = 0; i < st_map->funcs_cnt; i++) {
668 		if (!st_map->ksyms[i])
669 			break;
670 		bpf_image_ksym_del(st_map->ksyms[i]);
671 	}
672 }
673 
674 static void bpf_struct_ops_map_free_ksyms(struct bpf_struct_ops_map *st_map)
675 {
676 	u32 i;
677 
678 	for (i = 0; i < st_map->funcs_cnt; i++) {
679 		if (!st_map->ksyms[i])
680 			break;
681 		kfree(st_map->ksyms[i]);
682 		st_map->ksyms[i] = NULL;
683 	}
684 }
685 
686 static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
687 					   void *value, u64 flags)
688 {
689 	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
690 	const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc;
691 	const struct bpf_struct_ops *st_ops = st_ops_desc->st_ops;
692 	struct bpf_struct_ops_value *uvalue, *kvalue;
693 	const struct btf_type *module_type;
694 	const struct btf_member *member;
695 	const struct btf_type *t = st_ops_desc->type;
696 	struct bpf_tramp_links *tlinks;
697 	void *udata, *kdata;
698 	int prog_fd, err;
699 	u32 i, trampoline_start, image_off = 0;
700 	void *cur_image = NULL, *image = NULL;
701 	struct bpf_link **plink;
702 	struct bpf_ksym **pksym;
703 	const char *tname, *mname;
704 
705 	if (flags)
706 		return -EINVAL;
707 
708 	if (*(u32 *)key != 0)
709 		return -E2BIG;
710 
711 	err = check_zero_holes(st_map->btf, st_ops_desc->value_type, value);
712 	if (err)
713 		return err;
714 
715 	uvalue = value;
716 	err = check_zero_holes(st_map->btf, t, uvalue->data);
717 	if (err)
718 		return err;
719 
720 	if (uvalue->common.state || refcount_read(&uvalue->common.refcnt))
721 		return -EINVAL;
722 
723 	tlinks = kzalloc_objs(*tlinks, BPF_TRAMP_MAX, GFP_KERNEL);
724 	if (!tlinks)
725 		return -ENOMEM;
726 
727 	uvalue = (struct bpf_struct_ops_value *)st_map->uvalue;
728 	kvalue = (struct bpf_struct_ops_value *)&st_map->kvalue;
729 
730 	mutex_lock(&st_map->lock);
731 
732 	if (kvalue->common.state != BPF_STRUCT_OPS_STATE_INIT) {
733 		err = -EBUSY;
734 		goto unlock;
735 	}
736 
737 	memcpy(uvalue, value, map->value_size);
738 
739 	udata = &uvalue->data;
740 	kdata = &kvalue->data;
741 
742 	plink = st_map->links;
743 	pksym = st_map->ksyms;
744 	tname = btf_name_by_offset(st_map->btf, t->name_off);
745 	module_type = btf_type_by_id(btf_vmlinux, st_ops_ids[IDX_MODULE_ID]);
746 	for_each_member(i, t, member) {
747 		const struct btf_type *mtype, *ptype;
748 		struct bpf_prog *prog;
749 		struct bpf_tramp_link *link;
750 		struct bpf_ksym *ksym;
751 		u32 moff;
752 
753 		moff = __btf_member_bit_offset(t, member) / 8;
754 		mname = btf_name_by_offset(st_map->btf, member->name_off);
755 		ptype = btf_type_resolve_ptr(st_map->btf, member->type, NULL);
756 		if (ptype == module_type) {
757 			if (*(void **)(udata + moff))
758 				goto reset_unlock;
759 			*(void **)(kdata + moff) = BPF_MODULE_OWNER;
760 			continue;
761 		}
762 
763 		err = st_ops->init_member(t, member, kdata, udata);
764 		if (err < 0)
765 			goto reset_unlock;
766 
767 		/* The ->init_member() has handled this member */
768 		if (err > 0)
769 			continue;
770 
771 		/* If st_ops->init_member does not handle it,
772 		 * we will only handle func ptrs and zero-ed members
773 		 * here.  Reject everything else.
774 		 */
775 
776 		/* All non func ptr member must be 0 */
777 		if (!ptype || !btf_type_is_func_proto(ptype)) {
778 			u32 msize;
779 
780 			mtype = btf_type_by_id(st_map->btf, member->type);
781 			mtype = btf_resolve_size(st_map->btf, mtype, &msize);
782 			if (IS_ERR(mtype)) {
783 				err = PTR_ERR(mtype);
784 				goto reset_unlock;
785 			}
786 
787 			if (memchr_inv(udata + moff, 0, msize)) {
788 				err = -EINVAL;
789 				goto reset_unlock;
790 			}
791 
792 			continue;
793 		}
794 
795 		prog_fd = (int)(*(unsigned long *)(udata + moff));
796 		/* Similar check as the attr->attach_prog_fd */
797 		if (!prog_fd)
798 			continue;
799 
800 		prog = bpf_prog_get(prog_fd);
801 		if (IS_ERR(prog)) {
802 			err = PTR_ERR(prog);
803 			goto reset_unlock;
804 		}
805 
806 		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
807 		    prog->aux->attach_btf_id != st_ops_desc->type_id ||
808 		    prog->expected_attach_type != i) {
809 			bpf_prog_put(prog);
810 			err = -EINVAL;
811 			goto reset_unlock;
812 		}
813 
814 		/* On error, poison the pointer instead of returning an error, for backward compatibility */
815 		bpf_prog_assoc_struct_ops(prog, &st_map->map);
816 
817 		link = kzalloc_obj(*link, GFP_USER);
818 		if (!link) {
819 			bpf_prog_put(prog);
820 			err = -ENOMEM;
821 			goto reset_unlock;
822 		}
823 		bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS,
824 			      &bpf_struct_ops_link_lops, prog, prog->expected_attach_type);
825 		*plink++ = &link->link;
826 
827 		ksym = kzalloc_obj(*ksym, GFP_USER);
828 		if (!ksym) {
829 			err = -ENOMEM;
830 			goto reset_unlock;
831 		}
832 		*pksym++ = ksym;
833 
834 		trampoline_start = image_off;
835 		err = bpf_struct_ops_prepare_trampoline(tlinks, link,
836 						&st_ops->func_models[i],
837 						*(void **)(st_ops->cfi_stubs + moff),
838 						&image, &image_off,
839 						st_map->image_pages_cnt < MAX_TRAMP_IMAGE_PAGES);
840 		if (err)
841 			goto reset_unlock;
842 
843 		if (cur_image != image) {
844 			st_map->image_pages[st_map->image_pages_cnt++] = image;
845 			cur_image = image;
846 			trampoline_start = 0;
847 		}
848 
849 		*(void **)(kdata + moff) = image + trampoline_start + cfi_get_offset();
850 
851 		/* store the prog's id in udata */
852 		*(unsigned long *)(udata + moff) = prog->aux->id;
853 
854 		/* init ksym for this trampoline */
855 		bpf_struct_ops_ksym_init(tname, mname,
856 					 image + trampoline_start,
857 					 image_off - trampoline_start,
858 					 ksym);
859 	}
860 
861 	if (st_ops->validate) {
862 		err = st_ops->validate(kdata);
863 		if (err)
864 			goto reset_unlock;
865 	}
866 	for (i = 0; i < st_map->image_pages_cnt; i++) {
867 		err = arch_protect_bpf_trampoline(st_map->image_pages[i],
868 						  PAGE_SIZE);
869 		if (err)
870 			goto reset_unlock;
871 	}
872 
873 	if (st_map->map.map_flags & BPF_F_LINK) {
874 		err = 0;
875 		/* Let bpf_link handle registration & unregistration.
876 		 *
877 		 * Pair with smp_load_acquire() during lookup_elem().
878 		 */
879 		smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_READY);
880 		goto unlock;
881 	}
882 
883 	err = st_ops->reg(kdata, NULL);
884 	if (likely(!err)) {
885 		/* This refcnt increment on the map here after
886 		 * 'st_ops->reg()' is safe since the state of the
887 		 * map must be set to INIT at this moment, and thus
888 		 * bpf_struct_ops_map_delete_elem() can't unregister
889 		 * or transition it to TOBEFREE concurrently.
890 		 */
891 		bpf_map_inc(map);
892 		/* Pair with smp_load_acquire() during lookup_elem().
893 		 * It ensures the above udata updates (e.g. prog->aux->id)
894 		 * can be seen once BPF_STRUCT_OPS_STATE_INUSE is set.
895 		 */
896 		smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_INUSE);
897 		goto unlock;
898 	}
899 
900 	/* Error during st_ops->reg(). Can happen if this struct_ops needs to be
901 	 * verified as a whole, after all init_member() calls. Can also happen if
902 	 * there was a race in registering the struct_ops (under the same name) to
903 	 * a sub-system through different struct_ops maps.
904 	 */
905 
906 reset_unlock:
907 	bpf_struct_ops_map_free_ksyms(st_map);
908 	bpf_struct_ops_map_free_image(st_map);
909 	bpf_struct_ops_map_put_progs(st_map);
910 	memset(uvalue, 0, map->value_size);
911 	memset(kvalue, 0, map->value_size);
912 unlock:
913 	kfree(tlinks);
914 	mutex_unlock(&st_map->lock);
915 	if (!err)
916 		bpf_struct_ops_map_add_ksyms(st_map);
917 	return err;
918 }
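/* Typical userspace flow driving the update above (a sketch using libbpf;
 * the skeleton and map names are hypothetical):
 *
 *	skel = subsys_ops__open_and_load();
 *	link = bpf_map__attach_struct_ops(skel->maps.my_ops);
 *
 * bpf_map__attach_struct_ops() issues the map update with prog fds filled
 * into the func ptr fields; for BPF_F_LINK maps it then creates the link
 * that performs st_ops->reg() via bpf_struct_ops_link_create() below.
 */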
919 
920 static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
921 {
922 	enum bpf_struct_ops_state prev_state;
923 	struct bpf_struct_ops_map *st_map;
924 
925 	st_map = (struct bpf_struct_ops_map *)map;
926 	if (st_map->map.map_flags & BPF_F_LINK)
927 		return -EOPNOTSUPP;
928 
929 	prev_state = cmpxchg(&st_map->kvalue.common.state,
930 			     BPF_STRUCT_OPS_STATE_INUSE,
931 			     BPF_STRUCT_OPS_STATE_TOBEFREE);
932 	switch (prev_state) {
933 	case BPF_STRUCT_OPS_STATE_INUSE:
934 		st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, NULL);
935 		bpf_map_put(map);
936 		return 0;
937 	case BPF_STRUCT_OPS_STATE_TOBEFREE:
938 		return -EINPROGRESS;
939 	case BPF_STRUCT_OPS_STATE_INIT:
940 		return -ENOENT;
941 	default:
942 		WARN_ON_ONCE(1);
943 		/* Should never happen.  Treat it as not found. */
944 		return -ENOENT;
945 	}
946 }
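/* State machine of kvalue.common.state for a non-BPF_F_LINK map, as driven
 * by the two handlers above:
 *
 *	INIT --update_elem()--> INUSE --delete_elem()--> TOBEFREE
 *
 * BPF_F_LINK maps instead move INIT -> READY in update_elem() and are
 * registered/unregistered through the bpf_link operations below.
 */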
947 
948 static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
949 					     struct seq_file *m)
950 {
951 	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
952 	void *value;
953 	int err;
954 
955 	value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
956 	if (!value)
957 		return;
958 
959 	err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
960 	if (!err) {
961 		btf_type_seq_show(st_map->btf,
962 				  map->btf_vmlinux_value_type_id,
963 				  value, m);
964 		seq_putc(m, '\n');
965 	}
966 
967 	kfree(value);
968 }
969 
970 static void __bpf_struct_ops_map_free(struct bpf_map *map)
971 {
972 	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
973 
974 	if (st_map->links)
975 		bpf_struct_ops_map_put_progs(st_map);
976 	if (st_map->ksyms)
977 		bpf_struct_ops_map_free_ksyms(st_map);
978 	bpf_map_area_free(st_map->links);
979 	bpf_map_area_free(st_map->ksyms);
980 	bpf_struct_ops_map_free_image(st_map);
981 	bpf_map_area_free(st_map->uvalue);
982 	bpf_map_area_free(st_map);
983 }
984 
985 static void bpf_struct_ops_map_free(struct bpf_map *map)
986 {
987 	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
988 
989 	/* st_ops->owner was acquired during map_alloc to implicitly hold
990 	 * the btf's refcnt. The acquire was only done when btf_is_module()
991 	 * is true. st_map->btf cannot be NULL here.
992 	 */
993 	if (btf_is_module(st_map->btf))
994 		module_put(st_map->st_ops_desc->st_ops->owner);
995 
996 	bpf_struct_ops_map_dissoc_progs(st_map);
997 
998 	bpf_struct_ops_map_del_ksyms(st_map);
999 
1000 	/* The struct_ops's function may switch to another struct_ops.
1001 	 *
1002 	 * For example, bpf_tcp_cc_x->init() may switch to
1003 	 * another tcp_cc_y by calling
1004 	 * setsockopt(TCP_CONGESTION, "tcp_cc_y").
1005 	 * During the switch, bpf_struct_ops_put(tcp_cc_x) is called
1006 	 * and its refcount may reach 0, which then frees its
1007 	 * trampoline image while tcp_cc_x is still running.
1008 	 *
1009 	 * A vanilla rcu gp waits for all bpf-tcp-cc progs
1010 	 * to finish; bpf-tcp-cc progs are non-sleepable.
1011 	 * A rcu_tasks gp waits for the last few insns
1012 	 * in the trampoline image to finish before releasing
1013 	 * the trampoline image.
1014 	 */
1015 	synchronize_rcu_mult(call_rcu, call_rcu_tasks);
1016 
1017 	__bpf_struct_ops_map_free(map);
1018 }
1019 
1020 static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
1021 {
1022 	if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
1023 	    (attr->map_flags & ~(BPF_F_LINK | BPF_F_VTYPE_BTF_OBJ_FD)) ||
1024 	    !attr->btf_vmlinux_value_type_id)
1025 		return -EINVAL;
1026 	return 0;
1027 }
1028 
1029 static u32 count_func_ptrs(const struct btf *btf, const struct btf_type *t)
1030 {
1031 	int i;
1032 	u32 count;
1033 	const struct btf_member *member;
1034 
1035 	count = 0;
1036 	for_each_member(i, t, member)
1037 		if (btf_type_resolve_func_ptr(btf, member->type, NULL))
1038 			count++;
1039 	return count;
1040 }
1041 
1042 static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
1043 {
1044 	const struct bpf_struct_ops_desc *st_ops_desc;
1045 	size_t st_map_size;
1046 	struct bpf_struct_ops_map *st_map;
1047 	const struct btf_type *t, *vt;
1048 	struct module *mod = NULL;
1049 	struct bpf_map *map;
1050 	struct btf *btf;
1051 	int ret;
1052 
1053 	if (attr->map_flags & BPF_F_VTYPE_BTF_OBJ_FD) {
1054 		/* The map holds btf for its whole lifetime. */
1055 		btf = btf_get_by_fd(attr->value_type_btf_obj_fd);
1056 		if (IS_ERR(btf))
1057 			return ERR_CAST(btf);
1058 		if (!btf_is_module(btf)) {
1059 			btf_put(btf);
1060 			return ERR_PTR(-EINVAL);
1061 		}
1062 
1063 		mod = btf_try_get_module(btf);
1064 		/* mod holds a refcnt to btf. We don't need an extra refcnt
1065 		 * here.
1066 		 */
1067 		btf_put(btf);
1068 		if (!mod)
1069 			return ERR_PTR(-EINVAL);
1070 	} else {
1071 		btf = bpf_get_btf_vmlinux();
1072 		if (IS_ERR(btf))
1073 			return ERR_CAST(btf);
1074 		if (!btf)
1075 			return ERR_PTR(-ENOTSUPP);
1076 	}
1077 
1078 	st_ops_desc = bpf_struct_ops_find_value(btf, attr->btf_vmlinux_value_type_id);
1079 	if (!st_ops_desc) {
1080 		ret = -ENOTSUPP;
1081 		goto errout;
1082 	}
1083 
1084 	vt = st_ops_desc->value_type;
1085 	if (attr->value_size != vt->size) {
1086 		ret = -EINVAL;
1087 		goto errout;
1088 	}
1089 
1090 	t = st_ops_desc->type;
1091 
1092 	st_map_size = sizeof(*st_map) +
1093 		/* kvalue stores the
1094 		 * struct bpf_struct_ops_tcp_congestion_ops
1095 		 */
1096 		(vt->size - sizeof(struct bpf_struct_ops_value));
1097 
1098 	st_map = bpf_map_area_alloc(st_map_size, NUMA_NO_NODE);
1099 	if (!st_map) {
1100 		ret = -ENOMEM;
1101 		goto errout;
1102 	}
1103 
1104 	st_map->st_ops_desc = st_ops_desc;
1105 	map = &st_map->map;
1106 
1107 	st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE);
1108 	st_map->funcs_cnt = count_func_ptrs(btf, t);
1109 	st_map->links =
1110 		bpf_map_area_alloc(st_map->funcs_cnt * sizeof(struct bpf_link *),
1111 				   NUMA_NO_NODE);
1112 
1113 	st_map->ksyms =
1114 		bpf_map_area_alloc(st_map->funcs_cnt * sizeof(struct bpf_ksym *),
1115 				   NUMA_NO_NODE);
1116 	if (!st_map->uvalue || !st_map->links || !st_map->ksyms) {
1117 		ret = -ENOMEM;
1118 		goto errout_free;
1119 	}
1120 	st_map->btf = btf;
1121 
1122 	mutex_init(&st_map->lock);
1123 	bpf_map_init_from_attr(map, attr);
1124 
1125 	return map;
1126 
1127 errout_free:
1128 	__bpf_struct_ops_map_free(map);
1129 errout:
1130 	module_put(mod);
1131 
1132 	return ERR_PTR(ret);
1133 }
1134 
1135 static u64 bpf_struct_ops_map_mem_usage(const struct bpf_map *map)
1136 {
1137 	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
1138 	const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc;
1139 	const struct btf_type *vt = st_ops_desc->value_type;
1140 	u64 usage;
1141 
1142 	usage = sizeof(*st_map) +
1143 			vt->size - sizeof(struct bpf_struct_ops_value);
1144 	usage += vt->size;
1145 	usage += st_map->funcs_cnt * sizeof(struct bpf_link *);
1146 	usage += st_map->funcs_cnt * sizeof(struct bpf_ksym *);
1147 	usage += PAGE_SIZE;
1148 	return usage;
1149 }
1150 
1151 BTF_ID_LIST_SINGLE(bpf_struct_ops_map_btf_ids, struct, bpf_struct_ops_map)
1152 const struct bpf_map_ops bpf_struct_ops_map_ops = {
1153 	.map_alloc_check = bpf_struct_ops_map_alloc_check,
1154 	.map_alloc = bpf_struct_ops_map_alloc,
1155 	.map_free = bpf_struct_ops_map_free,
1156 	.map_get_next_key = bpf_struct_ops_map_get_next_key,
1157 	.map_lookup_elem = bpf_struct_ops_map_lookup_elem,
1158 	.map_delete_elem = bpf_struct_ops_map_delete_elem,
1159 	.map_update_elem = bpf_struct_ops_map_update_elem,
1160 	.map_seq_show_elem = bpf_struct_ops_map_seq_show_elem,
1161 	.map_mem_usage = bpf_struct_ops_map_mem_usage,
1162 	.map_btf_id = &bpf_struct_ops_map_btf_ids[0],
1163 };
1164 
1165 /* "const void *" because some subsystems
1166  * pass a const pointer (e.g. const struct tcp_congestion_ops *)
1167  */
1168 bool bpf_struct_ops_get(const void *kdata)
1169 {
1170 	struct bpf_struct_ops_value *kvalue;
1171 	struct bpf_struct_ops_map *st_map;
1172 	struct bpf_map *map;
1173 
1174 	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
1175 	st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);
1176 
1177 	map = __bpf_map_inc_not_zero(&st_map->map, false);
1178 	return !IS_ERR(map);
1179 }
1180 EXPORT_SYMBOL_GPL(bpf_struct_ops_get);
1181 
1182 void bpf_struct_ops_put(const void *kdata)
1183 {
1184 	struct bpf_struct_ops_value *kvalue;
1185 	struct bpf_struct_ops_map *st_map;
1186 
1187 	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
1188 	st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);
1189 
1190 	bpf_map_put(&st_map->map);
1191 }
1192 EXPORT_SYMBOL_GPL(bpf_struct_ops_put);
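/* Subsystem usage pattern (conceptual sketch): a subsystem that keeps the
 * ops in use beyond a single call brackets that use with these helpers,
 * where 'ca' is the kdata pointer it was registered with:
 *
 *	if (bpf_struct_ops_get(ca)) {
 *		... use ca ...
 *		bpf_struct_ops_put(ca);
 *	}
 */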
1193 
1194 u32 bpf_struct_ops_id(const void *kdata)
1195 {
1196 	struct bpf_struct_ops_value *kvalue;
1197 	struct bpf_struct_ops_map *st_map;
1198 
1199 	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
1200 	st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);
1201 
1202 	return st_map->map.id;
1203 }
1204 EXPORT_SYMBOL_GPL(bpf_struct_ops_id);
1205 
1206 static bool bpf_struct_ops_valid_to_reg(struct bpf_map *map)
1207 {
1208 	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
1209 
1210 	return map->map_type == BPF_MAP_TYPE_STRUCT_OPS &&
1211 		map->map_flags & BPF_F_LINK &&
1212 		/* Pair with smp_store_release() during map_update */
1213 		smp_load_acquire(&st_map->kvalue.common.state) == BPF_STRUCT_OPS_STATE_READY;
1214 }
1215 
1216 static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link)
1217 {
1218 	struct bpf_struct_ops_link *st_link;
1219 	struct bpf_struct_ops_map *st_map;
1220 
1221 	st_link = container_of(link, struct bpf_struct_ops_link, link);
1222 	st_map = (struct bpf_struct_ops_map *)
1223 		rcu_dereference_protected(st_link->map, true);
1224 	if (st_map) {
1225 		st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link);
1226 		bpf_map_put(&st_map->map);
1227 	}
1228 	kfree(st_link);
1229 }
1230 
1231 static void bpf_struct_ops_map_link_show_fdinfo(const struct bpf_link *link,
1232 					    struct seq_file *seq)
1233 {
1234 	struct bpf_struct_ops_link *st_link;
1235 	struct bpf_map *map;
1236 
1237 	st_link = container_of(link, struct bpf_struct_ops_link, link);
1238 	rcu_read_lock();
1239 	map = rcu_dereference(st_link->map);
1240 	if (map)
1241 		seq_printf(seq, "map_id:\t%d\n", map->id);
1242 	rcu_read_unlock();
1243 }
1244 
1245 static int bpf_struct_ops_map_link_fill_link_info(const struct bpf_link *link,
1246 					       struct bpf_link_info *info)
1247 {
1248 	struct bpf_struct_ops_link *st_link;
1249 	struct bpf_map *map;
1250 
1251 	st_link = container_of(link, struct bpf_struct_ops_link, link);
1252 	rcu_read_lock();
1253 	map = rcu_dereference(st_link->map);
1254 	if (map)
1255 		info->struct_ops.map_id = map->id;
1256 	rcu_read_unlock();
1257 	return 0;
1258 }
1259 
1260 static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map *new_map,
1261 					  struct bpf_map *expected_old_map)
1262 {
1263 	struct bpf_struct_ops_map *st_map, *old_st_map;
1264 	struct bpf_map *old_map;
1265 	struct bpf_struct_ops_link *st_link;
1266 	int err;
1267 
1268 	st_link = container_of(link, struct bpf_struct_ops_link, link);
1269 	st_map = container_of(new_map, struct bpf_struct_ops_map, map);
1270 
1271 	if (!bpf_struct_ops_valid_to_reg(new_map))
1272 		return -EINVAL;
1273 
1274 	if (!st_map->st_ops_desc->st_ops->update)
1275 		return -EOPNOTSUPP;
1276 
1277 	mutex_lock(&update_mutex);
1278 
1279 	old_map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
1280 	if (!old_map) {
1281 		err = -ENOLINK;
1282 		goto err_out;
1283 	}
1284 	if (expected_old_map && old_map != expected_old_map) {
1285 		err = -EPERM;
1286 		goto err_out;
1287 	}
1288 
1289 	old_st_map = container_of(old_map, struct bpf_struct_ops_map, map);
1290 	/* The new and old struct_ops must be the same type. */
1291 	if (st_map->st_ops_desc != old_st_map->st_ops_desc) {
1292 		err = -EINVAL;
1293 		goto err_out;
1294 	}
1295 
1296 	err = st_map->st_ops_desc->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data, link);
1297 	if (err)
1298 		goto err_out;
1299 
1300 	bpf_map_inc(new_map);
1301 	rcu_assign_pointer(st_link->map, new_map);
1302 	bpf_map_put(old_map);
1303 
1304 err_out:
1305 	mutex_unlock(&update_mutex);
1306 
1307 	return err;
1308 }
1309 
1310 static int bpf_struct_ops_map_link_detach(struct bpf_link *link)
1311 {
1312 	struct bpf_struct_ops_link *st_link = container_of(link, struct bpf_struct_ops_link, link);
1313 	struct bpf_struct_ops_map *st_map;
1314 	struct bpf_map *map;
1315 
1316 	mutex_lock(&update_mutex);
1317 
1318 	map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
1319 	if (!map) {
1320 		mutex_unlock(&update_mutex);
1321 		return 0;
1322 	}
1323 	st_map = container_of(map, struct bpf_struct_ops_map, map);
1324 
1325 	st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link);
1326 
1327 	RCU_INIT_POINTER(st_link->map, NULL);
1328 	/* Pair with bpf_map_get() in bpf_struct_ops_link_create() or
1329 	 * bpf_map_inc() in bpf_struct_ops_map_link_update().
1330 	 */
1331 	bpf_map_put(&st_map->map);
1332 
1333 	mutex_unlock(&update_mutex);
1334 
1335 	wake_up_interruptible_poll(&st_link->wait_hup, EPOLLHUP);
1336 
1337 	return 0;
1338 }
1339 
1340 static __poll_t bpf_struct_ops_map_link_poll(struct file *file,
1341 					     struct poll_table_struct *pts)
1342 {
1343 	struct bpf_struct_ops_link *st_link = file->private_data;
1344 
1345 	poll_wait(file, &st_link->wait_hup, pts);
1346 
1347 	return rcu_access_pointer(st_link->map) ? 0 : EPOLLHUP;
1348 }
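/* Userspace can wait for a kernel-initiated detach (sketch):
 *
 *	struct pollfd pfd = { .fd = link_fd, .events = POLLHUP };
 *
 *	poll(&pfd, 1, -1);	// returns once the map is detached
 *
 * bpf_struct_ops_map_link_detach() above wakes this waiter with EPOLLHUP
 * after clearing st_link->map.
 */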
1349 
1350 static const struct bpf_link_ops bpf_struct_ops_map_lops = {
1351 	.dealloc = bpf_struct_ops_map_link_dealloc,
1352 	.detach = bpf_struct_ops_map_link_detach,
1353 	.show_fdinfo = bpf_struct_ops_map_link_show_fdinfo,
1354 	.fill_link_info = bpf_struct_ops_map_link_fill_link_info,
1355 	.update_map = bpf_struct_ops_map_link_update,
1356 	.poll = bpf_struct_ops_map_link_poll,
1357 };
1358 
1359 int bpf_struct_ops_link_create(union bpf_attr *attr)
1360 {
1361 	struct bpf_struct_ops_link *link = NULL;
1362 	struct bpf_link_primer link_primer;
1363 	struct bpf_struct_ops_map *st_map;
1364 	struct bpf_map *map;
1365 	int err;
1366 
1367 	map = bpf_map_get(attr->link_create.map_fd);
1368 	if (IS_ERR(map))
1369 		return PTR_ERR(map);
1370 
1371 	st_map = (struct bpf_struct_ops_map *)map;
1372 
1373 	if (!bpf_struct_ops_valid_to_reg(map)) {
1374 		err = -EINVAL;
1375 		goto err_out;
1376 	}
1377 
1378 	link = kzalloc_obj(*link, GFP_USER);
1379 	if (!link) {
1380 		err = -ENOMEM;
1381 		goto err_out;
1382 	}
1383 	bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_map_lops, NULL,
1384 		      attr->link_create.attach_type);
1385 
1386 	err = bpf_link_prime(&link->link, &link_primer);
1387 	if (err)
1388 		goto err_out;
1389 
1390 	init_waitqueue_head(&link->wait_hup);
1391 
1392 	/* Hold the update_mutex such that the subsystem cannot
1393 	 * do link->ops->detach() before the link is fully initialized.
1394 	 */
1395 	mutex_lock(&update_mutex);
1396 	err = st_map->st_ops_desc->st_ops->reg(st_map->kvalue.data, &link->link);
1397 	if (err) {
1398 		mutex_unlock(&update_mutex);
1399 		bpf_link_cleanup(&link_primer);
1400 		link = NULL;
1401 		goto err_out;
1402 	}
1403 	RCU_INIT_POINTER(link->map, map);
1404 	mutex_unlock(&update_mutex);
1405 
1406 	return bpf_link_settle(&link_primer);
1407 
1408 err_out:
1409 	bpf_map_put(map);
1410 	kfree(link);
1411 	return err;
1412 }
1413 
1414 int bpf_prog_assoc_struct_ops(struct bpf_prog *prog, struct bpf_map *map)
1415 {
1416 	struct bpf_map *st_ops_assoc;
1417 
1418 	guard(mutex)(&prog->aux->st_ops_assoc_mutex);
1419 
1420 	st_ops_assoc = rcu_dereference_protected(prog->aux->st_ops_assoc,
1421 						 lockdep_is_held(&prog->aux->st_ops_assoc_mutex));
1422 	if (st_ops_assoc && st_ops_assoc == map)
1423 		return 0;
1424 
1425 	if (st_ops_assoc) {
1426 		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS)
1427 			return -EBUSY;
1428 
1429 		rcu_assign_pointer(prog->aux->st_ops_assoc, BPF_PTR_POISON);
1430 	} else {
1431 		/*
1432 		 * struct_ops map does not track associated non-struct_ops programs.
1433 		 * Bump the refcount to make sure st_ops_assoc is always valid.
1434 		 */
1435 		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS)
1436 			bpf_map_inc(map);
1437 
1438 		rcu_assign_pointer(prog->aux->st_ops_assoc, map);
1439 	}
1440 
1441 	return 0;
1442 }
1443 
1444 void bpf_prog_disassoc_struct_ops(struct bpf_prog *prog)
1445 {
1446 	struct bpf_map *st_ops_assoc;
1447 
1448 	guard(mutex)(&prog->aux->st_ops_assoc_mutex);
1449 
1450 	st_ops_assoc = rcu_dereference_protected(prog->aux->st_ops_assoc,
1451 						 lockdep_is_held(&prog->aux->st_ops_assoc_mutex));
1452 	if (!st_ops_assoc || st_ops_assoc == BPF_PTR_POISON)
1453 		return;
1454 
1455 	if (prog->type != BPF_PROG_TYPE_STRUCT_OPS)
1456 		bpf_map_put(st_ops_assoc);
1457 
1458 	RCU_INIT_POINTER(prog->aux->st_ops_assoc, NULL);
1459 }
1460 
1461 /*
1462  * Get a reference to the struct_ops struct (i.e., kdata) associated with a
1463  * program. Should only be called in BPF program context (e.g., in a kfunc).
1464  *
1465  * If the returned pointer is not NULL, it points to a valid struct_ops.
1466  * The struct_ops map is not guaranteed to be initialized or attached.
1467  * Kernel struct_ops implementers are responsible for tracking and checking
1468  * the state of the struct_ops if the use case requires an initialized or
1469  * attached struct_ops.
1470  */
1471 void *bpf_prog_get_assoc_struct_ops(const struct bpf_prog_aux *aux)
1472 {
1473 	struct bpf_struct_ops_map *st_map;
1474 	struct bpf_map *st_ops_assoc;
1475 
1476 	st_ops_assoc = rcu_dereference_check(aux->st_ops_assoc, bpf_rcu_lock_held());
1477 	if (!st_ops_assoc || st_ops_assoc == BPF_PTR_POISON)
1478 		return NULL;
1479 
1480 	st_map = (struct bpf_struct_ops_map *)st_ops_assoc;
1481 
1482 	return &st_map->kvalue.data;
1483 }
1484 EXPORT_SYMBOL_GPL(bpf_prog_get_assoc_struct_ops);
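/* Sketch of a kfunc consuming the association (hypothetical subsystem
 * code; the subsystem is assumed to have the prog's bpf_prog_aux at hand):
 *
 *	__bpf_kfunc int bpf_subsys_do_thing(struct subsys_ctx *ctx)
 *	{
 *		struct subsys_ops *ops;
 *
 *		ops = bpf_prog_get_assoc_struct_ops(ctx->prog_aux);
 *		if (!ops)
 *			return -ENODEV;
 *		return ops->do_thing(ctx);
 *	}
 */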
1485 
1486 void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map)
1487 {
1488 	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
1489 
1490 	info->btf_vmlinux_id = btf_obj_id(st_map->btf);
1491 }
1492