xref: /linux/kernel/bpf/bpf_struct_ops.c (revision d0d106a2bd21499901299160744e5fe9f4c83ddb)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2019 Facebook */
3 
4 #include <linux/bpf.h>
5 #include <linux/bpf_verifier.h>
6 #include <linux/btf.h>
7 #include <linux/filter.h>
8 #include <linux/slab.h>
9 #include <linux/numa.h>
10 #include <linux/seq_file.h>
11 #include <linux/refcount.h>
12 #include <linux/mutex.h>
13 #include <linux/btf_ids.h>
14 #include <linux/rcupdate_wait.h>
15 #include <linux/poll.h>
16 
17 struct bpf_struct_ops_value {
18 	struct bpf_struct_ops_common_value common;
19 	char data[] ____cacheline_aligned_in_smp;
20 };
21 
22 #define MAX_TRAMP_IMAGE_PAGES 8
23 
24 struct bpf_struct_ops_map {
25 	struct bpf_map map;
26 	const struct bpf_struct_ops_desc *st_ops_desc;
27 	/* protect map_update */
28 	struct mutex lock;
29 	/* links has all the bpf_links that are populated
30 	 * to the func ptrs of the kernel's struct
31 	 * (in kvalue.data).
32 	 */
33 	struct bpf_link **links;
34 	/* ksyms for bpf trampolines */
35 	struct bpf_ksym **ksyms;
36 	u32 funcs_cnt;
37 	u32 image_pages_cnt;
38 	/* image_pages is an array of pages that hold all the trampolines
39 	 * that store the func args before calling the bpf_prog.
40 	 */
41 	void *image_pages[MAX_TRAMP_IMAGE_PAGES];
42 	/* The owner module's btf. */
43 	struct btf *btf;
44 	/* uvalue->data stores the kernel struct
45 	 * (e.g. tcp_congestion_ops) that is more useful
46 	 * to userspace than the kvalue.  For example,
47 	 * the bpf_prog's id is stored instead of the kernel
48 	 * address of a func ptr.
49 	 */
50 	struct bpf_struct_ops_value *uvalue;
51 	/* kvalue.data stores the actual kernel's struct
52 	 * (e.g. tcp_congestion_ops) that will be
53 	 * registered to the kernel subsystem.
54 	 */
55 	struct bpf_struct_ops_value kvalue;
56 };
57 
58 struct bpf_struct_ops_link {
59 	struct bpf_link link;
60 	struct bpf_map __rcu *map;
61 	wait_queue_head_t wait_hup;
62 };
63 
64 static DEFINE_MUTEX(update_mutex);
65 
66 #define VALUE_PREFIX "bpf_struct_ops_"
67 #define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1)
68 
69 const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = {
70 };
71 
72 const struct bpf_prog_ops bpf_struct_ops_prog_ops = {
73 #ifdef CONFIG_NET
74 	.test_run = bpf_struct_ops_test_run,
75 #endif
76 };
77 
78 BTF_ID_LIST(st_ops_ids)
79 BTF_ID(struct, module)
80 BTF_ID(struct, bpf_struct_ops_common_value)
81 
82 enum {
83 	IDX_MODULE_ID,
84 	IDX_ST_OPS_COMMON_VALUE_ID,
85 };
86 
87 extern struct btf *btf_vmlinux;
88 
89 static bool is_valid_value_type(struct btf *btf, s32 value_id,
90 				const struct btf_type *type,
91 				const char *value_name)
92 {
93 	const struct btf_type *common_value_type;
94 	const struct btf_member *member;
95 	const struct btf_type *vt, *mt;
96 
97 	vt = btf_type_by_id(btf, value_id);
98 	if (btf_vlen(vt) != 2) {
99 		pr_warn("The number of %s's members should be 2, but we get %d\n",
100 			value_name, btf_vlen(vt));
101 		return false;
102 	}
103 	member = btf_type_member(vt);
104 	mt = btf_type_by_id(btf, member->type);
105 	common_value_type = btf_type_by_id(btf_vmlinux,
106 					   st_ops_ids[IDX_ST_OPS_COMMON_VALUE_ID]);
107 	if (mt != common_value_type) {
108 		pr_warn("The first member of %s should be bpf_struct_ops_common_value\n",
109 			value_name);
110 		return false;
111 	}
112 	member++;
113 	mt = btf_type_by_id(btf, member->type);
114 	if (mt != type) {
115 		pr_warn("The second member of %s should be %s\n",
116 			value_name, btf_name_by_offset(btf, type->name_off));
117 		return false;
118 	}
119 
120 	return true;
121 }
122 
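/* Illustrative sketch (assumed, not defined in this file) of the layout
 * that is_valid_value_type() checks for. For a hypothetical struct_ops
 * type "struct foo_ops", the matching value type must have exactly two
 * members, the common value first and the ops struct second:
 *
 *	struct bpf_struct_ops_foo_ops {
 *		struct bpf_struct_ops_common_value common;
 *		struct foo_ops data ____cacheline_aligned_in_smp;
 *	};
 */
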
123 static void *bpf_struct_ops_image_alloc(void)
124 {
125 	void *image;
126 	int err;
127 
128 	err = bpf_jit_charge_modmem(PAGE_SIZE);
129 	if (err)
130 		return ERR_PTR(err);
131 	image = arch_alloc_bpf_trampoline(PAGE_SIZE);
132 	if (!image) {
133 		bpf_jit_uncharge_modmem(PAGE_SIZE);
134 		return ERR_PTR(-ENOMEM);
135 	}
136 
137 	return image;
138 }
139 
140 void bpf_struct_ops_image_free(void *image)
141 {
142 	if (image) {
143 		arch_free_bpf_trampoline(image, PAGE_SIZE);
144 		bpf_jit_uncharge_modmem(PAGE_SIZE);
145 	}
146 }
147 
148 #define MAYBE_NULL_SUFFIX "__nullable"
149 #define MAX_STUB_NAME 128
150 
151 /* Return the type info of a stub function, if it exists.
152  *
153  * The name of a stub function is made up of the name of the struct_ops and
154  * the name of the function pointer member, separated by "__". For example,
155  * if the struct_ops type is named "foo_ops" and the function pointer
156  * member is named "bar", the stub function name would be "foo_ops__bar".
157  */
158 static const struct btf_type *
159 find_stub_func_proto(const struct btf *btf, const char *st_op_name,
160 		     const char *member_name)
161 {
162 	char stub_func_name[MAX_STUB_NAME];
163 	const struct btf_type *func_type;
164 	s32 btf_id;
165 	int cp;
166 
167 	cp = snprintf(stub_func_name, MAX_STUB_NAME, "%s__%s",
168 		      st_op_name, member_name);
169 	if (cp >= MAX_STUB_NAME) {
170 		pr_warn("Stub function name too long\n");
171 		return NULL;
172 	}
173 	btf_id = btf_find_by_name_kind(btf, stub_func_name, BTF_KIND_FUNC);
174 	if (btf_id < 0)
175 		return NULL;
176 	func_type = btf_type_by_id(btf, btf_id);
177 	if (!func_type)
178 		return NULL;
179 
180 	return btf_type_by_id(btf, func_type->type); /* FUNC_PROTO */
181 }
182 
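/* Illustrative sketch, assuming a hypothetical struct_ops "foo_ops" with
 * a member "int (*bar)(struct foo *f)": the subsystem would declare a
 * stub
 *
 *	static int foo_ops__bar(struct foo *f) { return 0; }
 *
 * and find_stub_func_proto(btf, "foo_ops", "bar") would look up the BTF
 * FUNC named "foo_ops__bar" and return its FUNC_PROTO.
 */
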
183 /* Prepare argument info for every nullable argument of a member of a
184  * struct_ops type.
185  *
186  * Initialize a struct bpf_struct_ops_arg_info according to type info of
187  * the arguments of a stub function. (Check kCFI for more information about
188  * stub functions.)
189  *
190  * Each member in the struct_ops type has a struct bpf_struct_ops_arg_info
191  * to provide an array of struct bpf_ctx_arg_aux, which in turn provides
192  * the information used by the verifier to check the arguments of the
193  * BPF struct_ops program assigned to the member. Here, we only care about
194  * the arguments that are marked as __nullable.
195  *
196  * The array of struct bpf_ctx_arg_aux is eventually assigned to
197  * prog->aux->ctx_arg_info of BPF struct_ops programs and passed to the
198  * verifier. (See check_struct_ops_btf_id())
199  *
200  * arg_info->info will be the list of struct bpf_ctx_arg_aux on success.
201  * On failure, it is left untouched.
202  */
203 static int prepare_arg_info(struct btf *btf,
204 			    const char *st_ops_name,
205 			    const char *member_name,
206 			    const struct btf_type *func_proto,
207 			    struct bpf_struct_ops_arg_info *arg_info)
208 {
209 	const struct btf_type *stub_func_proto, *pointed_type;
210 	const struct btf_param *stub_args, *args;
211 	struct bpf_ctx_arg_aux *info, *info_buf;
212 	u32 nargs, arg_no, info_cnt = 0;
213 	u32 arg_btf_id;
214 	int offset;
215 
216 	stub_func_proto = find_stub_func_proto(btf, st_ops_name, member_name);
217 	if (!stub_func_proto)
218 		return 0;
219 
220 	/* Check if the number of arguments of the stub function is the same
221 	 * as the number of arguments of the function pointer.
222 	 */
223 	nargs = btf_type_vlen(func_proto);
224 	if (nargs != btf_type_vlen(stub_func_proto)) {
225 		pr_warn("the number of arguments of the stub function %s__%s does not match the number of arguments of the member %s of struct %s\n",
226 			st_ops_name, member_name, member_name, st_ops_name);
227 		return -EINVAL;
228 	}
229 
230 	if (!nargs)
231 		return 0;
232 
233 	args = btf_params(func_proto);
234 	stub_args = btf_params(stub_func_proto);
235 
236 	info_buf = kcalloc(nargs, sizeof(*info_buf), GFP_KERNEL);
237 	if (!info_buf)
238 		return -ENOMEM;
239 
240 	/* Prepare info for every nullable argument */
241 	info = info_buf;
242 	for (arg_no = 0; arg_no < nargs; arg_no++) {
243 		/* Skip arguments that are not suffixed with
244 		 * "__nullable".
245 		 */
246 		if (!btf_param_match_suffix(btf, &stub_args[arg_no],
247 					    MAYBE_NULL_SUFFIX))
248 			continue;
249 
250 		/* Should be a pointer to struct */
251 		pointed_type = btf_type_resolve_ptr(btf,
252 						    args[arg_no].type,
253 						    &arg_btf_id);
254 		if (!pointed_type ||
255 		    !btf_type_is_struct(pointed_type)) {
256 			pr_warn("stub function %s__%s has %s tagging to an unsupported type\n",
257 				st_ops_name, member_name, MAYBE_NULL_SUFFIX);
258 			goto err_out;
259 		}
260 
261 		offset = btf_ctx_arg_offset(btf, func_proto, arg_no);
262 		if (offset < 0) {
263 			pr_warn("stub function %s__%s has an invalid trampoline ctx offset for arg#%u\n",
264 				st_ops_name, member_name, arg_no);
265 			goto err_out;
266 		}
267 
268 		if (args[arg_no].type != stub_args[arg_no].type) {
269 			pr_warn("arg#%u type in stub function %s__%s does not match with its original func_proto\n",
270 				arg_no, st_ops_name, member_name);
271 			goto err_out;
272 		}
273 
274 		/* Fill the information of the new argument */
275 		info->reg_type =
276 			PTR_TRUSTED | PTR_TO_BTF_ID | PTR_MAYBE_NULL;
277 		info->btf_id = arg_btf_id;
278 		info->btf = btf;
279 		info->offset = offset;
280 
281 		info++;
282 		info_cnt++;
283 	}
284 
285 	if (info_cnt) {
286 		arg_info->info = info_buf;
287 		arg_info->cnt = info_cnt;
288 	} else {
289 		kfree(info_buf);
290 	}
291 
292 	return 0;
293 
294 err_out:
295 	kfree(info_buf);
296 
297 	return -EINVAL;
298 }
299 
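/* Illustrative sketch of the __nullable convention handled above,
 * reusing the hypothetical "foo_ops" example: declaring the stub arg as
 *
 *	static int foo_ops__bar(struct foo *f__nullable) { return 0; }
 *
 * makes prepare_arg_info() record arg#0 with reg_type
 * PTR_TRUSTED | PTR_TO_BTF_ID | PTR_MAYBE_NULL, so the verifier forces
 * the BPF program to NULL-check 'f' before dereferencing it.
 */
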
300 /* Clean up the arg_info in a struct bpf_struct_ops_desc. */
301 void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc)
302 {
303 	struct bpf_struct_ops_arg_info *arg_info;
304 	int i;
305 
306 	arg_info = st_ops_desc->arg_info;
307 	for (i = 0; i < btf_type_vlen(st_ops_desc->type); i++)
308 		kfree(arg_info[i].info);
309 
310 	kfree(arg_info);
311 }
312 
313 static bool is_module_member(const struct btf *btf, u32 id)
314 {
315 	const struct btf_type *t;
316 
317 	t = btf_type_resolve_ptr(btf, id, NULL);
318 	if (!t)
319 		return false;
320 
321 	if (!__btf_type_is_struct(t) && !btf_type_is_fwd(t))
322 		return false;
323 
324 	return !strcmp(btf_name_by_offset(btf, t->name_off), "module");
325 }
326 
327 int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
328 			     struct btf *btf,
329 			     struct bpf_verifier_log *log)
330 {
331 	struct bpf_struct_ops *st_ops = st_ops_desc->st_ops;
332 	struct bpf_struct_ops_arg_info *arg_info;
333 	const struct btf_member *member;
334 	const struct btf_type *t;
335 	s32 type_id, value_id;
336 	char value_name[128];
337 	const char *mname;
338 	int i, err;
339 
340 	if (strlen(st_ops->name) + VALUE_PREFIX_LEN >=
341 	    sizeof(value_name)) {
342 		pr_warn("struct_ops name %s is too long\n",
343 			st_ops->name);
344 		return -EINVAL;
345 	}
346 	sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name);
347 
348 	if (!st_ops->cfi_stubs) {
349 		pr_warn("struct_ops for %s has no cfi_stubs\n", st_ops->name);
350 		return -EINVAL;
351 	}
352 
353 	type_id = btf_find_by_name_kind(btf, st_ops->name,
354 					BTF_KIND_STRUCT);
355 	if (type_id < 0) {
356 		pr_warn("Cannot find struct %s in %s\n",
357 			st_ops->name, btf_get_name(btf));
358 		return -EINVAL;
359 	}
360 	t = btf_type_by_id(btf, type_id);
361 	if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) {
362 		pr_warn("Cannot support #%u members in struct %s\n",
363 			btf_type_vlen(t), st_ops->name);
364 		return -EINVAL;
365 	}
366 
367 	value_id = btf_find_by_name_kind(btf, value_name,
368 					 BTF_KIND_STRUCT);
369 	if (value_id < 0) {
370 		pr_warn("Cannot find struct %s in %s\n",
371 			value_name, btf_get_name(btf));
372 		return -EINVAL;
373 	}
374 	if (!is_valid_value_type(btf, value_id, t, value_name))
375 		return -EINVAL;
376 
377 	arg_info = kcalloc(btf_type_vlen(t), sizeof(*arg_info),
378 			   GFP_KERNEL);
379 	if (!arg_info)
380 		return -ENOMEM;
381 
382 	st_ops_desc->arg_info = arg_info;
383 	st_ops_desc->type = t;
384 	st_ops_desc->type_id = type_id;
385 	st_ops_desc->value_id = value_id;
386 	st_ops_desc->value_type = btf_type_by_id(btf, value_id);
387 
388 	for_each_member(i, t, member) {
389 		const struct btf_type *func_proto;
390 
391 		mname = btf_name_by_offset(btf, member->name_off);
392 		if (!*mname) {
393 			pr_warn("anon member in struct %s is not supported\n",
394 				st_ops->name);
395 			err = -EOPNOTSUPP;
396 			goto errout;
397 		}
398 
399 		if (__btf_member_bitfield_size(t, member)) {
400 			pr_warn("bit field member %s in struct %s is not supported\n",
401 				mname, st_ops->name);
402 			err = -EOPNOTSUPP;
403 			goto errout;
404 		}
405 
406 		if (!st_ops_ids[IDX_MODULE_ID] && is_module_member(btf, member->type)) {
407 			pr_warn("'struct module' btf id not found. Is CONFIG_MODULES enabled? bpf_struct_ops '%s' needs module support.\n",
408 				st_ops->name);
409 			err = -EOPNOTSUPP;
410 			goto errout;
411 		}
412 
413 		func_proto = btf_type_resolve_func_ptr(btf,
414 						       member->type,
415 						       NULL);
416 		if (!func_proto)
417 			continue;
418 
419 		if (btf_distill_func_proto(log, btf,
420 					   func_proto, mname,
421 					   &st_ops->func_models[i])) {
422 			pr_warn("Error in parsing func ptr %s in struct %s\n",
423 				mname, st_ops->name);
424 			err = -EINVAL;
425 			goto errout;
426 		}
427 
428 		err = prepare_arg_info(btf, st_ops->name, mname,
429 				       func_proto,
430 				       arg_info + i);
431 		if (err)
432 			goto errout;
433 	}
434 
435 	if (st_ops->init(btf)) {
436 		pr_warn("Error in init bpf_struct_ops %s\n",
437 			st_ops->name);
438 		err = -EINVAL;
439 		goto errout;
440 	}
441 
442 	return 0;
443 
444 errout:
445 	bpf_struct_ops_desc_release(st_ops_desc);
446 
447 	return err;
448 }
449 
450 static int bpf_struct_ops_map_get_next_key(struct bpf_map *map, void *key,
451 					   void *next_key)
452 {
453 	if (key && *(u32 *)key == 0)
454 		return -ENOENT;
455 
456 	*(u32 *)next_key = 0;
457 	return 0;
458 }
459 
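/* Usage sketch (userspace, illustrative): a struct_ops map has a single
 * element at key 0, so iterating it with libbpf looks like
 *
 *	__u32 key;
 *	bpf_map_get_next_key(map_fd, NULL, &key);	// key == 0
 *	bpf_map_get_next_key(map_fd, &key, &key);	// no more keys: -ENOENT
 */
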
460 int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
461 				       void *value)
462 {
463 	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
464 	struct bpf_struct_ops_value *uvalue, *kvalue;
465 	enum bpf_struct_ops_state state;
466 	s64 refcnt;
467 
468 	if (unlikely(*(u32 *)key != 0))
469 		return -ENOENT;
470 
471 	kvalue = &st_map->kvalue;
472 	/* Pair with smp_store_release() during map_update */
473 	state = smp_load_acquire(&kvalue->common.state);
474 	if (state == BPF_STRUCT_OPS_STATE_INIT) {
475 		memset(value, 0, map->value_size);
476 		return 0;
477 	}
478 
479 	/* No lock is needed.  state and refcnt do not need
480 	 * to be updated together under atomic context.
481 	 */
482 	uvalue = value;
483 	memcpy(uvalue, st_map->uvalue, map->value_size);
484 	uvalue->common.state = state;
485 
486 	/* This value offers the user space a general estimate of how
487 	 * many sockets are still utilizing this struct_ops for TCP
488 	 * congestion control. The number might not be exact, but it
489 	 * should sufficiently meet our present goals.
490 	 */
491 	refcnt = atomic64_read(&map->refcnt) - atomic64_read(&map->usercnt);
492 	refcount_set(&uvalue->common.refcnt, max_t(s64, refcnt, 0));
493 
494 	return 0;
495 }
496 
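/* Usage sketch (userspace, illustrative): the function above backs the
 * plain BPF_MAP_LOOKUP_ELEM syscall path. With libbpf:
 *
 *	__u32 key = 0;
 *	// 'value' must be map->value_size bytes
 *	bpf_map_lookup_elem(map_fd, &key, value);
 *
 * On return, common.state and common.refcnt in 'value' reflect the map's
 * current state and the estimated reference count described above.
 */
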
497 static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key)
498 {
499 	return ERR_PTR(-EINVAL);
500 }
501 
502 static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
503 {
504 	u32 i;
505 
506 	for (i = 0; i < st_map->funcs_cnt; i++) {
507 		if (!st_map->links[i])
508 			break;
509 		bpf_link_put(st_map->links[i]);
510 		st_map->links[i] = NULL;
511 	}
512 }
513 
514 static void bpf_struct_ops_map_free_image(struct bpf_struct_ops_map *st_map)
515 {
516 	int i;
517 
518 	for (i = 0; i < st_map->image_pages_cnt; i++)
519 		bpf_struct_ops_image_free(st_map->image_pages[i]);
520 	st_map->image_pages_cnt = 0;
521 }
522 
523 static int check_zero_holes(const struct btf *btf, const struct btf_type *t, void *data)
524 {
525 	const struct btf_member *member;
526 	u32 i, moff, msize, prev_mend = 0;
527 	const struct btf_type *mtype;
528 
529 	for_each_member(i, t, member) {
530 		moff = __btf_member_bit_offset(t, member) / 8;
531 		if (moff > prev_mend &&
532 		    memchr_inv(data + prev_mend, 0, moff - prev_mend))
533 			return -EINVAL;
534 
535 		mtype = btf_type_by_id(btf, member->type);
536 		mtype = btf_resolve_size(btf, mtype, &msize);
537 		if (IS_ERR(mtype))
538 			return PTR_ERR(mtype);
539 		prev_mend = moff + msize;
540 	}
541 
542 	if (t->size > prev_mend &&
543 	    memchr_inv(data + prev_mend, 0, t->size - prev_mend))
544 		return -EINVAL;
545 
546 	return 0;
547 }
548 
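/* Illustrative example of what check_zero_holes() rejects, assuming a
 * hypothetical value layout
 *
 *	struct example { u8 a; u32 b; };	// 3 padding bytes after 'a'
 *
 * If userspace leaves non-zero bytes in the padding between 'a' and 'b'
 * (or in any tail padding), the update fails with -EINVAL.
 */
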
549 static void bpf_struct_ops_link_release(struct bpf_link *link)
550 {
551 }
552 
553 static void bpf_struct_ops_link_dealloc(struct bpf_link *link)
554 {
555 	struct bpf_tramp_link *tlink = container_of(link, struct bpf_tramp_link, link);
556 
557 	kfree(tlink);
558 }
559 
560 const struct bpf_link_ops bpf_struct_ops_link_lops = {
561 	.release = bpf_struct_ops_link_release,
562 	.dealloc = bpf_struct_ops_link_dealloc,
563 };
564 
565 int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
566 				      struct bpf_tramp_link *link,
567 				      const struct btf_func_model *model,
568 				      void *stub_func,
569 				      void **_image, u32 *_image_off,
570 				      bool allow_alloc)
571 {
572 	u32 image_off = *_image_off, flags = BPF_TRAMP_F_INDIRECT;
573 	void *image = *_image;
574 	int size;
575 
576 	tlinks[BPF_TRAMP_FENTRY].links[0] = link;
577 	tlinks[BPF_TRAMP_FENTRY].nr_links = 1;
578 
579 	if (model->ret_size > 0)
580 		flags |= BPF_TRAMP_F_RET_FENTRY_RET;
581 
582 	size = arch_bpf_trampoline_size(model, flags, tlinks, NULL);
583 	if (size <= 0)
584 		return size ? : -EFAULT;
585 
586 	/* Allocate image buffer if necessary */
587 	if (!image || size > PAGE_SIZE - image_off) {
588 		if (!allow_alloc)
589 			return -E2BIG;
590 
591 		image = bpf_struct_ops_image_alloc();
592 		if (IS_ERR(image))
593 			return PTR_ERR(image);
594 		image_off = 0;
595 	}
596 
597 	size = arch_prepare_bpf_trampoline(NULL, image + image_off,
598 					   image + image_off + size,
599 					   model, flags, tlinks, stub_func);
600 	if (size <= 0) {
601 		if (image != *_image)
602 			bpf_struct_ops_image_free(image);
603 		return size ? : -EFAULT;
604 	}
605 
606 	*_image = image;
607 	*_image_off = image_off + size;
608 	return 0;
609 }
610 
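/* Illustrative call path: once a member's func ptr is set to a
 * trampoline prepared above, a kernel caller invoking e.g. ops->bar(a, b)
 * jumps into the trampoline, which spills 'a' and 'b' into the BPF ctx
 * array and then calls the struct_ops program attached to "bar".
 */
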
611 static void bpf_struct_ops_ksym_init(const char *tname, const char *mname,
612 				     void *image, unsigned int size,
613 				     struct bpf_ksym *ksym)
614 {
615 	snprintf(ksym->name, KSYM_NAME_LEN, "bpf__%s_%s", tname, mname);
616 	INIT_LIST_HEAD_RCU(&ksym->lnode);
617 	bpf_image_ksym_init(image, size, ksym);
618 }
619 
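/* Illustrative example of the "bpf__%s_%s" format above: the trampoline
 * for the "cong_avoid" member of "tcp_congestion_ops" is named
 * "bpf__tcp_congestion_ops_cong_avoid" in kallsyms.
 */
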
620 static void bpf_struct_ops_map_add_ksyms(struct bpf_struct_ops_map *st_map)
621 {
622 	u32 i;
623 
624 	for (i = 0; i < st_map->funcs_cnt; i++) {
625 		if (!st_map->ksyms[i])
626 			break;
627 		bpf_image_ksym_add(st_map->ksyms[i]);
628 	}
629 }
630 
631 static void bpf_struct_ops_map_del_ksyms(struct bpf_struct_ops_map *st_map)
632 {
633 	u32 i;
634 
635 	for (i = 0; i < st_map->funcs_cnt; i++) {
636 		if (!st_map->ksyms[i])
637 			break;
638 		bpf_image_ksym_del(st_map->ksyms[i]);
639 	}
640 }
641 
642 static void bpf_struct_ops_map_free_ksyms(struct bpf_struct_ops_map *st_map)
643 {
644 	u32 i;
645 
646 	for (i = 0; i < st_map->funcs_cnt; i++) {
647 		if (!st_map->ksyms[i])
648 			break;
649 		kfree(st_map->ksyms[i]);
650 		st_map->ksyms[i] = NULL;
651 	}
652 }
653 
654 static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
655 					   void *value, u64 flags)
656 {
657 	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
658 	const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc;
659 	const struct bpf_struct_ops *st_ops = st_ops_desc->st_ops;
660 	struct bpf_struct_ops_value *uvalue, *kvalue;
661 	const struct btf_type *module_type;
662 	const struct btf_member *member;
663 	const struct btf_type *t = st_ops_desc->type;
664 	struct bpf_tramp_links *tlinks;
665 	void *udata, *kdata;
666 	int prog_fd, err;
667 	u32 i, trampoline_start, image_off = 0;
668 	void *cur_image = NULL, *image = NULL;
669 	struct bpf_link **plink;
670 	struct bpf_ksym **pksym;
671 	const char *tname, *mname;
672 
673 	if (flags)
674 		return -EINVAL;
675 
676 	if (*(u32 *)key != 0)
677 		return -E2BIG;
678 
679 	err = check_zero_holes(st_map->btf, st_ops_desc->value_type, value);
680 	if (err)
681 		return err;
682 
683 	uvalue = value;
684 	err = check_zero_holes(st_map->btf, t, uvalue->data);
685 	if (err)
686 		return err;
687 
688 	if (uvalue->common.state || refcount_read(&uvalue->common.refcnt))
689 		return -EINVAL;
690 
691 	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
692 	if (!tlinks)
693 		return -ENOMEM;
694 
695 	uvalue = (struct bpf_struct_ops_value *)st_map->uvalue;
696 	kvalue = (struct bpf_struct_ops_value *)&st_map->kvalue;
697 
698 	mutex_lock(&st_map->lock);
699 
700 	if (kvalue->common.state != BPF_STRUCT_OPS_STATE_INIT) {
701 		err = -EBUSY;
702 		goto unlock;
703 	}
704 
705 	memcpy(uvalue, value, map->value_size);
706 
707 	udata = &uvalue->data;
708 	kdata = &kvalue->data;
709 
710 	plink = st_map->links;
711 	pksym = st_map->ksyms;
712 	tname = btf_name_by_offset(st_map->btf, t->name_off);
713 	module_type = btf_type_by_id(btf_vmlinux, st_ops_ids[IDX_MODULE_ID]);
714 	for_each_member(i, t, member) {
715 		const struct btf_type *mtype, *ptype;
716 		struct bpf_prog *prog;
717 		struct bpf_tramp_link *link;
718 		struct bpf_ksym *ksym;
719 		u32 moff;
720 
721 		moff = __btf_member_bit_offset(t, member) / 8;
722 		mname = btf_name_by_offset(st_map->btf, member->name_off);
723 		ptype = btf_type_resolve_ptr(st_map->btf, member->type, NULL);
724 		if (ptype == module_type) {
725 			if (*(void **)(udata + moff))
726 				goto reset_unlock;
727 			*(void **)(kdata + moff) = BPF_MODULE_OWNER;
728 			continue;
729 		}
730 
731 		err = st_ops->init_member(t, member, kdata, udata);
732 		if (err < 0)
733 			goto reset_unlock;
734 
735 		/* The ->init_member() has handled this member */
736 		if (err > 0)
737 			continue;
738 
739 		/* If st_ops->init_member does not handle it,
740 		 * we will only handle func ptrs and zero-ed members
741 		 * here.  Reject everything else.
742 		 */
743 
744 		/* All non func ptr members must be 0 */
745 		if (!ptype || !btf_type_is_func_proto(ptype)) {
746 			u32 msize;
747 
748 			mtype = btf_type_by_id(st_map->btf, member->type);
749 			mtype = btf_resolve_size(st_map->btf, mtype, &msize);
750 			if (IS_ERR(mtype)) {
751 				err = PTR_ERR(mtype);
752 				goto reset_unlock;
753 			}
754 
755 			if (memchr_inv(udata + moff, 0, msize)) {
756 				err = -EINVAL;
757 				goto reset_unlock;
758 			}
759 
760 			continue;
761 		}
762 
763 		prog_fd = (int)(*(unsigned long *)(udata + moff));
764 		/* Similar check as the attr->attach_prog_fd */
765 		if (!prog_fd)
766 			continue;
767 
768 		prog = bpf_prog_get(prog_fd);
769 		if (IS_ERR(prog)) {
770 			err = PTR_ERR(prog);
771 			goto reset_unlock;
772 		}
773 
774 		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
775 		    prog->aux->attach_btf_id != st_ops_desc->type_id ||
776 		    prog->expected_attach_type != i) {
777 			bpf_prog_put(prog);
778 			err = -EINVAL;
779 			goto reset_unlock;
780 		}
781 
782 		link = kzalloc(sizeof(*link), GFP_USER);
783 		if (!link) {
784 			bpf_prog_put(prog);
785 			err = -ENOMEM;
786 			goto reset_unlock;
787 		}
788 		bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS,
789 			      &bpf_struct_ops_link_lops, prog);
790 		*plink++ = &link->link;
791 
792 		ksym = kzalloc(sizeof(*ksym), GFP_USER);
793 		if (!ksym) {
794 			err = -ENOMEM;
795 			goto reset_unlock;
796 		}
797 		*pksym++ = ksym;
798 
799 		trampoline_start = image_off;
800 		err = bpf_struct_ops_prepare_trampoline(tlinks, link,
801 						&st_ops->func_models[i],
802 						*(void **)(st_ops->cfi_stubs + moff),
803 						&image, &image_off,
804 						st_map->image_pages_cnt < MAX_TRAMP_IMAGE_PAGES);
805 		if (err)
806 			goto reset_unlock;
807 
808 		if (cur_image != image) {
809 			st_map->image_pages[st_map->image_pages_cnt++] = image;
810 			cur_image = image;
811 			trampoline_start = 0;
812 		}
813 
814 		*(void **)(kdata + moff) = image + trampoline_start + cfi_get_offset();
815 
816 		/* put prog_id to udata */
817 		*(unsigned long *)(udata + moff) = prog->aux->id;
818 
819 		/* init ksym for this trampoline */
820 		bpf_struct_ops_ksym_init(tname, mname,
821 					 image + trampoline_start,
822 					 image_off - trampoline_start,
823 					 ksym);
824 	}
825 
826 	if (st_ops->validate) {
827 		err = st_ops->validate(kdata);
828 		if (err)
829 			goto reset_unlock;
830 	}
831 	for (i = 0; i < st_map->image_pages_cnt; i++) {
832 		err = arch_protect_bpf_trampoline(st_map->image_pages[i],
833 						  PAGE_SIZE);
834 		if (err)
835 			goto reset_unlock;
836 	}
837 
838 	if (st_map->map.map_flags & BPF_F_LINK) {
839 		err = 0;
840 		/* Let bpf_link handle registration & unregistration.
841 		 *
842 		 * Pair with smp_load_acquire() during lookup_elem().
843 		 */
844 		smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_READY);
845 		goto unlock;
846 	}
847 
848 	err = st_ops->reg(kdata, NULL);
849 	if (likely(!err)) {
850 		/* This refcnt increment on the map here after
851 		 * 'st_ops->reg()' is secure since the state of the
852 		 * map must be set to INIT at this moment, and thus
853 		 * bpf_struct_ops_map_delete_elem() can't unregister
854 		 * or transition it to TOBEFREE concurrently.
855 		 */
856 		bpf_map_inc(map);
857 		/* Pair with smp_load_acquire() during lookup_elem().
858 		 * It ensures the above udata updates (e.g. prog->aux->id)
859 		 * can be seen once BPF_STRUCT_OPS_STATE_INUSE is set.
860 		 */
861 		smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_INUSE);
862 		goto unlock;
863 	}
864 
865 	/* Error during st_ops->reg(). Can happen if this struct_ops needs to be
866 	 * verified as a whole, after all init_member() calls. Can also happen if
867 	 * there was a race in registering the struct_ops (under the same name) to
868 	 * a sub-system through different struct_ops's maps.
869 	 */
870 
871 reset_unlock:
872 	bpf_struct_ops_map_free_ksyms(st_map);
873 	bpf_struct_ops_map_free_image(st_map);
874 	bpf_struct_ops_map_put_progs(st_map);
875 	memset(uvalue, 0, map->value_size);
876 	memset(kvalue, 0, map->value_size);
877 unlock:
878 	kfree(tlinks);
879 	mutex_unlock(&st_map->lock);
880 	if (!err)
881 		bpf_struct_ops_map_add_ksyms(st_map);
882 	return err;
883 }
884 
885 static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
886 {
887 	enum bpf_struct_ops_state prev_state;
888 	struct bpf_struct_ops_map *st_map;
889 
890 	st_map = (struct bpf_struct_ops_map *)map;
891 	if (st_map->map.map_flags & BPF_F_LINK)
892 		return -EOPNOTSUPP;
893 
894 	prev_state = cmpxchg(&st_map->kvalue.common.state,
895 			     BPF_STRUCT_OPS_STATE_INUSE,
896 			     BPF_STRUCT_OPS_STATE_TOBEFREE);
897 	switch (prev_state) {
898 	case BPF_STRUCT_OPS_STATE_INUSE:
899 		st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, NULL);
900 		bpf_map_put(map);
901 		return 0;
902 	case BPF_STRUCT_OPS_STATE_TOBEFREE:
903 		return -EINPROGRESS;
904 	case BPF_STRUCT_OPS_STATE_INIT:
905 		return -ENOENT;
906 	default:
907 		WARN_ON_ONCE(1);
908 		/* Should never happen.  Treat it as not found. */
909 		return -ENOENT;
910 	}
911 }
912 
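/* State machine recap (illustrative), matching the cmpxchg above: a
 * non-BPF_F_LINK map moves INIT -> INUSE on update_elem() and
 * INUSE -> TOBEFREE on delete_elem(); a BPF_F_LINK map instead moves
 * INIT -> READY on update_elem() and is torn down through its bpf_link.
 */
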
913 static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
914 					     struct seq_file *m)
915 {
916 	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
917 	void *value;
918 	int err;
919 
920 	value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
921 	if (!value)
922 		return;
923 
924 	err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
925 	if (!err) {
926 		btf_type_seq_show(st_map->btf,
927 				  map->btf_vmlinux_value_type_id,
928 				  value, m);
929 		seq_putc(m, '\n');
930 	}
931 
932 	kfree(value);
933 }
934 
935 static void __bpf_struct_ops_map_free(struct bpf_map *map)
936 {
937 	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
938 
939 	if (st_map->links)
940 		bpf_struct_ops_map_put_progs(st_map);
941 	if (st_map->ksyms)
942 		bpf_struct_ops_map_free_ksyms(st_map);
943 	bpf_map_area_free(st_map->links);
944 	bpf_map_area_free(st_map->ksyms);
945 	bpf_struct_ops_map_free_image(st_map);
946 	bpf_map_area_free(st_map->uvalue);
947 	bpf_map_area_free(st_map);
948 }
949 
950 static void bpf_struct_ops_map_free(struct bpf_map *map)
951 {
952 	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
953 
954 	/* st_ops->owner was acquired during map_alloc to implicitly hold
955 	 * the btf's refcnt. The acquire was only done when btf_is_module()
956 	 * is true, so st_map->btf cannot be NULL here.
957 	 */
958 	if (btf_is_module(st_map->btf))
959 		module_put(st_map->st_ops_desc->st_ops->owner);
960 
961 	bpf_struct_ops_map_del_ksyms(st_map);
962 
963 	/* The struct_ops's function may switch to another struct_ops.
964 	 *
965 	 * For example, bpf_tcp_cc_x->init() may switch to
966 	 * another tcp_cc_y by calling
967 	 * setsockopt(TCP_CONGESTION, "tcp_cc_y").
968 	 * During the switch, bpf_struct_ops_put(tcp_cc_x) is called
969 	 * and its refcount may reach 0, which would then free its
970 	 * trampoline image while tcp_cc_x is still running.
971 	 *
972 	 * A vanilla rcu gp is to wait for all bpf-tcp-cc progs
973 	 * to finish; bpf-tcp-cc progs are non-sleepable.
974 	 * A rcu_tasks gp is to wait for the last few insns
975 	 * in the trampoline image to finish before releasing
976 	 * the trampoline image.
977 	 */
978 	synchronize_rcu_mult(call_rcu, call_rcu_tasks);
979 
980 	__bpf_struct_ops_map_free(map);
981 }
982 
983 static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
984 {
985 	if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
986 	    (attr->map_flags & ~(BPF_F_LINK | BPF_F_VTYPE_BTF_OBJ_FD)) ||
987 	    !attr->btf_vmlinux_value_type_id)
988 		return -EINVAL;
989 	return 0;
990 }
991 
992 static u32 count_func_ptrs(const struct btf *btf, const struct btf_type *t)
993 {
994 	int i;
995 	u32 count;
996 	const struct btf_member *member;
997 
998 	count = 0;
999 	for_each_member(i, t, member)
1000 		if (btf_type_resolve_func_ptr(btf, member->type, NULL))
1001 			count++;
1002 	return count;
1003 }
1004 
1005 static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
1006 {
1007 	const struct bpf_struct_ops_desc *st_ops_desc;
1008 	size_t st_map_size;
1009 	struct bpf_struct_ops_map *st_map;
1010 	const struct btf_type *t, *vt;
1011 	struct module *mod = NULL;
1012 	struct bpf_map *map;
1013 	struct btf *btf;
1014 	int ret;
1015 
1016 	if (attr->map_flags & BPF_F_VTYPE_BTF_OBJ_FD) {
1017 		/* The map holds the btf for its whole lifetime. */
1018 		btf = btf_get_by_fd(attr->value_type_btf_obj_fd);
1019 		if (IS_ERR(btf))
1020 			return ERR_CAST(btf);
1021 		if (!btf_is_module(btf)) {
1022 			btf_put(btf);
1023 			return ERR_PTR(-EINVAL);
1024 		}
1025 
1026 		mod = btf_try_get_module(btf);
1027 		/* mod holds a refcnt to btf. We don't need an extra refcnt
1028 		 * here.
1029 		 */
1030 		btf_put(btf);
1031 		if (!mod)
1032 			return ERR_PTR(-EINVAL);
1033 	} else {
1034 		btf = bpf_get_btf_vmlinux();
1035 		if (IS_ERR(btf))
1036 			return ERR_CAST(btf);
1037 		if (!btf)
1038 			return ERR_PTR(-ENOTSUPP);
1039 	}
1040 
1041 	st_ops_desc = bpf_struct_ops_find_value(btf, attr->btf_vmlinux_value_type_id);
1042 	if (!st_ops_desc) {
1043 		ret = -ENOTSUPP;
1044 		goto errout;
1045 	}
1046 
1047 	vt = st_ops_desc->value_type;
1048 	if (attr->value_size != vt->size) {
1049 		ret = -EINVAL;
1050 		goto errout;
1051 	}
1052 
1053 	t = st_ops_desc->type;
1054 
1055 	st_map_size = sizeof(*st_map) +
1056 		/* kvalue stores the
1057 		 * struct bpf_struct_ops_tcp_congestion_ops
1058 		 */
1059 		(vt->size - sizeof(struct bpf_struct_ops_value));
1060 
1061 	st_map = bpf_map_area_alloc(st_map_size, NUMA_NO_NODE);
1062 	if (!st_map) {
1063 		ret = -ENOMEM;
1064 		goto errout;
1065 	}
1066 
1067 	st_map->st_ops_desc = st_ops_desc;
1068 	map = &st_map->map;
1069 
1070 	st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE);
1071 	st_map->funcs_cnt = count_func_ptrs(btf, t);
1072 	st_map->links =
1073 		bpf_map_area_alloc(st_map->funcs_cnt * sizeof(struct bpf_link *),
1074 				   NUMA_NO_NODE);
1075 
1076 	st_map->ksyms =
1077 		bpf_map_area_alloc(st_map->funcs_cnt * sizeof(struct bpf_ksym *),
1078 				   NUMA_NO_NODE);
1079 	if (!st_map->uvalue || !st_map->links || !st_map->ksyms) {
1080 		ret = -ENOMEM;
1081 		goto errout_free;
1082 	}
1083 	st_map->btf = btf;
1084 
1085 	mutex_init(&st_map->lock);
1086 	bpf_map_init_from_attr(map, attr);
1087 
1088 	return map;
1089 
1090 errout_free:
1091 	__bpf_struct_ops_map_free(map);
1092 errout:
1093 	module_put(mod);
1094 
1095 	return ERR_PTR(ret);
1096 }
1097 
1098 static u64 bpf_struct_ops_map_mem_usage(const struct bpf_map *map)
1099 {
1100 	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
1101 	const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc;
1102 	const struct btf_type *vt = st_ops_desc->value_type;
1103 	u64 usage;
1104 
1105 	usage = sizeof(*st_map) +
1106 			vt->size - sizeof(struct bpf_struct_ops_value);
1107 	usage += vt->size;
1108 	usage += st_map->funcs_cnt * sizeof(struct bpf_link *);
1109 	usage += st_map->funcs_cnt * sizeof(struct bpf_ksym *);
1110 	usage += PAGE_SIZE;
1111 	return usage;
1112 }
1113 
1114 BTF_ID_LIST_SINGLE(bpf_struct_ops_map_btf_ids, struct, bpf_struct_ops_map)
1115 const struct bpf_map_ops bpf_struct_ops_map_ops = {
1116 	.map_alloc_check = bpf_struct_ops_map_alloc_check,
1117 	.map_alloc = bpf_struct_ops_map_alloc,
1118 	.map_free = bpf_struct_ops_map_free,
1119 	.map_get_next_key = bpf_struct_ops_map_get_next_key,
1120 	.map_lookup_elem = bpf_struct_ops_map_lookup_elem,
1121 	.map_delete_elem = bpf_struct_ops_map_delete_elem,
1122 	.map_update_elem = bpf_struct_ops_map_update_elem,
1123 	.map_seq_show_elem = bpf_struct_ops_map_seq_show_elem,
1124 	.map_mem_usage = bpf_struct_ops_map_mem_usage,
1125 	.map_btf_id = &bpf_struct_ops_map_btf_ids[0],
1126 };
1127 
1128 /* "const void *" because some subsystem is
1129  * passing a const (e.g. const struct tcp_congestion_ops *)
1130  */
1131 bool bpf_struct_ops_get(const void *kdata)
1132 {
1133 	struct bpf_struct_ops_value *kvalue;
1134 	struct bpf_struct_ops_map *st_map;
1135 	struct bpf_map *map;
1136 
1137 	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
1138 	st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);
1139 
1140 	map = __bpf_map_inc_not_zero(&st_map->map, false);
1141 	return !IS_ERR(map);
1142 }
1143 
1144 void bpf_struct_ops_put(const void *kdata)
1145 {
1146 	struct bpf_struct_ops_value *kvalue;
1147 	struct bpf_struct_ops_map *st_map;
1148 
1149 	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
1150 	st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);
1151 
1152 	bpf_map_put(&st_map->map);
1153 }
1154 
1155 int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff)
1156 {
1157 	void *func_ptr = *(void **)(st_ops->cfi_stubs + moff);
1158 
1159 	return func_ptr ? 0 : -ENOTSUPP;
1160 }
1161 
1162 static bool bpf_struct_ops_valid_to_reg(struct bpf_map *map)
1163 {
1164 	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
1165 
1166 	return map->map_type == BPF_MAP_TYPE_STRUCT_OPS &&
1167 		map->map_flags & BPF_F_LINK &&
1168 		/* Pair with smp_store_release() during map_update */
1169 		smp_load_acquire(&st_map->kvalue.common.state) == BPF_STRUCT_OPS_STATE_READY;
1170 }
1171 
1172 static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link)
1173 {
1174 	struct bpf_struct_ops_link *st_link;
1175 	struct bpf_struct_ops_map *st_map;
1176 
1177 	st_link = container_of(link, struct bpf_struct_ops_link, link);
1178 	st_map = (struct bpf_struct_ops_map *)
1179 		rcu_dereference_protected(st_link->map, true);
1180 	if (st_map) {
1181 		st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link);
1182 		bpf_map_put(&st_map->map);
1183 	}
1184 	kfree(st_link);
1185 }
1186 
1187 static void bpf_struct_ops_map_link_show_fdinfo(const struct bpf_link *link,
1188 					    struct seq_file *seq)
1189 {
1190 	struct bpf_struct_ops_link *st_link;
1191 	struct bpf_map *map;
1192 
1193 	st_link = container_of(link, struct bpf_struct_ops_link, link);
1194 	rcu_read_lock();
1195 	map = rcu_dereference(st_link->map);
1196 	if (map)
1197 		seq_printf(seq, "map_id:\t%d\n", map->id);
1198 	rcu_read_unlock();
1199 }
1200 
1201 static int bpf_struct_ops_map_link_fill_link_info(const struct bpf_link *link,
1202 					       struct bpf_link_info *info)
1203 {
1204 	struct bpf_struct_ops_link *st_link;
1205 	struct bpf_map *map;
1206 
1207 	st_link = container_of(link, struct bpf_struct_ops_link, link);
1208 	rcu_read_lock();
1209 	map = rcu_dereference(st_link->map);
1210 	if (map)
1211 		info->struct_ops.map_id = map->id;
1212 	rcu_read_unlock();
1213 	return 0;
1214 }
1215 
1216 static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map *new_map,
1217 					  struct bpf_map *expected_old_map)
1218 {
1219 	struct bpf_struct_ops_map *st_map, *old_st_map;
1220 	struct bpf_map *old_map;
1221 	struct bpf_struct_ops_link *st_link;
1222 	int err;
1223 
1224 	st_link = container_of(link, struct bpf_struct_ops_link, link);
1225 	st_map = container_of(new_map, struct bpf_struct_ops_map, map);
1226 
1227 	if (!bpf_struct_ops_valid_to_reg(new_map))
1228 		return -EINVAL;
1229 
1230 	if (!st_map->st_ops_desc->st_ops->update)
1231 		return -EOPNOTSUPP;
1232 
1233 	mutex_lock(&update_mutex);
1234 
1235 	old_map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
1236 	if (!old_map) {
1237 		err = -ENOLINK;
1238 		goto err_out;
1239 	}
1240 	if (expected_old_map && old_map != expected_old_map) {
1241 		err = -EPERM;
1242 		goto err_out;
1243 	}
1244 
1245 	old_st_map = container_of(old_map, struct bpf_struct_ops_map, map);
1246 	/* The new and old struct_ops must be the same type. */
1247 	if (st_map->st_ops_desc != old_st_map->st_ops_desc) {
1248 		err = -EINVAL;
1249 		goto err_out;
1250 	}
1251 
1252 	err = st_map->st_ops_desc->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data, link);
1253 	if (err)
1254 		goto err_out;
1255 
1256 	bpf_map_inc(new_map);
1257 	rcu_assign_pointer(st_link->map, new_map);
1258 	bpf_map_put(old_map);
1259 
1260 err_out:
1261 	mutex_unlock(&update_mutex);
1262 
1263 	return err;
1264 }
1265 
1266 static int bpf_struct_ops_map_link_detach(struct bpf_link *link)
1267 {
1268 	struct bpf_struct_ops_link *st_link = container_of(link, struct bpf_struct_ops_link, link);
1269 	struct bpf_struct_ops_map *st_map;
1270 	struct bpf_map *map;
1271 
1272 	mutex_lock(&update_mutex);
1273 
1274 	map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
1275 	if (!map) {
1276 		mutex_unlock(&update_mutex);
1277 		return 0;
1278 	}
1279 	st_map = container_of(map, struct bpf_struct_ops_map, map);
1280 
1281 	st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link);
1282 
1283 	RCU_INIT_POINTER(st_link->map, NULL);
1284 	/* Pair with bpf_map_get() in bpf_struct_ops_link_create() or
1285 	 * bpf_map_inc() in bpf_struct_ops_map_link_update().
1286 	 */
1287 	bpf_map_put(&st_map->map);
1288 
1289 	mutex_unlock(&update_mutex);
1290 
1291 	wake_up_interruptible_poll(&st_link->wait_hup, EPOLLHUP);
1292 
1293 	return 0;
1294 }
1295 
1296 static __poll_t bpf_struct_ops_map_link_poll(struct file *file,
1297 					     struct poll_table_struct *pts)
1298 {
1299 	struct bpf_struct_ops_link *st_link = file->private_data;
1300 
1301 	poll_wait(file, &st_link->wait_hup, pts);
1302 
1303 	return rcu_access_pointer(st_link->map) ? 0 : EPOLLHUP;
1304 }
1305 
1306 static const struct bpf_link_ops bpf_struct_ops_map_lops = {
1307 	.dealloc = bpf_struct_ops_map_link_dealloc,
1308 	.detach = bpf_struct_ops_map_link_detach,
1309 	.show_fdinfo = bpf_struct_ops_map_link_show_fdinfo,
1310 	.fill_link_info = bpf_struct_ops_map_link_fill_link_info,
1311 	.update_map = bpf_struct_ops_map_link_update,
1312 	.poll = bpf_struct_ops_map_link_poll,
1313 };
1314 
1315 int bpf_struct_ops_link_create(union bpf_attr *attr)
1316 {
1317 	struct bpf_struct_ops_link *link = NULL;
1318 	struct bpf_link_primer link_primer;
1319 	struct bpf_struct_ops_map *st_map;
1320 	struct bpf_map *map;
1321 	int err;
1322 
1323 	map = bpf_map_get(attr->link_create.map_fd);
1324 	if (IS_ERR(map))
1325 		return PTR_ERR(map);
1326 
1327 	st_map = (struct bpf_struct_ops_map *)map;
1328 
1329 	if (!bpf_struct_ops_valid_to_reg(map)) {
1330 		err = -EINVAL;
1331 		goto err_out;
1332 	}
1333 
1334 	link = kzalloc(sizeof(*link), GFP_USER);
1335 	if (!link) {
1336 		err = -ENOMEM;
1337 		goto err_out;
1338 	}
1339 	bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_map_lops, NULL);
1340 
1341 	err = bpf_link_prime(&link->link, &link_primer);
1342 	if (err)
1343 		goto err_out;
1344 
1345 	init_waitqueue_head(&link->wait_hup);
1346 
1347 	/* Hold the update_mutex such that the subsystem cannot
1348 	 * do link->ops->detach() before the link is fully initialized.
1349 	 */
1350 	mutex_lock(&update_mutex);
1351 	err = st_map->st_ops_desc->st_ops->reg(st_map->kvalue.data, &link->link);
1352 	if (err) {
1353 		mutex_unlock(&update_mutex);
1354 		bpf_link_cleanup(&link_primer);
1355 		link = NULL;
1356 		goto err_out;
1357 	}
1358 	RCU_INIT_POINTER(link->map, map);
1359 	mutex_unlock(&update_mutex);
1360 
1361 	return bpf_link_settle(&link_primer);
1362 
1363 err_out:
1364 	bpf_map_put(map);
1365 	kfree(link);
1366 	return err;
1367 }
1368 
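/* Usage sketch (userspace, illustrative): with libbpf, a struct_ops map
 * created with BPF_F_LINK reaches the function above via
 *
 *	struct bpf_link *l = bpf_map__attach_struct_ops(skel->maps.my_ops);
 *
 * which issues BPF_LINK_CREATE with the map fd; "skel->maps.my_ops" is a
 * placeholder for the application's skeleton map.
 */
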
1369 void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map)
1370 {
1371 	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
1372 
1373 	info->btf_vmlinux_id = btf_obj_id(st_map->btf);
1374 }
1375