xref: /linux/kernel/bpf/bpf_struct_ops.c (revision 23b0f90ba871f096474e1c27c3d14f455189d2d9)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2019 Facebook */
3 
4 #include <linux/bpf.h>
5 #include <linux/bpf_verifier.h>
6 #include <linux/btf.h>
7 #include <linux/filter.h>
8 #include <linux/slab.h>
9 #include <linux/numa.h>
10 #include <linux/seq_file.h>
11 #include <linux/refcount.h>
12 #include <linux/mutex.h>
13 #include <linux/btf_ids.h>
14 #include <linux/rcupdate_wait.h>
15 #include <linux/poll.h>
16 
17 struct bpf_struct_ops_value {
18 	struct bpf_struct_ops_common_value common;
19 	char data[] ____cacheline_aligned_in_smp;
20 };
21 
22 #define MAX_TRAMP_IMAGE_PAGES 8
23 
24 struct bpf_struct_ops_map {
25 	struct bpf_map map;
26 	const struct bpf_struct_ops_desc *st_ops_desc;
27 	/* protect map_update */
28 	struct mutex lock;
29 	/* links has all the bpf_links that are populated
30 	 * to the func ptrs of the kernel's struct
31 	 * (in kvalue.data).
32 	 */
33 	struct bpf_link **links;
34 	/* ksyms for bpf trampolines */
35 	struct bpf_ksym **ksyms;
36 	u32 funcs_cnt;
37 	u32 image_pages_cnt;
38 	/* image_pages is an array of pages holding all the trampolines
39 	 * that store the func args before calling the bpf_prog.
40 	 */
41 	void *image_pages[MAX_TRAMP_IMAGE_PAGES];
42 	/* The owner module's btf. */
43 	struct btf *btf;
44 	/* uvalue->data stores the kernel struct
45 	 * (e.g. tcp_congestion_ops) that is more useful
46 	 * to userspace than the kvalue.  For example,
47 	 * the bpf_prog's id is stored instead of the kernel
48 	 * address of a func ptr.
49 	 */
50 	struct bpf_struct_ops_value *uvalue;
51 	/* kvalue.data stores the actual kernel's struct
52 	 * (e.g. tcp_congestion_ops) that will be
53 	 * registered to the kernel subsystem.
54 	 */
55 	struct bpf_struct_ops_value kvalue;
56 };
57 
58 struct bpf_struct_ops_link {
59 	struct bpf_link link;
60 	struct bpf_map __rcu *map;
61 	wait_queue_head_t wait_hup;
62 };
63 
64 static DEFINE_MUTEX(update_mutex);
65 
66 #define VALUE_PREFIX "bpf_struct_ops_"
67 #define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1)
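
/* A sketch of the naming convention (using tcp_congestion_ops as an
 * assumed example): for a struct_ops type named "tcp_congestion_ops",
 * the expected BTF value type is VALUE_PREFIX + the type name, laid out
 * like struct bpf_struct_ops_value above:
 *
 *	struct bpf_struct_ops_tcp_congestion_ops {
 *		struct bpf_struct_ops_common_value common;
 *		struct tcp_congestion_ops data ____cacheline_aligned_in_smp;
 *	};
 *
 * is_valid_value_type() below enforces exactly this two-member shape.
 */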
68 
69 const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = {
70 };
71 
72 const struct bpf_prog_ops bpf_struct_ops_prog_ops = {
73 #ifdef CONFIG_NET
74 	.test_run = bpf_struct_ops_test_run,
75 #endif
76 };
77 
78 BTF_ID_LIST(st_ops_ids)
79 BTF_ID(struct, module)
80 BTF_ID(struct, bpf_struct_ops_common_value)
81 
82 enum {
83 	IDX_MODULE_ID,
84 	IDX_ST_OPS_COMMON_VALUE_ID,
85 };
86 
87 extern struct btf *btf_vmlinux;
88 
89 static bool is_valid_value_type(struct btf *btf, s32 value_id,
90 				const struct btf_type *type,
91 				const char *value_name)
92 {
93 	const struct btf_type *common_value_type;
94 	const struct btf_member *member;
95 	const struct btf_type *vt, *mt;
96 
97 	vt = btf_type_by_id(btf, value_id);
98 	if (btf_vlen(vt) != 2) {
99 		pr_warn("The number of %s's members should be 2, but we get %d\n",
100 			value_name, btf_vlen(vt));
101 		return false;
102 	}
103 	member = btf_type_member(vt);
104 	mt = btf_type_by_id(btf, member->type);
105 	common_value_type = btf_type_by_id(btf_vmlinux,
106 					   st_ops_ids[IDX_ST_OPS_COMMON_VALUE_ID]);
107 	if (mt != common_value_type) {
108 		pr_warn("The first member of %s should be bpf_struct_ops_common_value\n",
109 			value_name);
110 		return false;
111 	}
112 	member++;
113 	mt = btf_type_by_id(btf, member->type);
114 	if (mt != type) {
115 		pr_warn("The second member of %s should be %s\n",
116 			value_name, btf_name_by_offset(btf, type->name_off));
117 		return false;
118 	}
119 
120 	return true;
121 }
122 
123 static void *bpf_struct_ops_image_alloc(void)
124 {
125 	void *image;
126 	int err;
127 
128 	err = bpf_jit_charge_modmem(PAGE_SIZE);
129 	if (err)
130 		return ERR_PTR(err);
131 	image = arch_alloc_bpf_trampoline(PAGE_SIZE);
132 	if (!image) {
133 		bpf_jit_uncharge_modmem(PAGE_SIZE);
134 		return ERR_PTR(-ENOMEM);
135 	}
136 
137 	return image;
138 }
139 
140 void bpf_struct_ops_image_free(void *image)
141 {
142 	if (image) {
143 		arch_free_bpf_trampoline(image, PAGE_SIZE);
144 		bpf_jit_uncharge_modmem(PAGE_SIZE);
145 	}
146 }
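
/* A sketch of the expected pairing (an observation from this file, not
 * an API contract): every page returned by bpf_struct_ops_image_alloc()
 * either ends up in st_map->image_pages[] and is released through
 * bpf_struct_ops_map_free_image(), or is freed directly with
 * bpf_struct_ops_image_free() on an error path in
 * bpf_struct_ops_prepare_trampoline().
 */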
147 
148 #define MAYBE_NULL_SUFFIX "__nullable"
149 #define REFCOUNTED_SUFFIX "__ref"
150 
151 /* Prepare argument info for every nullable or refcounted argument of a
152  * member of a struct_ops type.
153  *
154  * Initialize a struct bpf_struct_ops_arg_info according to type info of
155  * the arguments of a stub function. (Check kCFI for more information about
156  * stub functions.)
157  *
158  * Each member in the struct_ops type has a struct bpf_struct_ops_arg_info
159  * to provide an array of struct bpf_ctx_arg_aux, which in turn provides
160  * the information that is used by the verifier to check the arguments of
161  * the BPF struct_ops program assigned to the member. Here, we only care
162  * about the arguments that are tagged with the __nullable or __ref suffix.
163  *
164  * The array of struct bpf_ctx_arg_aux is eventually assigned to
165  * prog->aux->ctx_arg_info of BPF struct_ops programs and passed to the
166  * verifier. (See check_struct_ops_btf_id())
167  *
168  * arg_info->info will be the list of struct bpf_ctx_arg_aux on success.
169  * On failure, it is left untouched.
170  */
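/* A minimal sketch of the stub convention (the names below are
 * hypothetical, not taken from this file): for a struct_ops member
 *
 *	int (*handle_task)(struct task_struct *task);
 *
 * the subsystem provides a CFI stub whose parameter name carries the
 * suffix, e.g.
 *
 *	static int my_ops__handle_task(struct task_struct *task__nullable)
 *	{
 *		return 0;
 *	}
 *
 * which makes the verifier treat the first argument of the BPF program
 * as PTR_TO_BTF_ID | PTR_TRUSTED | PTR_MAYBE_NULL, forcing a NULL check
 * before dereference.
 */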
171 static int prepare_arg_info(struct btf *btf,
172 			    const char *st_ops_name,
173 			    const char *member_name,
174 			    const struct btf_type *func_proto, void *stub_func_addr,
175 			    struct bpf_struct_ops_arg_info *arg_info)
176 {
177 	const struct btf_type *stub_func_proto, *pointed_type;
178 	bool is_nullable = false, is_refcounted = false;
179 	const struct btf_param *stub_args, *args;
180 	struct bpf_ctx_arg_aux *info, *info_buf;
181 	u32 nargs, arg_no, info_cnt = 0;
182 	char ksym[KSYM_SYMBOL_LEN];
183 	const char *stub_fname;
184 	const char *suffix;
185 	s32 stub_func_id;
186 	u32 arg_btf_id;
187 	int offset;
188 
189 	stub_fname = kallsyms_lookup((unsigned long)stub_func_addr, NULL, NULL, NULL, ksym);
190 	if (!stub_fname) {
191 		pr_warn("Cannot find the stub function name for the %s in struct %s\n",
192 			member_name, st_ops_name);
193 		return -ENOENT;
194 	}
195 
196 	stub_func_id = btf_find_by_name_kind(btf, stub_fname, BTF_KIND_FUNC);
197 	if (stub_func_id < 0) {
198 		pr_warn("Cannot find the stub function %s in btf\n", stub_fname);
199 		return -ENOENT;
200 	}
201 
202 	stub_func_proto = btf_type_by_id(btf, stub_func_id);
203 	stub_func_proto = btf_type_by_id(btf, stub_func_proto->type);
204 
205 	/* Check if the number of arguments of the stub function is the same
206 	 * as the number of arguments of the function pointer.
207 	 */
208 	nargs = btf_type_vlen(func_proto);
209 	if (nargs != btf_type_vlen(stub_func_proto)) {
210 		pr_warn("the number of arguments of the stub function %s does not match the number of arguments of the member %s of struct %s\n",
211 			stub_fname, member_name, st_ops_name);
212 		return -EINVAL;
213 	}
214 
215 	if (!nargs)
216 		return 0;
217 
218 	args = btf_params(func_proto);
219 	stub_args = btf_params(stub_func_proto);
220 
221 	info_buf = kcalloc(nargs, sizeof(*info_buf), GFP_KERNEL);
222 	if (!info_buf)
223 		return -ENOMEM;
224 
225 	/* Prepare info for every nullable or refcounted argument */
226 	info = info_buf;
227 	for (arg_no = 0; arg_no < nargs; arg_no++) {
228 		/* Skip arguments that are not suffixed with
229 		 * "__nullable" or "__ref".
230 		 */
231 		is_nullable = btf_param_match_suffix(btf, &stub_args[arg_no],
232 						     MAYBE_NULL_SUFFIX);
233 		is_refcounted = btf_param_match_suffix(btf, &stub_args[arg_no],
234 						       REFCOUNTED_SUFFIX);
235 
236 		if (is_nullable)
237 			suffix = MAYBE_NULL_SUFFIX;
238 		else if (is_refcounted)
239 			suffix = REFCOUNTED_SUFFIX;
240 		else
241 			continue;
242 
243 		/* Should be a pointer to struct */
244 		pointed_type = btf_type_resolve_ptr(btf,
245 						    args[arg_no].type,
246 						    &arg_btf_id);
247 		if (!pointed_type ||
248 		    !btf_type_is_struct(pointed_type)) {
249 			pr_warn("stub function %s has %s tagging to an unsupported type\n",
250 				stub_fname, suffix);
251 			goto err_out;
252 		}
253 
254 		offset = btf_ctx_arg_offset(btf, func_proto, arg_no);
255 		if (offset < 0) {
256 			pr_warn("stub function %s has an invalid trampoline ctx offset for arg#%u\n",
257 				stub_fname, arg_no);
258 			goto err_out;
259 		}
260 
261 		if (args[arg_no].type != stub_args[arg_no].type) {
262 			pr_warn("arg#%u type in stub function %s does not match with its original func_proto\n",
263 				arg_no, stub_fname);
264 			goto err_out;
265 		}
266 
267 		/* Fill the information of the new argument */
268 		info->btf_id = arg_btf_id;
269 		info->btf = btf;
270 		info->offset = offset;
271 		if (is_nullable) {
272 			info->reg_type = PTR_TRUSTED | PTR_TO_BTF_ID | PTR_MAYBE_NULL;
273 		} else if (is_refcounted) {
274 			info->reg_type = PTR_TRUSTED | PTR_TO_BTF_ID;
275 			info->refcounted = true;
276 		}
277 
278 		info++;
279 		info_cnt++;
280 	}
281 
282 	if (info_cnt) {
283 		arg_info->info = info_buf;
284 		arg_info->cnt = info_cnt;
285 	} else {
286 		kfree(info_buf);
287 	}
288 
289 	return 0;
290 
291 err_out:
292 	kfree(info_buf);
293 
294 	return -EINVAL;
295 }
296 
297 /* Clean up the arg_info in a struct bpf_struct_ops_desc. */
298 void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc)
299 {
300 	struct bpf_struct_ops_arg_info *arg_info;
301 	int i;
302 
303 	arg_info = st_ops_desc->arg_info;
304 	for (i = 0; i < btf_type_vlen(st_ops_desc->type); i++)
305 		kfree(arg_info[i].info);
306 
307 	kfree(arg_info);
308 }
309 
310 static bool is_module_member(const struct btf *btf, u32 id)
311 {
312 	const struct btf_type *t;
313 
314 	t = btf_type_resolve_ptr(btf, id, NULL);
315 	if (!t)
316 		return false;
317 
318 	if (!__btf_type_is_struct(t) && !btf_type_is_fwd(t))
319 		return false;
320 
321 	return !strcmp(btf_name_by_offset(btf, t->name_off), "module");
322 }
323 
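/* A subsystem can leave a slot in its cfi_stubs NULL to declare the
 * corresponding op unsupported; bpf_struct_ops_desc_init() below then
 * skips such a member, and attaching a program to it fails. (A sketch of
 * the convention as used in this file.)
 */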
324 int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff)
325 {
326 	void *func_ptr = *(void **)(st_ops->cfi_stubs + moff);
327 
328 	return func_ptr ? 0 : -ENOTSUPP;
329 }
330 
331 int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
332 			     struct btf *btf,
333 			     struct bpf_verifier_log *log)
334 {
335 	struct bpf_struct_ops *st_ops = st_ops_desc->st_ops;
336 	struct bpf_struct_ops_arg_info *arg_info;
337 	const struct btf_member *member;
338 	const struct btf_type *t;
339 	s32 type_id, value_id;
340 	char value_name[128];
341 	const char *mname;
342 	int i, err;
343 
344 	if (strlen(st_ops->name) + VALUE_PREFIX_LEN >=
345 	    sizeof(value_name)) {
346 		pr_warn("struct_ops name %s is too long\n",
347 			st_ops->name);
348 		return -EINVAL;
349 	}
350 	sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name);
351 
352 	if (!st_ops->cfi_stubs) {
353 		pr_warn("struct_ops for %s has no cfi_stubs\n", st_ops->name);
354 		return -EINVAL;
355 	}
356 
357 	type_id = btf_find_by_name_kind(btf, st_ops->name,
358 					BTF_KIND_STRUCT);
359 	if (type_id < 0) {
360 		pr_warn("Cannot find struct %s in %s\n",
361 			st_ops->name, btf_get_name(btf));
362 		return -EINVAL;
363 	}
364 	t = btf_type_by_id(btf, type_id);
365 	if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) {
366 		pr_warn("Cannot support #%u members in struct %s\n",
367 			btf_type_vlen(t), st_ops->name);
368 		return -EINVAL;
369 	}
370 
371 	value_id = btf_find_by_name_kind(btf, value_name,
372 					 BTF_KIND_STRUCT);
373 	if (value_id < 0) {
374 		pr_warn("Cannot find struct %s in %s\n",
375 			value_name, btf_get_name(btf));
376 		return -EINVAL;
377 	}
378 	if (!is_valid_value_type(btf, value_id, t, value_name))
379 		return -EINVAL;
380 
381 	arg_info = kcalloc(btf_type_vlen(t), sizeof(*arg_info),
382 			   GFP_KERNEL);
383 	if (!arg_info)
384 		return -ENOMEM;
385 
386 	st_ops_desc->arg_info = arg_info;
387 	st_ops_desc->type = t;
388 	st_ops_desc->type_id = type_id;
389 	st_ops_desc->value_id = value_id;
390 	st_ops_desc->value_type = btf_type_by_id(btf, value_id);
391 
392 	for_each_member(i, t, member) {
393 		const struct btf_type *func_proto, *ret_type;
394 		void **stub_func_addr;
395 		u32 moff;
396 
397 		moff = __btf_member_bit_offset(t, member) / 8;
398 		mname = btf_name_by_offset(btf, member->name_off);
399 		if (!*mname) {
400 			pr_warn("anon member in struct %s is not supported\n",
401 				st_ops->name);
402 			err = -EOPNOTSUPP;
403 			goto errout;
404 		}
405 
406 		if (__btf_member_bitfield_size(t, member)) {
407 			pr_warn("bit field member %s in struct %s is not supported\n",
408 				mname, st_ops->name);
409 			err = -EOPNOTSUPP;
410 			goto errout;
411 		}
412 
413 		if (!st_ops_ids[IDX_MODULE_ID] && is_module_member(btf, member->type)) {
414 			pr_warn("'struct module' btf id not found. Is CONFIG_MODULES enabled? bpf_struct_ops '%s' needs module support.\n",
415 				st_ops->name);
416 			err = -EOPNOTSUPP;
417 			goto errout;
418 		}
419 
420 		func_proto = btf_type_resolve_func_ptr(btf,
421 						       member->type,
422 						       NULL);
423 
424 		/* The member is not a function pointer or
425 		 * the function pointer is not supported.
426 		 */
427 		if (!func_proto || bpf_struct_ops_supported(st_ops, moff))
428 			continue;
429 
430 		if (func_proto->type) {
431 			ret_type = btf_type_resolve_ptr(btf, func_proto->type, NULL);
432 			if (ret_type && !__btf_type_is_struct(ret_type)) {
433 				pr_warn("func ptr %s in struct %s returns non-struct pointer, which is not supported\n",
434 					mname, st_ops->name);
435 				err = -EOPNOTSUPP;
436 				goto errout;
437 			}
438 		}
439 
440 		if (btf_distill_func_proto(log, btf,
441 					   func_proto, mname,
442 					   &st_ops->func_models[i])) {
443 			pr_warn("Error in parsing func ptr %s in struct %s\n",
444 				mname, st_ops->name);
445 			err = -EINVAL;
446 			goto errout;
447 		}
448 
449 		stub_func_addr = *(void **)(st_ops->cfi_stubs + moff);
450 		err = prepare_arg_info(btf, st_ops->name, mname,
451 				       func_proto, stub_func_addr,
452 				       arg_info + i);
453 		if (err)
454 			goto errout;
455 	}
456 
457 	if (st_ops->init(btf)) {
458 		pr_warn("Error in init bpf_struct_ops %s\n",
459 			st_ops->name);
460 		err = -EINVAL;
461 		goto errout;
462 	}
463 
464 	return 0;
465 
466 errout:
467 	bpf_struct_ops_desc_release(st_ops_desc);
468 
469 	return err;
470 }
471 
472 static int bpf_struct_ops_map_get_next_key(struct bpf_map *map, void *key,
473 					   void *next_key)
474 {
475 	if (key && *(u32 *)key == 0)
476 		return -ENOENT;
477 
478 	*(u32 *)next_key = 0;
479 	return 0;
480 }
481 
482 int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
483 				       void *value)
484 {
485 	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
486 	struct bpf_struct_ops_value *uvalue, *kvalue;
487 	enum bpf_struct_ops_state state;
488 	s64 refcnt;
489 
490 	if (unlikely(*(u32 *)key != 0))
491 		return -ENOENT;
492 
493 	kvalue = &st_map->kvalue;
494 	/* Pair with smp_store_release() during map_update */
495 	state = smp_load_acquire(&kvalue->common.state);
496 	if (state == BPF_STRUCT_OPS_STATE_INIT) {
497 		memset(value, 0, map->value_size);
498 		return 0;
499 	}
500 
501 	/* No lock is needed. state and refcnt do not need
502 	 * to be updated together in an atomic context.
503 	 */
504 	uvalue = value;
505 	memcpy(uvalue, st_map->uvalue, map->value_size);
506 	uvalue->common.state = state;
507 
508 	/* This value offers the user space a general estimate of how
509 	 * many sockets are still utilizing this struct_ops for TCP
510 	 * congestion control. The number might not be exact, but it
511 	 * should be accurate enough for our present purposes.
512 	 */
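	/* Illustrative sketch (derived from this file, not an exact
	 * contract): map->refcnt includes kernel-side references such as
	 * the bpf_map_inc() taken on registration and each
	 * bpf_struct_ops_get() taken by a socket, while map->usercnt
	 * counts user-space handles, so the difference only approximates
	 * the number of in-kernel users.
	 */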
513 	refcnt = atomic64_read(&map->refcnt) - atomic64_read(&map->usercnt);
514 	refcount_set(&uvalue->common.refcnt, max_t(s64, refcnt, 0));
515 
516 	return 0;
517 }
518 
519 static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key)
520 {
521 	return ERR_PTR(-EINVAL);
522 }
523 
524 static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
525 {
526 	u32 i;
527 
528 	for (i = 0; i < st_map->funcs_cnt; i++) {
529 		if (!st_map->links[i])
530 			break;
531 		bpf_link_put(st_map->links[i]);
532 		st_map->links[i] = NULL;
533 	}
534 }
535 
536 static void bpf_struct_ops_map_dissoc_progs(struct bpf_struct_ops_map *st_map)
537 {
538 	u32 i;
539 
540 	for (i = 0; i < st_map->funcs_cnt; i++) {
541 		if (!st_map->links[i])
542 			break;
543 		bpf_prog_disassoc_struct_ops(st_map->links[i]->prog);
544 	}
545 }
546 
547 static void bpf_struct_ops_map_free_image(struct bpf_struct_ops_map *st_map)
548 {
549 	int i;
550 
551 	for (i = 0; i < st_map->image_pages_cnt; i++)
552 		bpf_struct_ops_image_free(st_map->image_pages[i]);
553 	st_map->image_pages_cnt = 0;
554 }
555 
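/* A sketch of what check_zero_holes() guards against (hypothetical
 * layout): in
 *
 *	struct foo { u8 a; u32 b; };
 *
 * there are three padding bytes between 'a' (ends at offset 1) and 'b'
 * (starts at offset 4). Any non-zero byte in that hole, or in the tail
 * padding after the last member, makes the update fail with -EINVAL.
 */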
556 static int check_zero_holes(const struct btf *btf, const struct btf_type *t, void *data)
557 {
558 	const struct btf_member *member;
559 	u32 i, moff, msize, prev_mend = 0;
560 	const struct btf_type *mtype;
561 
562 	for_each_member(i, t, member) {
563 		moff = __btf_member_bit_offset(t, member) / 8;
564 		if (moff > prev_mend &&
565 		    memchr_inv(data + prev_mend, 0, moff - prev_mend))
566 			return -EINVAL;
567 
568 		mtype = btf_type_by_id(btf, member->type);
569 		mtype = btf_resolve_size(btf, mtype, &msize);
570 		if (IS_ERR(mtype))
571 			return PTR_ERR(mtype);
572 		prev_mend = moff + msize;
573 	}
574 
575 	if (t->size > prev_mend &&
576 	    memchr_inv(data + prev_mend, 0, t->size - prev_mend))
577 		return -EINVAL;
578 
579 	return 0;
580 }
581 
582 static void bpf_struct_ops_link_release(struct bpf_link *link)
583 {
584 }
585 
586 static void bpf_struct_ops_link_dealloc(struct bpf_link *link)
587 {
588 	struct bpf_tramp_link *tlink = container_of(link, struct bpf_tramp_link, link);
589 
590 	kfree(tlink);
591 }
592 
593 const struct bpf_link_ops bpf_struct_ops_link_lops = {
594 	.release = bpf_struct_ops_link_release,
595 	.dealloc = bpf_struct_ops_link_dealloc,
596 };
597 
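/* Trampolines are packed into shared pages: image and image_off track
 * the current page and the next free offset within it. A sketch of the
 * calling pattern (mirroring bpf_struct_ops_map_update_elem() below):
 *
 *	void *image = NULL;
 *	u32 image_off = 0;
 *	int err;
 *
 *	err = bpf_struct_ops_prepare_trampoline(tlinks, link, model,
 *						stub_func, &image,
 *						&image_off, true);
 *	// on success, the new trampoline starts at the old image_off
 *	// (or at offset 0 if a fresh page had to be allocated)
 */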
598 int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
599 				      struct bpf_tramp_link *link,
600 				      const struct btf_func_model *model,
601 				      void *stub_func,
602 				      void **_image, u32 *_image_off,
603 				      bool allow_alloc)
604 {
605 	u32 image_off = *_image_off, flags = BPF_TRAMP_F_INDIRECT;
606 	void *image = *_image;
607 	int size;
608 
609 	tlinks[BPF_TRAMP_FENTRY].links[0] = link;
610 	tlinks[BPF_TRAMP_FENTRY].nr_links = 1;
611 
612 	if (model->ret_size > 0)
613 		flags |= BPF_TRAMP_F_RET_FENTRY_RET;
614 
615 	size = arch_bpf_trampoline_size(model, flags, tlinks, stub_func);
616 	if (size <= 0)
617 		return size ? : -EFAULT;
618 
619 	/* Allocate image buffer if necessary */
620 	if (!image || size > PAGE_SIZE - image_off) {
621 		if (!allow_alloc)
622 			return -E2BIG;
623 
624 		image = bpf_struct_ops_image_alloc();
625 		if (IS_ERR(image))
626 			return PTR_ERR(image);
627 		image_off = 0;
628 	}
629 
630 	size = arch_prepare_bpf_trampoline(NULL, image + image_off,
631 					   image + image_off + size,
632 					   model, flags, tlinks, stub_func);
633 	if (size <= 0) {
634 		if (image != *_image)
635 			bpf_struct_ops_image_free(image);
636 		return size ? : -EFAULT;
637 	}
638 
639 	*_image = image;
640 	*_image_off = image_off + size;
641 	return 0;
642 }
643 
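/* E.g. (a sketch): the trampoline for member "ssthresh" of a struct_ops
 * type named "tcp_congestion_ops" would appear in kallsyms as
 * "bpf__tcp_congestion_ops_ssthresh".
 */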
644 static void bpf_struct_ops_ksym_init(const char *tname, const char *mname,
645 				     void *image, unsigned int size,
646 				     struct bpf_ksym *ksym)
647 {
648 	snprintf(ksym->name, KSYM_NAME_LEN, "bpf__%s_%s", tname, mname);
649 	INIT_LIST_HEAD_RCU(&ksym->lnode);
650 	bpf_image_ksym_init(image, size, ksym);
651 }
652 
653 static void bpf_struct_ops_map_add_ksyms(struct bpf_struct_ops_map *st_map)
654 {
655 	u32 i;
656 
657 	for (i = 0; i < st_map->funcs_cnt; i++) {
658 		if (!st_map->ksyms[i])
659 			break;
660 		bpf_image_ksym_add(st_map->ksyms[i]);
661 	}
662 }
663 
664 static void bpf_struct_ops_map_del_ksyms(struct bpf_struct_ops_map *st_map)
665 {
666 	u32 i;
667 
668 	for (i = 0; i < st_map->funcs_cnt; i++) {
669 		if (!st_map->ksyms[i])
670 			break;
671 		bpf_image_ksym_del(st_map->ksyms[i]);
672 	}
673 }
674 
675 static void bpf_struct_ops_map_free_ksyms(struct bpf_struct_ops_map *st_map)
676 {
677 	u32 i;
678 
679 	for (i = 0; i < st_map->funcs_cnt; i++) {
680 		if (!st_map->ksyms[i])
681 			break;
682 		kfree(st_map->ksyms[i]);
683 		st_map->ksyms[i] = NULL;
684 	}
685 }
686 
687 static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
688 					   void *value, u64 flags)
689 {
690 	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
691 	const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc;
692 	const struct bpf_struct_ops *st_ops = st_ops_desc->st_ops;
693 	struct bpf_struct_ops_value *uvalue, *kvalue;
694 	const struct btf_type *module_type;
695 	const struct btf_member *member;
696 	const struct btf_type *t = st_ops_desc->type;
697 	struct bpf_tramp_links *tlinks;
698 	void *udata, *kdata;
699 	int prog_fd, err;
700 	u32 i, trampoline_start, image_off = 0;
701 	void *cur_image = NULL, *image = NULL;
702 	struct bpf_link **plink;
703 	struct bpf_ksym **pksym;
704 	const char *tname, *mname;
705 
706 	if (flags)
707 		return -EINVAL;
708 
709 	if (*(u32 *)key != 0)
710 		return -E2BIG;
711 
712 	err = check_zero_holes(st_map->btf, st_ops_desc->value_type, value);
713 	if (err)
714 		return err;
715 
716 	uvalue = value;
717 	err = check_zero_holes(st_map->btf, t, uvalue->data);
718 	if (err)
719 		return err;
720 
721 	if (uvalue->common.state || refcount_read(&uvalue->common.refcnt))
722 		return -EINVAL;
723 
724 	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
725 	if (!tlinks)
726 		return -ENOMEM;
727 
728 	uvalue = (struct bpf_struct_ops_value *)st_map->uvalue;
729 	kvalue = (struct bpf_struct_ops_value *)&st_map->kvalue;
730 
731 	mutex_lock(&st_map->lock);
732 
733 	if (kvalue->common.state != BPF_STRUCT_OPS_STATE_INIT) {
734 		err = -EBUSY;
735 		goto unlock;
736 	}
737 
738 	memcpy(uvalue, value, map->value_size);
739 
740 	udata = &uvalue->data;
741 	kdata = &kvalue->data;
742 
743 	plink = st_map->links;
744 	pksym = st_map->ksyms;
745 	tname = btf_name_by_offset(st_map->btf, t->name_off);
746 	module_type = btf_type_by_id(btf_vmlinux, st_ops_ids[IDX_MODULE_ID]);
747 	for_each_member(i, t, member) {
748 		const struct btf_type *mtype, *ptype;
749 		struct bpf_prog *prog;
750 		struct bpf_tramp_link *link;
751 		struct bpf_ksym *ksym;
752 		u32 moff;
753 
754 		moff = __btf_member_bit_offset(t, member) / 8;
755 		mname = btf_name_by_offset(st_map->btf, member->name_off);
756 		ptype = btf_type_resolve_ptr(st_map->btf, member->type, NULL);
757 		if (ptype == module_type) {
758 			if (*(void **)(udata + moff))
759 				goto reset_unlock;
760 			*(void **)(kdata + moff) = BPF_MODULE_OWNER;
761 			continue;
762 		}
763 
764 		err = st_ops->init_member(t, member, kdata, udata);
765 		if (err < 0)
766 			goto reset_unlock;
767 
768 		/* The ->init_member() has handled this member */
769 		if (err > 0)
770 			continue;
771 
772 		/* If st_ops->init_member does not handle it,
773 		 * we will only handle func ptrs and zeroed members
774 		 * here. Reject everything else.
775 		 */
776 
777 		/* All non-func-ptr members must be 0 */
778 		if (!ptype || !btf_type_is_func_proto(ptype)) {
779 			u32 msize;
780 
781 			mtype = btf_type_by_id(st_map->btf, member->type);
782 			mtype = btf_resolve_size(st_map->btf, mtype, &msize);
783 			if (IS_ERR(mtype)) {
784 				err = PTR_ERR(mtype);
785 				goto reset_unlock;
786 			}
787 
788 			if (memchr_inv(udata + moff, 0, msize)) {
789 				err = -EINVAL;
790 				goto reset_unlock;
791 			}
792 
793 			continue;
794 		}
795 
796 		prog_fd = (int)(*(unsigned long *)(udata + moff));
797 		/* Similar check to the one on attr->attach_prog_fd */
798 		if (!prog_fd)
799 			continue;
800 
801 		prog = bpf_prog_get(prog_fd);
802 		if (IS_ERR(prog)) {
803 			err = PTR_ERR(prog);
804 			goto reset_unlock;
805 		}
806 
807 		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
808 		    prog->aux->attach_btf_id != st_ops_desc->type_id ||
809 		    prog->expected_attach_type != i) {
810 			bpf_prog_put(prog);
811 			err = -EINVAL;
812 			goto reset_unlock;
813 		}
814 
815 		/* Poison the assoc pointer on error instead of failing the update, for backward compatibility */
816 		bpf_prog_assoc_struct_ops(prog, &st_map->map);
817 
818 		link = kzalloc(sizeof(*link), GFP_USER);
819 		if (!link) {
820 			bpf_prog_put(prog);
821 			err = -ENOMEM;
822 			goto reset_unlock;
823 		}
824 		bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS,
825 			      &bpf_struct_ops_link_lops, prog, prog->expected_attach_type);
826 		*plink++ = &link->link;
827 
828 		ksym = kzalloc(sizeof(*ksym), GFP_USER);
829 		if (!ksym) {
830 			err = -ENOMEM;
831 			goto reset_unlock;
832 		}
833 		*pksym++ = ksym;
834 
835 		trampoline_start = image_off;
836 		err = bpf_struct_ops_prepare_trampoline(tlinks, link,
837 						&st_ops->func_models[i],
838 						*(void **)(st_ops->cfi_stubs + moff),
839 						&image, &image_off,
840 						st_map->image_pages_cnt < MAX_TRAMP_IMAGE_PAGES);
841 		if (err)
842 			goto reset_unlock;
843 
844 		if (cur_image != image) {
845 			st_map->image_pages[st_map->image_pages_cnt++] = image;
846 			cur_image = image;
847 			trampoline_start = 0;
848 		}
849 
850 		*(void **)(kdata + moff) = image + trampoline_start + cfi_get_offset();
851 
852 		/* put the prog id into udata */
853 		*(unsigned long *)(udata + moff) = prog->aux->id;
854 
855 		/* init ksym for this trampoline */
856 		bpf_struct_ops_ksym_init(tname, mname,
857 					 image + trampoline_start,
858 					 image_off - trampoline_start,
859 					 ksym);
860 	}
861 
862 	if (st_ops->validate) {
863 		err = st_ops->validate(kdata);
864 		if (err)
865 			goto reset_unlock;
866 	}
867 	for (i = 0; i < st_map->image_pages_cnt; i++) {
868 		err = arch_protect_bpf_trampoline(st_map->image_pages[i],
869 						  PAGE_SIZE);
870 		if (err)
871 			goto reset_unlock;
872 	}
873 
874 	if (st_map->map.map_flags & BPF_F_LINK) {
875 		err = 0;
876 		/* Let bpf_link handle registration & unregistration.
877 		 *
878 		 * Pair with smp_load_acquire() during lookup_elem().
879 		 */
880 		smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_READY);
881 		goto unlock;
882 	}
883 
884 	err = st_ops->reg(kdata, NULL);
885 	if (likely(!err)) {
886 		/* Incrementing the map's refcnt here, after
887 		 * 'st_ops->reg()', is safe since the state of the
888 		 * map must still be INIT at this moment, and thus
889 		 * bpf_struct_ops_map_delete_elem() can't unregister
890 		 * or transition it to TOBEFREE concurrently.
891 		 */
892 		bpf_map_inc(map);
893 		/* Pair with smp_load_acquire() during lookup_elem().
894 		 * It ensures the above udata updates (e.g. prog->aux->id)
895 		 * can be seen once BPF_STRUCT_OPS_STATE_INUSE is set.
896 		 */
897 		smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_INUSE);
898 		goto unlock;
899 	}
900 
901 	/* Error during st_ops->reg(). Can happen if this struct_ops needs to be
902 	 * verified as a whole, after all init_member() calls. Can also happen if
903 	 * there was a race in registering the struct_ops (under the same name) to
904 	 * a sub-system through different struct_ops's maps.
905 	 */
906 
907 reset_unlock:
908 	bpf_struct_ops_map_free_ksyms(st_map);
909 	bpf_struct_ops_map_free_image(st_map);
910 	bpf_struct_ops_map_put_progs(st_map);
911 	memset(uvalue, 0, map->value_size);
912 	memset(kvalue, 0, map->value_size);
913 unlock:
914 	kfree(tlinks);
915 	mutex_unlock(&st_map->lock);
916 	if (!err)
917 		bpf_struct_ops_map_add_ksyms(st_map);
918 	return err;
919 }
920 
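/* A sketch of the kvalue.common.state machine as used in this file:
 *
 *	INIT --update_elem()--> INUSE --delete_elem()--> TOBEFREE
 *	INIT --update_elem() on a BPF_F_LINK map--> READY
 *
 * READY maps are registered and unregistered through bpf_link instead,
 * so delete_elem() rejects them with -EOPNOTSUPP.
 */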
921 static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
922 {
923 	enum bpf_struct_ops_state prev_state;
924 	struct bpf_struct_ops_map *st_map;
925 
926 	st_map = (struct bpf_struct_ops_map *)map;
927 	if (st_map->map.map_flags & BPF_F_LINK)
928 		return -EOPNOTSUPP;
929 
930 	prev_state = cmpxchg(&st_map->kvalue.common.state,
931 			     BPF_STRUCT_OPS_STATE_INUSE,
932 			     BPF_STRUCT_OPS_STATE_TOBEFREE);
933 	switch (prev_state) {
934 	case BPF_STRUCT_OPS_STATE_INUSE:
935 		st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, NULL);
936 		bpf_map_put(map);
937 		return 0;
938 	case BPF_STRUCT_OPS_STATE_TOBEFREE:
939 		return -EINPROGRESS;
940 	case BPF_STRUCT_OPS_STATE_INIT:
941 		return -ENOENT;
942 	default:
943 		WARN_ON_ONCE(1);
944 		/* Should never happen.  Treat it as not found. */
945 		return -ENOENT;
946 	}
947 }
948 
949 static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
950 					     struct seq_file *m)
951 {
952 	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
953 	void *value;
954 	int err;
955 
956 	value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
957 	if (!value)
958 		return;
959 
960 	err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
961 	if (!err) {
962 		btf_type_seq_show(st_map->btf,
963 				  map->btf_vmlinux_value_type_id,
964 				  value, m);
965 		seq_putc(m, '\n');
966 	}
967 
968 	kfree(value);
969 }
970 
971 static void __bpf_struct_ops_map_free(struct bpf_map *map)
972 {
973 	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
974 
975 	if (st_map->links)
976 		bpf_struct_ops_map_put_progs(st_map);
977 	if (st_map->ksyms)
978 		bpf_struct_ops_map_free_ksyms(st_map);
979 	bpf_map_area_free(st_map->links);
980 	bpf_map_area_free(st_map->ksyms);
981 	bpf_struct_ops_map_free_image(st_map);
982 	bpf_map_area_free(st_map->uvalue);
983 	bpf_map_area_free(st_map);
984 }
985 
986 static void bpf_struct_ops_map_free(struct bpf_map *map)
987 {
988 	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
989 
990 	/* st_ops->owner was acquired during map_alloc to implicitly hold
991 	 * the btf's refcnt. The acquire was only done when btf_is_module()
992 	 * was true; st_map->btf cannot be NULL here.
993 	 */
994 	if (btf_is_module(st_map->btf))
995 		module_put(st_map->st_ops_desc->st_ops->owner);
996 
997 	bpf_struct_ops_map_dissoc_progs(st_map);
998 
999 	bpf_struct_ops_map_del_ksyms(st_map);
1000 
1001 	/* The struct_ops's function may switch to another struct_ops.
1002 	 *
1003 	 * For example, bpf_tcp_cc_x->init() may switch to
1004 	 * another tcp_cc_y by calling
1005 	 * setsockopt(TCP_CONGESTION, "tcp_cc_y").
1006 	 * During the switch, bpf_struct_ops_put(tcp_cc_x) is called
1007 	 * and its refcount may reach 0, which would then free its
1008 	 * trampoline image while tcp_cc_x is still running.
1009 	 *
1010 	 * A vanilla RCU gp is to wait for all bpf-tcp-cc progs
1011 	 * to finish; bpf-tcp-cc progs are non-sleepable.
1012 	 * An rcu_tasks gp is to wait for the last few insns
1013 	 * in the trampoline image to finish before releasing
1014 	 * the trampoline image.
1015 	 */
1016 	synchronize_rcu_mult(call_rcu, call_rcu_tasks);
1017 
1018 	__bpf_struct_ops_map_free(map);
1019 }
1020 
1021 static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
1022 {
1023 	if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
1024 	    (attr->map_flags & ~(BPF_F_LINK | BPF_F_VTYPE_BTF_OBJ_FD)) ||
1025 	    !attr->btf_vmlinux_value_type_id)
1026 		return -EINVAL;
1027 	return 0;
1028 }
1029 
1030 static u32 count_func_ptrs(const struct btf *btf, const struct btf_type *t)
1031 {
1032 	int i;
1033 	u32 count;
1034 	const struct btf_member *member;
1035 
1036 	count = 0;
1037 	for_each_member(i, t, member)
1038 		if (btf_type_resolve_func_ptr(btf, member->type, NULL))
1039 			count++;
1040 	return count;
1041 }
1042 
1043 static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
1044 {
1045 	const struct bpf_struct_ops_desc *st_ops_desc;
1046 	size_t st_map_size;
1047 	struct bpf_struct_ops_map *st_map;
1048 	const struct btf_type *t, *vt;
1049 	struct module *mod = NULL;
1050 	struct bpf_map *map;
1051 	struct btf *btf;
1052 	int ret;
1053 
1054 	if (attr->map_flags & BPF_F_VTYPE_BTF_OBJ_FD) {
1055 		/* The map holds btf for its whole lifetime. */
1056 		btf = btf_get_by_fd(attr->value_type_btf_obj_fd);
1057 		if (IS_ERR(btf))
1058 			return ERR_CAST(btf);
1059 		if (!btf_is_module(btf)) {
1060 			btf_put(btf);
1061 			return ERR_PTR(-EINVAL);
1062 		}
1063 
1064 		mod = btf_try_get_module(btf);
1065 		/* mod holds a refcnt to btf. We don't need an extra refcnt
1066 		 * here.
1067 		 */
1068 		btf_put(btf);
1069 		if (!mod)
1070 			return ERR_PTR(-EINVAL);
1071 	} else {
1072 		btf = bpf_get_btf_vmlinux();
1073 		if (IS_ERR(btf))
1074 			return ERR_CAST(btf);
1075 		if (!btf)
1076 			return ERR_PTR(-ENOTSUPP);
1077 	}
1078 
1079 	st_ops_desc = bpf_struct_ops_find_value(btf, attr->btf_vmlinux_value_type_id);
1080 	if (!st_ops_desc) {
1081 		ret = -ENOTSUPP;
1082 		goto errout;
1083 	}
1084 
1085 	vt = st_ops_desc->value_type;
1086 	if (attr->value_size != vt->size) {
1087 		ret = -EINVAL;
1088 		goto errout;
1089 	}
1090 
1091 	t = st_ops_desc->type;
1092 
1093 	st_map_size = sizeof(*st_map) +
1094 		/* kvalue stores the value type, e.g.
1095 		 * struct bpf_struct_ops_tcp_congestion_ops
1096 		 */
1097 		(vt->size - sizeof(struct bpf_struct_ops_value));
1098 
1099 	st_map = bpf_map_area_alloc(st_map_size, NUMA_NO_NODE);
1100 	if (!st_map) {
1101 		ret = -ENOMEM;
1102 		goto errout;
1103 	}
1104 
1105 	st_map->st_ops_desc = st_ops_desc;
1106 	map = &st_map->map;
1107 
1108 	st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE);
1109 	st_map->funcs_cnt = count_func_ptrs(btf, t);
1110 	st_map->links =
1111 		bpf_map_area_alloc(st_map->funcs_cnt * sizeof(struct bpf_link *),
1112 				   NUMA_NO_NODE);
1113 
1114 	st_map->ksyms =
1115 		bpf_map_area_alloc(st_map->funcs_cnt * sizeof(struct bpf_ksym *),
1116 				   NUMA_NO_NODE);
1117 	if (!st_map->uvalue || !st_map->links || !st_map->ksyms) {
1118 		ret = -ENOMEM;
1119 		goto errout_free;
1120 	}
1121 	st_map->btf = btf;
1122 
1123 	mutex_init(&st_map->lock);
1124 	bpf_map_init_from_attr(map, attr);
1125 
1126 	return map;
1127 
1128 errout_free:
1129 	__bpf_struct_ops_map_free(map);
1130 errout:
1131 	module_put(mod);
1132 
1133 	return ERR_PTR(ret);
1134 }
1135 
1136 static u64 bpf_struct_ops_map_mem_usage(const struct bpf_map *map)
1137 {
1138 	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
1139 	const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc;
1140 	const struct btf_type *vt = st_ops_desc->value_type;
1141 	u64 usage;
1142 
1143 	usage = sizeof(*st_map) +
1144 			vt->size - sizeof(struct bpf_struct_ops_value);
1145 	usage += vt->size;
1146 	usage += st_map->funcs_cnt * sizeof(struct bpf_link *);
1147 	usage += st_map->funcs_cnt * sizeof(struct bpf_ksym *);
1148 	usage += PAGE_SIZE;
1149 	return usage;
1150 }
1151 
1152 BTF_ID_LIST_SINGLE(bpf_struct_ops_map_btf_ids, struct, bpf_struct_ops_map)
1153 const struct bpf_map_ops bpf_struct_ops_map_ops = {
1154 	.map_alloc_check = bpf_struct_ops_map_alloc_check,
1155 	.map_alloc = bpf_struct_ops_map_alloc,
1156 	.map_free = bpf_struct_ops_map_free,
1157 	.map_get_next_key = bpf_struct_ops_map_get_next_key,
1158 	.map_lookup_elem = bpf_struct_ops_map_lookup_elem,
1159 	.map_delete_elem = bpf_struct_ops_map_delete_elem,
1160 	.map_update_elem = bpf_struct_ops_map_update_elem,
1161 	.map_seq_show_elem = bpf_struct_ops_map_seq_show_elem,
1162 	.map_mem_usage = bpf_struct_ops_map_mem_usage,
1163 	.map_btf_id = &bpf_struct_ops_map_btf_ids[0],
1164 };
1165 
1166 /* "const void *" because some subsystem is
1167  * passing a const (e.g. const struct tcp_congestion_ops *)
1168  */
1169 bool bpf_struct_ops_get(const void *kdata)
1170 {
1171 	struct bpf_struct_ops_value *kvalue;
1172 	struct bpf_struct_ops_map *st_map;
1173 	struct bpf_map *map;
1174 
1175 	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
1176 	st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);
1177 
1178 	map = __bpf_map_inc_not_zero(&st_map->map, false);
1179 	return !IS_ERR(map);
1180 }
1181 EXPORT_SYMBOL_GPL(bpf_struct_ops_get);
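
/* A usage sketch (assuming a tcp_congestion_ops-style subsystem; the
 * names are illustrative): the subsystem pins the map backing kdata
 * before calling into the ops and releases it when done:
 *
 *	if (!bpf_struct_ops_get(ca))
 *		return -EBUSY;
 *	...
 *	bpf_struct_ops_put(ca);
 */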
1182 
1183 void bpf_struct_ops_put(const void *kdata)
1184 {
1185 	struct bpf_struct_ops_value *kvalue;
1186 	struct bpf_struct_ops_map *st_map;
1187 
1188 	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
1189 	st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);
1190 
1191 	bpf_map_put(&st_map->map);
1192 }
1193 EXPORT_SYMBOL_GPL(bpf_struct_ops_put);
1194 
1195 u32 bpf_struct_ops_id(const void *kdata)
1196 {
1197 	struct bpf_struct_ops_value *kvalue;
1198 	struct bpf_struct_ops_map *st_map;
1199 
1200 	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
1201 	st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);
1202 
1203 	return st_map->map.id;
1204 }
1205 EXPORT_SYMBOL_GPL(bpf_struct_ops_id);
1206 
1207 static bool bpf_struct_ops_valid_to_reg(struct bpf_map *map)
1208 {
1209 	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
1210 
1211 	return map->map_type == BPF_MAP_TYPE_STRUCT_OPS &&
1212 		map->map_flags & BPF_F_LINK &&
1213 		/* Pair with smp_store_release() during map_update */
1214 		smp_load_acquire(&st_map->kvalue.common.state) == BPF_STRUCT_OPS_STATE_READY;
1215 }
1216 
1217 static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link)
1218 {
1219 	struct bpf_struct_ops_link *st_link;
1220 	struct bpf_struct_ops_map *st_map;
1221 
1222 	st_link = container_of(link, struct bpf_struct_ops_link, link);
1223 	st_map = (struct bpf_struct_ops_map *)
1224 		rcu_dereference_protected(st_link->map, true);
1225 	if (st_map) {
1226 		st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link);
1227 		bpf_map_put(&st_map->map);
1228 	}
1229 	kfree(st_link);
1230 }
1231 
1232 static void bpf_struct_ops_map_link_show_fdinfo(const struct bpf_link *link,
1233 					    struct seq_file *seq)
1234 {
1235 	struct bpf_struct_ops_link *st_link;
1236 	struct bpf_map *map;
1237 
1238 	st_link = container_of(link, struct bpf_struct_ops_link, link);
1239 	rcu_read_lock();
1240 	map = rcu_dereference(st_link->map);
1241 	if (map)
1242 		seq_printf(seq, "map_id:\t%d\n", map->id);
1243 	rcu_read_unlock();
1244 }
1245 
1246 static int bpf_struct_ops_map_link_fill_link_info(const struct bpf_link *link,
1247 					       struct bpf_link_info *info)
1248 {
1249 	struct bpf_struct_ops_link *st_link;
1250 	struct bpf_map *map;
1251 
1252 	st_link = container_of(link, struct bpf_struct_ops_link, link);
1253 	rcu_read_lock();
1254 	map = rcu_dereference(st_link->map);
1255 	if (map)
1256 		info->struct_ops.map_id = map->id;
1257 	rcu_read_unlock();
1258 	return 0;
1259 }
1260 
1261 static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map *new_map,
1262 					  struct bpf_map *expected_old_map)
1263 {
1264 	struct bpf_struct_ops_map *st_map, *old_st_map;
1265 	struct bpf_map *old_map;
1266 	struct bpf_struct_ops_link *st_link;
1267 	int err;
1268 
1269 	st_link = container_of(link, struct bpf_struct_ops_link, link);
1270 	st_map = container_of(new_map, struct bpf_struct_ops_map, map);
1271 
1272 	if (!bpf_struct_ops_valid_to_reg(new_map))
1273 		return -EINVAL;
1274 
1275 	if (!st_map->st_ops_desc->st_ops->update)
1276 		return -EOPNOTSUPP;
1277 
1278 	mutex_lock(&update_mutex);
1279 
1280 	old_map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
1281 	if (!old_map) {
1282 		err = -ENOLINK;
1283 		goto err_out;
1284 	}
1285 	if (expected_old_map && old_map != expected_old_map) {
1286 		err = -EPERM;
1287 		goto err_out;
1288 	}
1289 
1290 	old_st_map = container_of(old_map, struct bpf_struct_ops_map, map);
1291 	/* The new and old struct_ops must be the same type. */
1292 	if (st_map->st_ops_desc != old_st_map->st_ops_desc) {
1293 		err = -EINVAL;
1294 		goto err_out;
1295 	}
1296 
1297 	err = st_map->st_ops_desc->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data, link);
1298 	if (err)
1299 		goto err_out;
1300 
1301 	bpf_map_inc(new_map);
1302 	rcu_assign_pointer(st_link->map, new_map);
1303 	bpf_map_put(old_map);
1304 
1305 err_out:
1306 	mutex_unlock(&update_mutex);
1307 
1308 	return err;
1309 }
1310 
1311 static int bpf_struct_ops_map_link_detach(struct bpf_link *link)
1312 {
1313 	struct bpf_struct_ops_link *st_link = container_of(link, struct bpf_struct_ops_link, link);
1314 	struct bpf_struct_ops_map *st_map;
1315 	struct bpf_map *map;
1316 
1317 	mutex_lock(&update_mutex);
1318 
1319 	map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
1320 	if (!map) {
1321 		mutex_unlock(&update_mutex);
1322 		return 0;
1323 	}
1324 	st_map = container_of(map, struct bpf_struct_ops_map, map);
1325 
1326 	st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link);
1327 
1328 	RCU_INIT_POINTER(st_link->map, NULL);
1329 	/* Pair with bpf_map_get() in bpf_struct_ops_link_create() or
1330 	 * bpf_map_inc() in bpf_struct_ops_map_link_update().
1331 	 */
1332 	bpf_map_put(&st_map->map);
1333 
1334 	mutex_unlock(&update_mutex);
1335 
1336 	wake_up_interruptible_poll(&st_link->wait_hup, EPOLLHUP);
1337 
1338 	return 0;
1339 }
1340 
1341 static __poll_t bpf_struct_ops_map_link_poll(struct file *file,
1342 					     struct poll_table_struct *pts)
1343 {
1344 	struct bpf_struct_ops_link *st_link = file->private_data;
1345 
1346 	poll_wait(file, &st_link->wait_hup, pts);
1347 
1348 	return rcu_access_pointer(st_link->map) ? 0 : EPOLLHUP;
1349 }
1350 
1351 static const struct bpf_link_ops bpf_struct_ops_map_lops = {
1352 	.dealloc = bpf_struct_ops_map_link_dealloc,
1353 	.detach = bpf_struct_ops_map_link_detach,
1354 	.show_fdinfo = bpf_struct_ops_map_link_show_fdinfo,
1355 	.fill_link_info = bpf_struct_ops_map_link_fill_link_info,
1356 	.update_map = bpf_struct_ops_map_link_update,
1357 	.poll = bpf_struct_ops_map_link_poll,
1358 };
1359 
1360 int bpf_struct_ops_link_create(union bpf_attr *attr)
1361 {
1362 	struct bpf_struct_ops_link *link = NULL;
1363 	struct bpf_link_primer link_primer;
1364 	struct bpf_struct_ops_map *st_map;
1365 	struct bpf_map *map;
1366 	int err;
1367 
1368 	map = bpf_map_get(attr->link_create.map_fd);
1369 	if (IS_ERR(map))
1370 		return PTR_ERR(map);
1371 
1372 	st_map = (struct bpf_struct_ops_map *)map;
1373 
1374 	if (!bpf_struct_ops_valid_to_reg(map)) {
1375 		err = -EINVAL;
1376 		goto err_out;
1377 	}
1378 
1379 	link = kzalloc(sizeof(*link), GFP_USER);
1380 	if (!link) {
1381 		err = -ENOMEM;
1382 		goto err_out;
1383 	}
1384 	bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_map_lops, NULL,
1385 		      attr->link_create.attach_type);
1386 
1387 	err = bpf_link_prime(&link->link, &link_primer);
1388 	if (err)
1389 		goto err_out;
1390 
1391 	init_waitqueue_head(&link->wait_hup);
1392 
1393 	/* Hold the update_mutex such that the subsystem cannot
1394 	 * do link->ops->detach() before the link is fully initialized.
1395 	 */
1396 	mutex_lock(&update_mutex);
1397 	err = st_map->st_ops_desc->st_ops->reg(st_map->kvalue.data, &link->link);
1398 	if (err) {
1399 		mutex_unlock(&update_mutex);
1400 		bpf_link_cleanup(&link_primer);
1401 		link = NULL;
1402 		goto err_out;
1403 	}
1404 	RCU_INIT_POINTER(link->map, map);
1405 	mutex_unlock(&update_mutex);
1406 
1407 	return bpf_link_settle(&link_primer);
1408 
1409 err_out:
1410 	bpf_map_put(map);
1411 	kfree(link);
1412 	return err;
1413 }
1414 
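/* Associate a program with a struct_ops map. Per the logic below: a
 * struct_ops program that is already associated with a different map has
 * its pointer poisoned (the association becomes unusable) rather than
 * failing; a non-struct_ops program instead holds an extra map reference
 * so that st_ops_assoc stays valid for the program's lifetime.
 */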
1415 int bpf_prog_assoc_struct_ops(struct bpf_prog *prog, struct bpf_map *map)
1416 {
1417 	struct bpf_map *st_ops_assoc;
1418 
1419 	guard(mutex)(&prog->aux->st_ops_assoc_mutex);
1420 
1421 	st_ops_assoc = rcu_dereference_protected(prog->aux->st_ops_assoc,
1422 						 lockdep_is_held(&prog->aux->st_ops_assoc_mutex));
1423 	if (st_ops_assoc && st_ops_assoc == map)
1424 		return 0;
1425 
1426 	if (st_ops_assoc) {
1427 		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS)
1428 			return -EBUSY;
1429 
1430 		rcu_assign_pointer(prog->aux->st_ops_assoc, BPF_PTR_POISON);
1431 	} else {
1432 		/*
1433 		 * struct_ops map does not track associated non-struct_ops programs.
1434 		 * Bump the refcount to make sure st_ops_assoc is always valid.
1435 		 */
1436 		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS)
1437 			bpf_map_inc(map);
1438 
1439 		rcu_assign_pointer(prog->aux->st_ops_assoc, map);
1440 	}
1441 
1442 	return 0;
1443 }
1444 
1445 void bpf_prog_disassoc_struct_ops(struct bpf_prog *prog)
1446 {
1447 	struct bpf_map *st_ops_assoc;
1448 
1449 	guard(mutex)(&prog->aux->st_ops_assoc_mutex);
1450 
1451 	st_ops_assoc = rcu_dereference_protected(prog->aux->st_ops_assoc,
1452 						 lockdep_is_held(&prog->aux->st_ops_assoc_mutex));
1453 	if (!st_ops_assoc || st_ops_assoc == BPF_PTR_POISON)
1454 		return;
1455 
1456 	if (prog->type != BPF_PROG_TYPE_STRUCT_OPS)
1457 		bpf_map_put(st_ops_assoc);
1458 
1459 	RCU_INIT_POINTER(prog->aux->st_ops_assoc, NULL);
1460 }
1461 
1462 /*
1463  * Get a pointer to the struct_ops struct (i.e., kdata) associated with a
1464  * program. Should only be called in BPF program context (e.g., in a kfunc).
1465  *
1466  * If the returned pointer is not NULL, it points to a valid struct_ops.
1467  * The struct_ops map is not guaranteed to be initialized or attached.
1468  * Kernel struct_ops implementers are responsible for tracking and checking
1469  * the state of the struct_ops if the use case requires an initialized or
1470  * attached struct_ops.
1471  */
1472 void *bpf_prog_get_assoc_struct_ops(const struct bpf_prog_aux *aux)
1473 {
1474 	struct bpf_struct_ops_map *st_map;
1475 	struct bpf_map *st_ops_assoc;
1476 
1477 	st_ops_assoc = rcu_dereference_check(aux->st_ops_assoc, bpf_rcu_lock_held());
1478 	if (!st_ops_assoc || st_ops_assoc == BPF_PTR_POISON)
1479 		return NULL;
1480 
1481 	st_map = (struct bpf_struct_ops_map *)st_ops_assoc;
1482 
1483 	return &st_map->kvalue.data;
1484 }
1485 EXPORT_SYMBOL_GPL(bpf_prog_get_assoc_struct_ops);
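
/* A usage sketch (hypothetical kfunc; the names are illustrative): a
 * kfunc can recover the struct_ops instance associated with the calling
 * program via
 *
 *	struct my_ops *ops = bpf_prog_get_assoc_struct_ops(prog->aux);
 *
 *	if (!ops)
 *		return -ENOENT;
 *
 * and must itself check whether the instance is initialized or attached
 * if that matters for the use case.
 */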
1486 
1487 void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map)
1488 {
1489 	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
1490 
1491 	info->btf_vmlinux_id = btf_obj_id(st_map->btf);
1492 }
1493