// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/numa.h>
#include <linux/seq_file.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/btf_ids.h>
#include <linux/rcupdate_wait.h>
#include <linux/poll.h>

struct bpf_struct_ops_value {
	struct bpf_struct_ops_common_value common;
	char data[] ____cacheline_aligned_in_smp;
};

#define MAX_TRAMP_IMAGE_PAGES 8

struct bpf_struct_ops_map {
	struct bpf_map map;
	const struct bpf_struct_ops_desc *st_ops_desc;
	/* protect map_update */
	struct mutex lock;
	/* links has all the bpf_links that are populated
	 * to the func ptrs of the kernel's struct
	 * (in kvalue.data).
	 */
	struct bpf_link **links;
	/* ksyms for bpf trampolines */
	struct bpf_ksym **ksyms;
	u32 funcs_cnt;
	u32 image_pages_cnt;
	/* image_pages is an array of pages that has all the trampolines
	 * that store the func args before calling the bpf_prog.
	 */
	void *image_pages[MAX_TRAMP_IMAGE_PAGES];
	/* The owner module's btf. */
	struct btf *btf;
	/* uvalue->data stores the kernel struct
	 * (e.g. tcp_congestion_ops) that is more useful
	 * to userspace than the kvalue. For example,
	 * the bpf_prog's id is stored instead of the kernel
	 * address of a func ptr.
	 */
	struct bpf_struct_ops_value *uvalue;
	/* kvalue.data stores the actual kernel's struct
	 * (e.g. tcp_congestion_ops) that will be
	 * registered to the kernel subsystem.
	 */
	struct bpf_struct_ops_value kvalue;
};
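
/* A sketch (not code from this file) of the two views, assuming the
 * tcp_congestion_ops subsystem with a "ssthresh" func ptr member:
 *
 *   kvalue.data:  struct tcp_congestion_ops { .ssthresh = <trampoline address>, ... }
 *   uvalue->data: struct tcp_congestion_ops { .ssthresh = <bpf_prog id>, ... }
 *
 * The kernel-facing copy holds callable trampoline addresses, while the
 * user-facing copy reports bpf_prog ids (filled in by map_update_elem below).
 */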

struct bpf_struct_ops_link {
	struct bpf_link link;
	struct bpf_map __rcu *map;
	wait_queue_head_t wait_hup;
};

static DEFINE_MUTEX(update_mutex);

#define VALUE_PREFIX "bpf_struct_ops_"
#define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1)

const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = {
};

const struct bpf_prog_ops bpf_struct_ops_prog_ops = {
#ifdef CONFIG_NET
	.test_run = bpf_struct_ops_test_run,
#endif
};

BTF_ID_LIST(st_ops_ids)
BTF_ID(struct, module)
BTF_ID(struct, bpf_struct_ops_common_value)

enum {
	IDX_MODULE_ID,
	IDX_ST_OPS_COMMON_VALUE_ID,
};

extern struct btf *btf_vmlinux;

static bool is_valid_value_type(struct btf *btf, s32 value_id,
				const struct btf_type *type,
				const char *value_name)
{
	const struct btf_type *common_value_type;
	const struct btf_member *member;
	const struct btf_type *vt, *mt;

	vt = btf_type_by_id(btf, value_id);
	if (btf_vlen(vt) != 2) {
		pr_warn("The number of %s's members should be 2, but we get %d\n",
			value_name, btf_vlen(vt));
		return false;
	}
	member = btf_type_member(vt);
	mt = btf_type_by_id(btf, member->type);
	common_value_type = btf_type_by_id(btf_vmlinux,
					   st_ops_ids[IDX_ST_OPS_COMMON_VALUE_ID]);
	if (mt != common_value_type) {
		pr_warn("The first member of %s should be bpf_struct_ops_common_value\n",
			value_name);
		return false;
	}
	member++;
	mt = btf_type_by_id(btf, member->type);
	if (mt != type) {
		pr_warn("The second member of %s should be %s\n",
			value_name, btf_name_by_offset(btf, type->name_off));
		return false;
	}

	return true;
}
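
/* For illustration, a value type that passes the checks above would look
 * like the following sketch (the real definition comes from the subsystem
 * and its BTF, not from this file):
 *
 *   struct bpf_struct_ops_tcp_congestion_ops {
 *       struct bpf_struct_ops_common_value common;
 *       struct tcp_congestion_ops data ____cacheline_aligned_in_smp;
 *   };
 */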

static void *bpf_struct_ops_image_alloc(void)
{
	void *image;
	int err;

	err = bpf_jit_charge_modmem(PAGE_SIZE);
	if (err)
		return ERR_PTR(err);
	image = arch_alloc_bpf_trampoline(PAGE_SIZE);
	if (!image) {
		bpf_jit_uncharge_modmem(PAGE_SIZE);
		return ERR_PTR(-ENOMEM);
	}

	return image;
}

void bpf_struct_ops_image_free(void *image)
{
	if (image) {
		arch_free_bpf_trampoline(image, PAGE_SIZE);
		bpf_jit_uncharge_modmem(PAGE_SIZE);
	}
}

#define MAYBE_NULL_SUFFIX "__nullable"
#define MAX_STUB_NAME 128

/* Return the type info of a stub function, if it exists.
 *
 * The name of a stub function is made up of the name of the struct_ops and
 * the name of the function pointer member, separated by "__". For example,
 * if the struct_ops type is named "foo_ops" and the function pointer
 * member is named "bar", the stub function name would be "foo_ops__bar".
 */
static const struct btf_type *
find_stub_func_proto(const struct btf *btf, const char *st_op_name,
		     const char *member_name)
{
	char stub_func_name[MAX_STUB_NAME];
	const struct btf_type *func_type;
	s32 btf_id;
	int cp;

	cp = snprintf(stub_func_name, MAX_STUB_NAME, "%s__%s",
		      st_op_name, member_name);
	if (cp >= MAX_STUB_NAME) {
		pr_warn("Stub function name too long\n");
		return NULL;
	}
	btf_id = btf_find_by_name_kind(btf, stub_func_name, BTF_KIND_FUNC);
	if (btf_id < 0)
		return NULL;
	func_type = btf_type_by_id(btf, btf_id);
	if (!func_type)
		return NULL;

	return btf_type_by_id(btf, func_type->type); /* FUNC_PROTO */
}

/* Prepare argument info for every nullable argument of a member of a
 * struct_ops type.
 *
 * Initialize a struct bpf_struct_ops_arg_info according to type info of
 * the arguments of a stub function. (Check kCFI for more information about
 * stub functions.)
 *
 * Each member in the struct_ops type has a struct bpf_struct_ops_arg_info
 * to provide an array of struct bpf_ctx_arg_aux, which in turn provides
 * the information that is used by the verifier to check the arguments of
 * the BPF struct_ops program assigned to the member. Here, we only care
 * about the arguments that are marked as __nullable.
 *
 * The array of struct bpf_ctx_arg_aux is eventually assigned to
 * prog->aux->ctx_arg_info of BPF struct_ops programs and passed to the
 * verifier. (See check_struct_ops_btf_id())
 *
 * On success, arg_info->info will be the list of struct bpf_ctx_arg_aux.
 * On failure, it is left untouched.
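 *
 * For example (a hypothetical stub, not one defined in this file): given
 * a member "bar" of a struct_ops type "foo_ops" whose func ptr takes
 * (struct sock *, struct sk_buff *), the stub
 *
 *   static int foo_ops__bar(struct sock *sk, struct sk_buff *skb__nullable)
 *   { return 0; }
 *
 * makes the verifier mark arg#1 of the program attached to "bar" as
 * PTR_TRUSTED | PTR_TO_BTF_ID | PTR_MAYBE_NULL, i.e. the program must
 * NULL-check it before dereferencing.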
 */
static int prepare_arg_info(struct btf *btf,
			    const char *st_ops_name,
			    const char *member_name,
			    const struct btf_type *func_proto,
			    struct bpf_struct_ops_arg_info *arg_info)
{
	const struct btf_type *stub_func_proto, *pointed_type;
	const struct btf_param *stub_args, *args;
	struct bpf_ctx_arg_aux *info, *info_buf;
	u32 nargs, arg_no, info_cnt = 0;
	u32 arg_btf_id;
	int offset;

	stub_func_proto = find_stub_func_proto(btf, st_ops_name, member_name);
	if (!stub_func_proto)
		return 0;

	/* Check if the number of arguments of the stub function is the same
	 * as the number of arguments of the function pointer.
	 */
	nargs = btf_type_vlen(func_proto);
	if (nargs != btf_type_vlen(stub_func_proto)) {
		pr_warn("the number of arguments of the stub function %s__%s does not match the number of arguments of the member %s of struct %s\n",
			st_ops_name, member_name, member_name, st_ops_name);
		return -EINVAL;
	}

	if (!nargs)
		return 0;

	args = btf_params(func_proto);
	stub_args = btf_params(stub_func_proto);

	info_buf = kcalloc(nargs, sizeof(*info_buf), GFP_KERNEL);
	if (!info_buf)
		return -ENOMEM;

	/* Prepare info for every nullable argument */
	info = info_buf;
	for (arg_no = 0; arg_no < nargs; arg_no++) {
		/* Skip arguments that are not suffixed with
		 * "__nullable".
		 */
		if (!btf_param_match_suffix(btf, &stub_args[arg_no],
					    MAYBE_NULL_SUFFIX))
			continue;

		/* Should be a pointer to struct */
		pointed_type = btf_type_resolve_ptr(btf,
						    args[arg_no].type,
						    &arg_btf_id);
		if (!pointed_type ||
		    !btf_type_is_struct(pointed_type)) {
			pr_warn("stub function %s__%s has %s tagging to an unsupported type\n",
				st_ops_name, member_name, MAYBE_NULL_SUFFIX);
			goto err_out;
		}

		offset = btf_ctx_arg_offset(btf, func_proto, arg_no);
		if (offset < 0) {
			pr_warn("stub function %s__%s has an invalid trampoline ctx offset for arg#%u\n",
				st_ops_name, member_name, arg_no);
			goto err_out;
		}

		if (args[arg_no].type != stub_args[arg_no].type) {
			pr_warn("arg#%u type in stub function %s__%s does not match with its original func_proto\n",
				arg_no, st_ops_name, member_name);
			goto err_out;
		}

		/* Fill the information of the new argument */
		info->reg_type =
			PTR_TRUSTED | PTR_TO_BTF_ID | PTR_MAYBE_NULL;
		info->btf_id = arg_btf_id;
		info->btf = btf;
		info->offset = offset;

		info++;
		info_cnt++;
	}

	if (info_cnt) {
		arg_info->info = info_buf;
		arg_info->cnt = info_cnt;
	} else {
		kfree(info_buf);
	}

	return 0;

err_out:
	kfree(info_buf);

	return -EINVAL;
}

/* Clean up the arg_info in a struct bpf_struct_ops_desc. */
void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc)
{
	struct bpf_struct_ops_arg_info *arg_info;
	int i;

	arg_info = st_ops_desc->arg_info;
	for (i = 0; i < btf_type_vlen(st_ops_desc->type); i++)
		kfree(arg_info[i].info);

	kfree(arg_info);
}

int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
			     struct btf *btf,
			     struct bpf_verifier_log *log)
{
	struct bpf_struct_ops *st_ops = st_ops_desc->st_ops;
	struct bpf_struct_ops_arg_info *arg_info;
	const struct btf_member *member;
	const struct btf_type *t;
	s32 type_id, value_id;
	char value_name[128];
	const char *mname;
	int i, err;

	if (strlen(st_ops->name) + VALUE_PREFIX_LEN >=
	    sizeof(value_name)) {
		pr_warn("struct_ops name %s is too long\n",
			st_ops->name);
		return -EINVAL;
	}
	sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name);

	if (!st_ops->cfi_stubs) {
		pr_warn("struct_ops for %s has no cfi_stubs\n", st_ops->name);
		return -EINVAL;
	}

	type_id = btf_find_by_name_kind(btf, st_ops->name,
					BTF_KIND_STRUCT);
	if (type_id < 0) {
		pr_warn("Cannot find struct %s in %s\n",
			st_ops->name, btf_get_name(btf));
		return -EINVAL;
	}
	t = btf_type_by_id(btf, type_id);
	if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) {
		pr_warn("Cannot support #%u members in struct %s\n",
			btf_type_vlen(t), st_ops->name);
		return -EINVAL;
	}

	value_id = btf_find_by_name_kind(btf, value_name,
					 BTF_KIND_STRUCT);
	if (value_id < 0) {
		pr_warn("Cannot find struct %s in %s\n",
			value_name, btf_get_name(btf));
		return -EINVAL;
	}
	if (!is_valid_value_type(btf, value_id, t, value_name))
		return -EINVAL;

	arg_info = kcalloc(btf_type_vlen(t), sizeof(*arg_info),
			   GFP_KERNEL);
	if (!arg_info)
		return -ENOMEM;

	st_ops_desc->arg_info = arg_info;
	st_ops_desc->type = t;
	st_ops_desc->type_id = type_id;
	st_ops_desc->value_id = value_id;
	st_ops_desc->value_type = btf_type_by_id(btf, value_id);

	for_each_member(i, t, member) {
		const struct btf_type *func_proto;

		mname = btf_name_by_offset(btf, member->name_off);
		if (!*mname) {
			pr_warn("anon member in struct %s is not supported\n",
				st_ops->name);
			err = -EOPNOTSUPP;
			goto errout;
		}

		if (__btf_member_bitfield_size(t, member)) {
			pr_warn("bit field member %s in struct %s is not supported\n",
				mname, st_ops->name);
			err = -EOPNOTSUPP;
			goto errout;
		}

		func_proto = btf_type_resolve_func_ptr(btf,
						       member->type,
						       NULL);
		if (!func_proto)
			continue;

		if (btf_distill_func_proto(log, btf,
					   func_proto, mname,
					   &st_ops->func_models[i])) {
			pr_warn("Error in parsing func ptr %s in struct %s\n",
				mname, st_ops->name);
			err = -EINVAL;
			goto errout;
		}

		err = prepare_arg_info(btf, st_ops->name, mname,
				       func_proto,
				       arg_info + i);
		if (err)
			goto errout;
	}

	if (st_ops->init(btf)) {
		pr_warn("Error in init bpf_struct_ops %s\n",
			st_ops->name);
		err = -EINVAL;
		goto errout;
	}

	return 0;

errout:
	bpf_struct_ops_desc_release(st_ops_desc);

	return err;
}

static int bpf_struct_ops_map_get_next_key(struct bpf_map *map, void *key,
					   void *next_key)
{
	if (key && *(u32 *)key == 0)
		return -ENOENT;

	*(u32 *)next_key = 0;
	return 0;
}
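
/* Usage sketch: a struct_ops map holds exactly one element at key 0, so
 * from userspace BPF_MAP_GET_NEXT_KEY with key == NULL yields key 0, and
 * asking for the key after key 0 returns -ENOENT, ending the iteration.
 */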

int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	enum bpf_struct_ops_state state;
	s64 refcnt;

	if (unlikely(*(u32 *)key != 0))
		return -ENOENT;

	kvalue = &st_map->kvalue;
	/* Pair with smp_store_release() during map_update */
	state = smp_load_acquire(&kvalue->common.state);
	if (state == BPF_STRUCT_OPS_STATE_INIT) {
		memset(value, 0, map->value_size);
		return 0;
	}

	/* No lock is needed.  state and refcnt do not need
	 * to be updated together under atomic context.
	 */
	uvalue = value;
	memcpy(uvalue, st_map->uvalue, map->value_size);
	uvalue->common.state = state;

	/* This value offers the user space a general estimate of how
	 * many sockets are still utilizing this struct_ops for TCP
	 * congestion control. The number might not be exact, but it
	 * should sufficiently meet our present goals.
	 */
	refcnt = atomic64_read(&map->refcnt) - atomic64_read(&map->usercnt);
	refcount_set(&uvalue->common.refcnt, max_t(s64, refcnt, 0));

	return 0;
}

static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EINVAL);
}

static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
{
	u32 i;

	for (i = 0; i < st_map->funcs_cnt; i++) {
		if (!st_map->links[i])
			break;
		bpf_link_put(st_map->links[i]);
		st_map->links[i] = NULL;
	}
}

static void bpf_struct_ops_map_free_image(struct bpf_struct_ops_map *st_map)
{
	int i;

	for (i = 0; i < st_map->image_pages_cnt; i++)
		bpf_struct_ops_image_free(st_map->image_pages[i]);
	st_map->image_pages_cnt = 0;
}

static int check_zero_holes(const struct btf *btf, const struct btf_type *t, void *data)
{
	const struct btf_member *member;
	u32 i, moff, msize, prev_mend = 0;
	const struct btf_type *mtype;

	for_each_member(i, t, member) {
		moff = __btf_member_bit_offset(t, member) / 8;
		if (moff > prev_mend &&
		    memchr_inv(data + prev_mend, 0, moff - prev_mend))
			return -EINVAL;

		mtype = btf_type_by_id(btf, member->type);
		mtype = btf_resolve_size(btf, mtype, &msize);
		if (IS_ERR(mtype))
			return PTR_ERR(mtype);
		prev_mend = moff + msize;
	}

	if (t->size > prev_mend &&
	    memchr_inv(data + prev_mend, 0, t->size - prev_mend))
		return -EINVAL;

	return 0;
}
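
/* A sketch of what check_zero_holes() enforces: for a value type laid out
 * like
 *
 *   struct foo { u8 a; u32 b; };   (three padding bytes between "a" and "b")
 *
 * the padding between "a" and "b", as well as any tail padding after the
 * last member, must be all-zero in the user-supplied buffer, otherwise
 * the update is rejected with -EINVAL.
 */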

static void bpf_struct_ops_link_release(struct bpf_link *link)
{
}

static void bpf_struct_ops_link_dealloc(struct bpf_link *link)
{
	struct bpf_tramp_link *tlink = container_of(link, struct bpf_tramp_link, link);

	kfree(tlink);
}

const struct bpf_link_ops bpf_struct_ops_link_lops = {
	.release = bpf_struct_ops_link_release,
	.dealloc = bpf_struct_ops_link_dealloc,
};

int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
				      struct bpf_tramp_link *link,
				      const struct btf_func_model *model,
				      void *stub_func,
				      void **_image, u32 *_image_off,
				      bool allow_alloc)
{
	u32 image_off = *_image_off, flags = BPF_TRAMP_F_INDIRECT;
	void *image = *_image;
	int size;

	tlinks[BPF_TRAMP_FENTRY].links[0] = link;
	tlinks[BPF_TRAMP_FENTRY].nr_links = 1;

	if (model->ret_size > 0)
		flags |= BPF_TRAMP_F_RET_FENTRY_RET;

	size = arch_bpf_trampoline_size(model, flags, tlinks, NULL);
	if (size <= 0)
		return size ? : -EFAULT;

	/* Allocate image buffer if necessary */
	if (!image || size > PAGE_SIZE - image_off) {
		if (!allow_alloc)
			return -E2BIG;

		image = bpf_struct_ops_image_alloc();
		if (IS_ERR(image))
			return PTR_ERR(image);
		image_off = 0;
	}

	size = arch_prepare_bpf_trampoline(NULL, image + image_off,
					   image + image_off + size,
					   model, flags, tlinks, stub_func);
	if (size <= 0) {
		if (image != *_image)
			bpf_struct_ops_image_free(image);
		return size ? : -EFAULT;
	}

	*_image = image;
	*_image_off = image_off + size;
	return 0;
}

static void bpf_struct_ops_ksym_init(const char *tname, const char *mname,
				     void *image, unsigned int size,
				     struct bpf_ksym *ksym)
{
	snprintf(ksym->name, KSYM_NAME_LEN, "bpf__%s_%s", tname, mname);
	INIT_LIST_HEAD_RCU(&ksym->lnode);
	bpf_image_ksym_init(image, size, ksym);
}

static void bpf_struct_ops_map_add_ksyms(struct bpf_struct_ops_map *st_map)
{
	u32 i;

	for (i = 0; i < st_map->funcs_cnt; i++) {
		if (!st_map->ksyms[i])
			break;
		bpf_image_ksym_add(st_map->ksyms[i]);
	}
}

static void bpf_struct_ops_map_del_ksyms(struct bpf_struct_ops_map *st_map)
{
	u32 i;

	for (i = 0; i < st_map->funcs_cnt; i++) {
		if (!st_map->ksyms[i])
			break;
		bpf_image_ksym_del(st_map->ksyms[i]);
	}
}

static void bpf_struct_ops_map_free_ksyms(struct bpf_struct_ops_map *st_map)
{
	u32 i;

	for (i = 0; i < st_map->funcs_cnt; i++) {
		if (!st_map->ksyms[i])
			break;
		kfree(st_map->ksyms[i]);
		st_map->ksyms[i] = NULL;
	}
}

static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
					   void *value, u64 flags)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc;
	const struct bpf_struct_ops *st_ops = st_ops_desc->st_ops;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	const struct btf_type *module_type;
	const struct btf_member *member;
	const struct btf_type *t = st_ops_desc->type;
	struct bpf_tramp_links *tlinks;
	void *udata, *kdata;
	int prog_fd, err;
	u32 i, trampoline_start, image_off = 0;
	void *cur_image = NULL, *image = NULL;
	struct bpf_link **plink;
	struct bpf_ksym **pksym;
	const char *tname, *mname;

	if (flags)
		return -EINVAL;

	if (*(u32 *)key != 0)
		return -E2BIG;

	err = check_zero_holes(st_map->btf, st_ops_desc->value_type, value);
	if (err)
		return err;

	uvalue = value;
	err = check_zero_holes(st_map->btf, t, uvalue->data);
	if (err)
		return err;

	if (uvalue->common.state || refcount_read(&uvalue->common.refcnt))
		return -EINVAL;

	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
	if (!tlinks)
		return -ENOMEM;

	uvalue = (struct bpf_struct_ops_value *)st_map->uvalue;
	kvalue = (struct bpf_struct_ops_value *)&st_map->kvalue;

	mutex_lock(&st_map->lock);

	if (kvalue->common.state != BPF_STRUCT_OPS_STATE_INIT) {
		err = -EBUSY;
		goto unlock;
	}

	memcpy(uvalue, value, map->value_size);

	udata = &uvalue->data;
	kdata = &kvalue->data;

	plink = st_map->links;
	pksym = st_map->ksyms;
	tname = btf_name_by_offset(st_map->btf, t->name_off);
	module_type = btf_type_by_id(btf_vmlinux, st_ops_ids[IDX_MODULE_ID]);
	for_each_member(i, t, member) {
		const struct btf_type *mtype, *ptype;
		struct bpf_prog *prog;
		struct bpf_tramp_link *link;
		struct bpf_ksym *ksym;
		u32 moff;

		moff = __btf_member_bit_offset(t, member) / 8;
		mname = btf_name_by_offset(st_map->btf, member->name_off);
		ptype = btf_type_resolve_ptr(st_map->btf, member->type, NULL);
		if (ptype == module_type) {
			/* the module owner slot must be left zeroed by userspace */
			if (*(void **)(udata + moff)) {
				err = -EINVAL;
				goto reset_unlock;
			}
			*(void **)(kdata + moff) = BPF_MODULE_OWNER;
			continue;
		}

		err = st_ops->init_member(t, member, kdata, udata);
		if (err < 0)
			goto reset_unlock;

		/* The ->init_member() has handled this member */
		if (err > 0)
			continue;

		/* If st_ops->init_member does not handle it,
		 * we will only handle func ptrs and zero-ed members
		 * here. Reject everything else.
		 */

		/* All non func ptr members must be 0 */
		if (!ptype || !btf_type_is_func_proto(ptype)) {
			u32 msize;

			mtype = btf_type_by_id(st_map->btf, member->type);
			mtype = btf_resolve_size(st_map->btf, mtype, &msize);
			if (IS_ERR(mtype)) {
				err = PTR_ERR(mtype);
				goto reset_unlock;
			}

			if (memchr_inv(udata + moff, 0, msize)) {
				err = -EINVAL;
				goto reset_unlock;
			}

			continue;
		}

		prog_fd = (int)(*(unsigned long *)(udata + moff));
		/* Similar check as the attr->attach_prog_fd */
		if (!prog_fd)
			continue;

		prog = bpf_prog_get(prog_fd);
		if (IS_ERR(prog)) {
			err = PTR_ERR(prog);
			goto reset_unlock;
		}

		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
		    prog->aux->attach_btf_id != st_ops_desc->type_id ||
		    prog->expected_attach_type != i) {
			bpf_prog_put(prog);
			err = -EINVAL;
			goto reset_unlock;
		}

		link = kzalloc(sizeof(*link), GFP_USER);
		if (!link) {
			bpf_prog_put(prog);
			err = -ENOMEM;
			goto reset_unlock;
		}
		bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS,
			      &bpf_struct_ops_link_lops, prog);
		*plink++ = &link->link;

		ksym = kzalloc(sizeof(*ksym), GFP_USER);
		if (!ksym) {
			err = -ENOMEM;
			goto reset_unlock;
		}
		*pksym++ = ksym;

		trampoline_start = image_off;
		err = bpf_struct_ops_prepare_trampoline(tlinks, link,
						&st_ops->func_models[i],
						*(void **)(st_ops->cfi_stubs + moff),
						&image, &image_off,
						st_map->image_pages_cnt < MAX_TRAMP_IMAGE_PAGES);
		if (err)
			goto reset_unlock;

		if (cur_image != image) {
			st_map->image_pages[st_map->image_pages_cnt++] = image;
			cur_image = image;
			trampoline_start = 0;
		}

		*(void **)(kdata + moff) = image + trampoline_start + cfi_get_offset();

		/* store the prog's id in udata */
		*(unsigned long *)(udata + moff) = prog->aux->id;

		/* init ksym for this trampoline */
		bpf_struct_ops_ksym_init(tname, mname,
					 image + trampoline_start,
					 image_off - trampoline_start,
					 ksym);
	}

	if (st_ops->validate) {
		err = st_ops->validate(kdata);
		if (err)
			goto reset_unlock;
	}
	for (i = 0; i < st_map->image_pages_cnt; i++) {
		err = arch_protect_bpf_trampoline(st_map->image_pages[i],
						  PAGE_SIZE);
		if (err)
			goto reset_unlock;
	}

	if (st_map->map.map_flags & BPF_F_LINK) {
		err = 0;
		/* Let bpf_link handle registration & unregistration.
		 *
		 * Pair with smp_load_acquire() during lookup_elem().
		 */
		smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_READY);
		goto unlock;
	}

	err = st_ops->reg(kdata, NULL);
	if (likely(!err)) {
		/* This refcnt increment on the map here after
		 * 'st_ops->reg()' is safe since the state of the
		 * map must be set to INIT at this moment, and thus
		 * bpf_struct_ops_map_delete_elem() can't unregister
		 * or transition it to TOBEFREE concurrently.
		 */
		bpf_map_inc(map);
		/* Pair with smp_load_acquire() during lookup_elem().
		 * It ensures the above udata updates (e.g. prog->aux->id)
		 * can be seen once BPF_STRUCT_OPS_STATE_INUSE is set.
		 */
		smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_INUSE);
		goto unlock;
	}

	/* An error occurred during st_ops->reg(). This can happen if this
	 * struct_ops needs to be verified as a whole, after all init_member()
	 * calls. It can also happen if there was a race in registering the
	 * struct_ops (under the same name) to a sub-system through different
	 * struct_ops maps.
	 */

reset_unlock:
	bpf_struct_ops_map_free_ksyms(st_map);
	bpf_struct_ops_map_free_image(st_map);
	bpf_struct_ops_map_put_progs(st_map);
	memset(uvalue, 0, map->value_size);
	memset(kvalue, 0, map->value_size);
unlock:
	kfree(tlinks);
	mutex_unlock(&st_map->lock);
	if (!err)
		bpf_struct_ops_map_add_ksyms(st_map);
	return err;
}
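
/* A short recap of the update flow above (a summary, not extra semantics):
 * userspace writes the value at key 0 with prog fds in the func-ptr slots;
 * update_elem swaps each fd for a trampoline address in kdata and a prog
 * id in udata, then either registers the struct with the subsystem through
 * st_ops->reg() or, for BPF_F_LINK maps, marks the map READY and defers
 * registration to bpf_struct_ops_link_create().
 */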

static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
{
	enum bpf_struct_ops_state prev_state;
	struct bpf_struct_ops_map *st_map;

	st_map = (struct bpf_struct_ops_map *)map;
	if (st_map->map.map_flags & BPF_F_LINK)
		return -EOPNOTSUPP;

	prev_state = cmpxchg(&st_map->kvalue.common.state,
			     BPF_STRUCT_OPS_STATE_INUSE,
			     BPF_STRUCT_OPS_STATE_TOBEFREE);
	switch (prev_state) {
	case BPF_STRUCT_OPS_STATE_INUSE:
		st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, NULL);
		bpf_map_put(map);
		return 0;
	case BPF_STRUCT_OPS_STATE_TOBEFREE:
		return -EINPROGRESS;
	case BPF_STRUCT_OPS_STATE_INIT:
		return -ENOENT;
	default:
		WARN_ON_ONCE(1);
		/* Should never happen.  Treat it as not found. */
		return -ENOENT;
	}
}

static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
					     struct seq_file *m)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	void *value;
	int err;

	value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		return;

	err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	if (!err) {
		btf_type_seq_show(st_map->btf,
				  map->btf_vmlinux_value_type_id,
				  value, m);
		seq_putc(m, '\n');
	}

	kfree(value);
}

static void __bpf_struct_ops_map_free(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	if (st_map->links)
		bpf_struct_ops_map_put_progs(st_map);
	if (st_map->ksyms)
		bpf_struct_ops_map_free_ksyms(st_map);
	bpf_map_area_free(st_map->links);
	bpf_map_area_free(st_map->ksyms);
	bpf_struct_ops_map_free_image(st_map);
	bpf_map_area_free(st_map->uvalue);
	bpf_map_area_free(st_map);
}

static void bpf_struct_ops_map_free(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	/* st_ops->owner was acquired during map_alloc to implicitly hold
	 * the btf's refcnt. The acquire was only done when btf_is_module(),
	 * so st_map->btf cannot be NULL here.
	 */
	if (btf_is_module(st_map->btf))
		module_put(st_map->st_ops_desc->st_ops->owner);

	bpf_struct_ops_map_del_ksyms(st_map);

	/* The struct_ops's function may switch to another struct_ops.
	 *
	 * For example, bpf_tcp_cc_x->init() may switch to
	 * another tcp_cc_y by calling
	 * setsockopt(TCP_CONGESTION, "tcp_cc_y").
	 * During the switch, bpf_struct_ops_put(tcp_cc_x) is called
	 * and its refcount may reach 0, which would then free its
	 * trampoline image while tcp_cc_x is still running.
	 *
	 * A vanilla rcu gp waits for all bpf-tcp-cc progs to
	 * finish; bpf-tcp-cc progs are non-sleepable.
	 * A rcu_tasks gp waits for the last few insns in the
	 * trampoline image to finish before releasing the
	 * trampoline image.
	 */
	synchronize_rcu_mult(call_rcu, call_rcu_tasks);

	__bpf_struct_ops_map_free(map);
}

static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
{
	if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
	    (attr->map_flags & ~(BPF_F_LINK | BPF_F_VTYPE_BTF_OBJ_FD)) ||
	    !attr->btf_vmlinux_value_type_id)
		return -EINVAL;
	return 0;
}

static u32 count_func_ptrs(const struct btf *btf, const struct btf_type *t)
{
	int i;
	u32 count;
	const struct btf_member *member;

	count = 0;
	for_each_member(i, t, member)
		if (btf_type_resolve_func_ptr(btf, member->type, NULL))
			count++;
	return count;
}

static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
{
	const struct bpf_struct_ops_desc *st_ops_desc;
	size_t st_map_size;
	struct bpf_struct_ops_map *st_map;
	const struct btf_type *t, *vt;
	struct module *mod = NULL;
	struct bpf_map *map;
	struct btf *btf;
	int ret;

	if (attr->map_flags & BPF_F_VTYPE_BTF_OBJ_FD) {
		/* The map holds btf for its whole lifetime. */
		btf = btf_get_by_fd(attr->value_type_btf_obj_fd);
		if (IS_ERR(btf))
			return ERR_CAST(btf);
		if (!btf_is_module(btf)) {
			btf_put(btf);
			return ERR_PTR(-EINVAL);
		}

		mod = btf_try_get_module(btf);
		/* mod holds a refcnt to btf. We don't need an extra refcnt
		 * here.
		 */
		btf_put(btf);
		if (!mod)
			return ERR_PTR(-EINVAL);
	} else {
		btf = bpf_get_btf_vmlinux();
		if (IS_ERR(btf))
			return ERR_CAST(btf);
		if (!btf)
			return ERR_PTR(-ENOTSUPP);
	}

	st_ops_desc = bpf_struct_ops_find_value(btf, attr->btf_vmlinux_value_type_id);
	if (!st_ops_desc) {
		ret = -ENOTSUPP;
		goto errout;
	}

	vt = st_ops_desc->value_type;
	if (attr->value_size != vt->size) {
		ret = -EINVAL;
		goto errout;
	}

	t = st_ops_desc->type;

	st_map_size = sizeof(*st_map) +
		/* kvalue stores the
		 * struct bpf_struct_ops_tcp_congestion_ops
		 */
		(vt->size - sizeof(struct bpf_struct_ops_value));

	st_map = bpf_map_area_alloc(st_map_size, NUMA_NO_NODE);
	if (!st_map) {
		ret = -ENOMEM;
		goto errout;
	}

	st_map->st_ops_desc = st_ops_desc;
	map = &st_map->map;

	st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE);
	st_map->funcs_cnt = count_func_ptrs(btf, t);
	st_map->links =
		bpf_map_area_alloc(st_map->funcs_cnt * sizeof(struct bpf_link *),
				   NUMA_NO_NODE);

	st_map->ksyms =
		bpf_map_area_alloc(st_map->funcs_cnt * sizeof(struct bpf_ksym *),
				   NUMA_NO_NODE);
	if (!st_map->uvalue || !st_map->links || !st_map->ksyms) {
		ret = -ENOMEM;
		goto errout_free;
	}
	st_map->btf = btf;

	mutex_init(&st_map->lock);
	bpf_map_init_from_attr(map, attr);

	return map;

errout_free:
	__bpf_struct_ops_map_free(map);
errout:
	module_put(mod);

	return ERR_PTR(ret);
}

static u64 bpf_struct_ops_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc;
	const struct btf_type *vt = st_ops_desc->value_type;
	u64 usage;

	usage = sizeof(*st_map) +
			vt->size - sizeof(struct bpf_struct_ops_value);
	usage += vt->size;
	usage += st_map->funcs_cnt * sizeof(struct bpf_link *);
	usage += st_map->funcs_cnt * sizeof(struct bpf_ksym *);
	usage += PAGE_SIZE;
	return usage;
}

BTF_ID_LIST_SINGLE(bpf_struct_ops_map_btf_ids, struct, bpf_struct_ops_map)
const struct bpf_map_ops bpf_struct_ops_map_ops = {
	.map_alloc_check = bpf_struct_ops_map_alloc_check,
	.map_alloc = bpf_struct_ops_map_alloc,
	.map_free = bpf_struct_ops_map_free,
	.map_get_next_key = bpf_struct_ops_map_get_next_key,
	.map_lookup_elem = bpf_struct_ops_map_lookup_elem,
	.map_delete_elem = bpf_struct_ops_map_delete_elem,
	.map_update_elem = bpf_struct_ops_map_update_elem,
	.map_seq_show_elem = bpf_struct_ops_map_seq_show_elem,
	.map_mem_usage = bpf_struct_ops_map_mem_usage,
	.map_btf_id = &bpf_struct_ops_map_btf_ids[0],
};

/* "const void *" because some subsystems pass a const
 * (e.g. const struct tcp_congestion_ops *)
 */
bool bpf_struct_ops_get(const void *kdata)
{
	struct bpf_struct_ops_value *kvalue;
	struct bpf_struct_ops_map *st_map;
	struct bpf_map *map;

	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
	st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);

	map = __bpf_map_inc_not_zero(&st_map->map, false);
	return !IS_ERR(map);
}

void bpf_struct_ops_put(const void *kdata)
{
	struct bpf_struct_ops_value *kvalue;
	struct bpf_struct_ops_map *st_map;

	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
	st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);

	bpf_map_put(&st_map->map);
}

int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff)
{
	void *func_ptr = *(void **)(st_ops->cfi_stubs + moff);

	return func_ptr ? 0 : -ENOTSUPP;
}

static bool bpf_struct_ops_valid_to_reg(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	return map->map_type == BPF_MAP_TYPE_STRUCT_OPS &&
		map->map_flags & BPF_F_LINK &&
		/* Pair with smp_store_release() during map_update */
		smp_load_acquire(&st_map->kvalue.common.state) == BPF_STRUCT_OPS_STATE_READY;
}

static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link)
{
	struct bpf_struct_ops_link *st_link;
	struct bpf_struct_ops_map *st_map;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	st_map = (struct bpf_struct_ops_map *)
		rcu_dereference_protected(st_link->map, true);
	if (st_map) {
		st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link);
		bpf_map_put(&st_map->map);
	}
	kfree(st_link);
}

static void bpf_struct_ops_map_link_show_fdinfo(const struct bpf_link *link,
						struct seq_file *seq)
{
	struct bpf_struct_ops_link *st_link;
	struct bpf_map *map;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	rcu_read_lock();
	map = rcu_dereference(st_link->map);
	if (map)
		seq_printf(seq, "map_id:\t%d\n", map->id);
	rcu_read_unlock();
}

static int bpf_struct_ops_map_link_fill_link_info(const struct bpf_link *link,
						  struct bpf_link_info *info)
{
	struct bpf_struct_ops_link *st_link;
	struct bpf_map *map;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	rcu_read_lock();
	map = rcu_dereference(st_link->map);
	if (map)
		info->struct_ops.map_id = map->id;
	rcu_read_unlock();
	return 0;
}

static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map *new_map,
					  struct bpf_map *expected_old_map)
{
	struct bpf_struct_ops_map *st_map, *old_st_map;
	struct bpf_map *old_map;
	struct bpf_struct_ops_link *st_link;
	int err;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	st_map = container_of(new_map, struct bpf_struct_ops_map, map);

	if (!bpf_struct_ops_valid_to_reg(new_map))
		return -EINVAL;

	if (!st_map->st_ops_desc->st_ops->update)
		return -EOPNOTSUPP;

	mutex_lock(&update_mutex);

	old_map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
	if (!old_map) {
		err = -ENOLINK;
		goto err_out;
	}
	if (expected_old_map && old_map != expected_old_map) {
		err = -EPERM;
		goto err_out;
	}

	old_st_map = container_of(old_map, struct bpf_struct_ops_map, map);
	/* The new and old struct_ops must be the same type. */
	if (st_map->st_ops_desc != old_st_map->st_ops_desc) {
		err = -EINVAL;
		goto err_out;
	}

	err = st_map->st_ops_desc->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data, link);
	if (err)
		goto err_out;

	bpf_map_inc(new_map);
	rcu_assign_pointer(st_link->map, new_map);
	bpf_map_put(old_map);

err_out:
	mutex_unlock(&update_mutex);

	return err;
}

static int bpf_struct_ops_map_link_detach(struct bpf_link *link)
{
	struct bpf_struct_ops_link *st_link = container_of(link, struct bpf_struct_ops_link, link);
	struct bpf_struct_ops_map *st_map;
	struct bpf_map *map;

	mutex_lock(&update_mutex);

	map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
	if (!map) {
		mutex_unlock(&update_mutex);
		return 0;
	}
	st_map = container_of(map, struct bpf_struct_ops_map, map);

	st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link);

	RCU_INIT_POINTER(st_link->map, NULL);
	/* Pair with bpf_map_get() in bpf_struct_ops_link_create() or
	 * bpf_map_inc() in bpf_struct_ops_map_link_update().
	 */
	bpf_map_put(&st_map->map);

	mutex_unlock(&update_mutex);

	wake_up_interruptible_poll(&st_link->wait_hup, EPOLLHUP);

	return 0;
}

static __poll_t bpf_struct_ops_map_link_poll(struct file *file,
					     struct poll_table_struct *pts)
{
	struct bpf_struct_ops_link *st_link = file->private_data;

	poll_wait(file, &st_link->wait_hup, pts);

	return rcu_access_pointer(st_link->map) ? 0 : EPOLLHUP;
}
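
/* Userspace sketch (assuming epoll on the link fd): EPOLLHUP is reported
 * once the map has been detached, e.g.
 *
 *   struct epoll_event ev = {};
 *   epoll_ctl(epfd, EPOLL_CTL_ADD, link_fd, &ev);
 *   // a later epoll_wait() reports EPOLLHUP when the struct_ops is torn down
 */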

static const struct bpf_link_ops bpf_struct_ops_map_lops = {
	.dealloc = bpf_struct_ops_map_link_dealloc,
	.detach = bpf_struct_ops_map_link_detach,
	.show_fdinfo = bpf_struct_ops_map_link_show_fdinfo,
	.fill_link_info = bpf_struct_ops_map_link_fill_link_info,
	.update_map = bpf_struct_ops_map_link_update,
	.poll = bpf_struct_ops_map_link_poll,
};

int bpf_struct_ops_link_create(union bpf_attr *attr)
{
	struct bpf_struct_ops_link *link = NULL;
	struct bpf_link_primer link_primer;
	struct bpf_struct_ops_map *st_map;
	struct bpf_map *map;
	int err;

	map = bpf_map_get(attr->link_create.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	st_map = (struct bpf_struct_ops_map *)map;

	if (!bpf_struct_ops_valid_to_reg(map)) {
		err = -EINVAL;
		goto err_out;
	}

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto err_out;
	}
	bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_map_lops, NULL);

	err = bpf_link_prime(&link->link, &link_primer);
	if (err)
		goto err_out;

	init_waitqueue_head(&link->wait_hup);

	/* Hold the update_mutex such that the subsystem cannot
	 * do link->ops->detach() before the link is fully initialized.
	 */
	mutex_lock(&update_mutex);
	err = st_map->st_ops_desc->st_ops->reg(st_map->kvalue.data, &link->link);
	if (err) {
		mutex_unlock(&update_mutex);
		bpf_link_cleanup(&link_primer);
		link = NULL;
		goto err_out;
	}
	RCU_INIT_POINTER(link->map, map);
	mutex_unlock(&update_mutex);

	return bpf_link_settle(&link_primer);

err_out:
	bpf_map_put(map);
	kfree(link);
	return err;
}
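
/* Userspace sketch (assuming libbpf): for a map created with BPF_F_LINK,
 *
 *   struct bpf_link *l = bpf_map__attach_struct_ops(skel->maps.my_ops);
 *
 * ends up in the function above via BPF_LINK_CREATE with
 * link_create.map_fd set to the map's fd; non-BPF_F_LINK maps register at
 * map_update time instead (see bpf_struct_ops_map_update_elem()).
 */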

void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	info->btf_vmlinux_id = btf_obj_id(st_map->btf);
}