Lines Matching +full:async +full:- +full:enum

1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
6 #include <linux/bpf-cgroup.h>
35 * inside its own verifier_ops->get_func_proto() callback it should return
47 return (unsigned long) map->ops->map_lookup_elem(map, key); in BPF_CALL_2()
64 return map->ops->map_update_elem(map, key, value, flags); in BPF_CALL_4()
82 return map->ops->map_delete_elem(map, key); in BPF_CALL_2()
96 return map->ops->map_push_elem(map, value, flags); in BPF_CALL_3()
111 return map->ops->map_pop_elem(map, value); in BPF_CALL_2()
124 return map->ops->map_peek_elem(map, value); in BPF_CALL_2()
139 return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu); in BPF_CALL_3()
233 return -EINVAL; in BPF_CALL_0()
235 return (u64) task->tgid << 32 | task->pid; in BPF_CALL_0()
251 return -EINVAL; in BPF_CALL_0()
272 strscpy_pad(buf, task->comm, size); in BPF_CALL_2()
276 return -EINVAL; in BPF_CALL_2()
387 lock = src + map->record->spin_lock_off; in copy_map_value_locked()
389 lock = dst + map->record->spin_lock_off; in copy_map_value_locked()
464 return -EINVAL; in __bpf_strtoull()
467 return -EINVAL; in __bpf_strtoull()
470 return -EINVAL; in __bpf_strtoull()
475 *is_negative = (cur_buf < buf + buf_len && *cur_buf == '-'); in __bpf_strtoull()
479 consumed = cur_buf - buf; in __bpf_strtoull()
480 cur_len -= consumed; in __bpf_strtoull()
482 return -EINVAL; in __bpf_strtoull()
484 cur_len = min(cur_len, sizeof(str) - 1); in __bpf_strtoull()
493 return -ERANGE; in __bpf_strtoull()
496 return -EINVAL; in __bpf_strtoull()
499 consumed += cur_buf - str; in __bpf_strtoull()
515 if ((long long)-_res > 0) in __bpf_strtoll()
516 return -ERANGE; in __bpf_strtoll()
517 *res = -_res; in __bpf_strtoll()
520 return -ERANGE; in __bpf_strtoll()
563 return -EINVAL; in BPF_CALL_4()
598 int err = -EINVAL; in BPF_CALL_4()
611 err = -ENOENT; in BPF_CALL_4()
615 if (!ns_match(&pidns->ns, (dev_t)dev, ino)) in BPF_CALL_4()
618 nsdata->pid = task_pid_nr_ns(task, pidns); in BPF_CALL_4()
619 nsdata->tgid = task_tgid_nr_ns(task, pidns); in BPF_CALL_4()
646 return -EINVAL; in BPF_CALL_5()
669 ret = -EFAULT; in BPF_CALL_3()
692 return -EINVAL; in BPF_CALL_5()
702 /* Return -EFAULT for partial read */ in BPF_CALL_5()
703 return ret < 0 ? ret : -EFAULT; in BPF_CALL_5()
767 return -EINVAL; in bpf_trace_copy_string()
783 return -EBUSY; in bpf_try_get_buffers()
785 *bufs = this_cpu_ptr(&bpf_bprintf_bufs[nest_level - 1]); in bpf_try_get_buffers()
799 if (!data->bin_args && !data->buf) in bpf_bprintf_cleanup()
805 * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
810 * - Format string verification only: when data->get_bin_args is false
811 * - Arguments preparation: in addition to the above verification, it writes in
812 * data->bin_args a binary representation of arguments usable by bstr_printf
821 bool get_buffers = (data->get_bin_args && num_args) || data->get_buf; in bpf_bprintf_prepare()
831 return -EINVAL; in bpf_bprintf_prepare()
832 fmt_size = fmt_end - fmt; in bpf_bprintf_prepare()
835 return -EBUSY; in bpf_bprintf_prepare()
837 if (data->get_bin_args) { in bpf_bprintf_prepare()
839 tmp_buf = buffers->bin_args; in bpf_bprintf_prepare()
841 data->bin_args = (u32 *)tmp_buf; in bpf_bprintf_prepare()
844 if (data->get_buf) in bpf_bprintf_prepare()
845 data->buf = buffers->buf; in bpf_bprintf_prepare()
849 err = -EINVAL; in bpf_bprintf_prepare()
862 err = -EINVAL; in bpf_bprintf_prepare()
866 /* The string is zero-terminated so if fmt[i] != 0, we can in bpf_bprintf_prepare()
871 /* skip optional "[0 +-][num]" width formatting field */ in bpf_bprintf_prepare()
872 while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' || in bpf_bprintf_prepare()
910 (tmp_buf_end - tmp_buf), in bpf_bprintf_prepare()
924 err = -EINVAL; in bpf_bprintf_prepare()
933 if (tmp_buf_end - tmp_buf < sizeof_cur_ip) { in bpf_bprintf_prepare()
934 err = -ENOSPC; in bpf_bprintf_prepare()
945 * pre-formatted as strings, ironically, the easiest way in bpf_bprintf_prepare()
948 ip_spec[2] = fmt[i - 1]; in bpf_bprintf_prepare()
950 err = snprintf(tmp_buf, tmp_buf_end - tmp_buf, in bpf_bprintf_prepare()
963 err = -EINVAL; in bpf_bprintf_prepare()
971 err = -ENOSPC; in bpf_bprintf_prepare()
978 tmp_buf_end - tmp_buf); in bpf_bprintf_prepare()
993 err = -ENOSPC; in bpf_bprintf_prepare()
1017 err = -EINVAL; in bpf_bprintf_prepare()
1026 if (tmp_buf_end - tmp_buf < sizeof_cur_arg) { in bpf_bprintf_prepare()
1027 err = -ENOSPC; in bpf_bprintf_prepare()
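
The verification-only and argument-preparation modes described in the kernel-doc above correspond to how bprintf-style helpers drive the bpf_bprintf_prepare()/bpf_bprintf_cleanup() pair. A minimal sketch of such a caller, modeled on bpf_snprintf(); the struct bpf_bprintf_data fields follow the lines shown above, everything else (the local names, UINT_MAX for a verifier-checked constant format string) is illustrative:

static int snprintf_like(char *str, u32 str_size, char *fmt,
                         const u64 *raw_args, u32 num_args)
{
        struct bpf_bprintf_data data = {
                .get_bin_args = true,   /* verify fmt and build bin_args */
        };
        int err;

        err = bpf_bprintf_prepare(fmt, UINT_MAX, raw_args, num_args, &data);
        if (err < 0)
                return err;

        /* data.bin_args is laid out the way bstr_printf() expects */
        err = bstr_printf(str, str_size, fmt, data.bin_args);

        bpf_bprintf_cleanup(&data);     /* release the per-CPU buffers */
        return err;
}
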
1059 return -EINVAL; in BPF_CALL_5()
1062 /* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we in BPF_CALL_5()
1089 if (map->map_type == BPF_MAP_TYPE_ARRAY) { in map_key_from_value()
1092 *arr_idx = ((char *)value - array->value) / array->elem_size; in map_key_from_value()
1095 return (void *)value - round_up(map->key_size, 8); in map_key_from_value()
1120 * ops->map_release_uref callback is responsible for cancelling the timers,
1123 * Inner maps can contain bpf timers as well. ops->map_release_uref is
1152 enum bpf_async_type {
1159 static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer) in bpf_timer_cb()
1162 struct bpf_map *map = t->cb.map; in bpf_timer_cb()
1163 void *value = t->cb.value; in bpf_timer_cb()
1169 callback_fn = rcu_dereference_check(t->cb.callback_fn, rcu_read_lock_bh_held()); in bpf_timer_cb()
1194 struct bpf_async_cb *cb = &w->cb; in bpf_wq_work()
1195 struct bpf_map *map = cb->map; in bpf_wq_work()
1197 void *value = cb->value; in bpf_wq_work()
1203 callback_fn = READ_ONCE(cb->callback_fn); in bpf_wq_work()
1229 cancel_work_sync(&w->work); in bpf_wq_delete_work()
1231 call_rcu(&w->cb.rcu, bpf_async_cb_rcu_free); in bpf_wq_delete_work()
1240 * call_rcu() right after for both preallocated and non-preallocated in bpf_timer_delete_work()
1241 * maps. The async->cb = NULL was already done and no code path can see in bpf_timer_delete_work()
1245 hrtimer_cancel(&t->timer); in bpf_timer_delete_work()
1246 call_rcu(&t->cb.rcu, bpf_async_cb_rcu_free); in bpf_timer_delete_work()
1249 static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags, in __bpf_async_init() argument
1250 enum bpf_async_type type) in __bpf_async_init()
1260 return -EOPNOTSUPP; in __bpf_async_init()
1270 return -EINVAL; in __bpf_async_init()
1273 __bpf_spin_lock_irqsave(&async->lock); in __bpf_async_init()
1274 t = async->timer; in __bpf_async_init()
1276 ret = -EBUSY; in __bpf_async_init()
1280 cb = bpf_map_kmalloc_nolock(map, size, 0, map->numa_node); in __bpf_async_init()
1282 ret = -ENOMEM; in __bpf_async_init()
1288 clockid = flags & (MAX_CLOCKS - 1); in __bpf_async_init()
1291 atomic_set(&t->cancelling, 0); in __bpf_async_init()
1292 INIT_WORK(&t->cb.delete_work, bpf_timer_delete_work); in __bpf_async_init()
1293 hrtimer_setup(&t->timer, bpf_timer_cb, clockid, HRTIMER_MODE_REL_SOFT); in __bpf_async_init()
1294 cb->value = (void *)async - map->record->timer_off; in __bpf_async_init()
1299 INIT_WORK(&w->work, bpf_wq_work); in __bpf_async_init()
1300 INIT_WORK(&w->delete_work, bpf_wq_delete_work); in __bpf_async_init()
1301 cb->value = (void *)async - map->record->wq_off; in __bpf_async_init()
1304 cb->map = map; in __bpf_async_init()
1305 cb->prog = NULL; in __bpf_async_init()
1306 cb->flags = flags; in __bpf_async_init()
1307 rcu_assign_pointer(cb->callback_fn, NULL); in __bpf_async_init()
1309 WRITE_ONCE(async->cb, cb); in __bpf_async_init()
1310 /* Guarantee the order between async->cb and map->usercnt. So in __bpf_async_init()
1312 * bpf_timer_cancel_and_free() called by uref release reads a no-NULL in __bpf_async_init()
1316 if (!atomic64_read(&map->usercnt)) { in __bpf_async_init()
1320 WRITE_ONCE(async->cb, NULL); in __bpf_async_init()
1322 ret = -EPERM; in __bpf_async_init()
1325 __bpf_spin_unlock_irqrestore(&async->lock); in __bpf_async_init()
1332 clock_t clockid = flags & (MAX_CLOCKS - 1); in BPF_CALL_3()
1343 return -EINVAL; in BPF_CALL_3()
1357 static int __bpf_async_set_callback(struct bpf_async_kern *async, void *callback_fn, in __bpf_async_set_callback() argument
1359 enum bpf_async_type type) in __bpf_async_set_callback()
1361 struct bpf_prog *prev, *prog = aux->prog; in __bpf_async_set_callback()
1366 return -EOPNOTSUPP; in __bpf_async_set_callback()
1367 __bpf_spin_lock_irqsave(&async->lock); in __bpf_async_set_callback()
1368 cb = async->cb; in __bpf_async_set_callback()
1370 ret = -EINVAL; in __bpf_async_set_callback()
1373 if (!atomic64_read(&cb->map->usercnt)) { in __bpf_async_set_callback()
1379 ret = -EPERM; in __bpf_async_set_callback()
1382 prev = cb->prog; in __bpf_async_set_callback()
1385 * can pick different callback_fn-s within the same prog. in __bpf_async_set_callback()
1395 cb->prog = prog; in __bpf_async_set_callback()
1397 rcu_assign_pointer(cb->callback_fn, callback_fn); in __bpf_async_set_callback()
1399 __bpf_spin_unlock_irqrestore(&async->lock); in __bpf_async_set_callback()
1421 enum hrtimer_mode mode; in BPF_CALL_3()
1424 return -EOPNOTSUPP; in BPF_CALL_3()
1426 return -EINVAL; in BPF_CALL_3()
1427 __bpf_spin_lock_irqsave(&timer->lock); in BPF_CALL_3()
1428 t = timer->timer; in BPF_CALL_3()
1429 if (!t || !t->cb.prog) { in BPF_CALL_3()
1430 ret = -EINVAL; in BPF_CALL_3()
1442 hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode); in BPF_CALL_3()
1444 __bpf_spin_unlock_irqrestore(&timer->lock); in BPF_CALL_3()
1457 static void drop_prog_refcnt(struct bpf_async_cb *async) in drop_prog_refcnt() argument
1459 struct bpf_prog *prog = async->prog; in drop_prog_refcnt()
1463 async->prog = NULL; in drop_prog_refcnt()
1464 rcu_assign_pointer(async->callback_fn, NULL); in drop_prog_refcnt()
1475 return -EOPNOTSUPP; in BPF_CALL_1()
1477 __bpf_spin_lock_irqsave(&timer->lock); in BPF_CALL_1()
1478 t = timer->timer; in BPF_CALL_1()
1480 ret = -EINVAL; in BPF_CALL_1()
1490 ret = -EDEADLK; in BPF_CALL_1()
1494 /* Only account in-flight cancellations when invoked from a timer in BPF_CALL_1()
1496 * are waiting on us, to avoid introducing lockups. Non-callback paths in BPF_CALL_1()
1501 atomic_inc(&t->cancelling); in BPF_CALL_1()
1505 if (atomic_read(&cur_t->cancelling)) { in BPF_CALL_1()
1514 ret = -EDEADLK; in BPF_CALL_1()
1518 drop_prog_refcnt(&t->cb); in BPF_CALL_1()
1520 __bpf_spin_unlock_irqrestore(&timer->lock); in BPF_CALL_1()
1524 ret = ret ?: hrtimer_cancel(&t->timer); in BPF_CALL_1()
1526 atomic_dec(&t->cancelling); in BPF_CALL_1()
1538 static struct bpf_async_cb *__bpf_async_cancel_and_free(struct bpf_async_kern *async) in __bpf_async_cancel_and_free() argument
1542 /* Performance optimization: read async->cb without lock first. */ in __bpf_async_cancel_and_free()
1543 if (!READ_ONCE(async->cb)) in __bpf_async_cancel_and_free()
1546 __bpf_spin_lock_irqsave(&async->lock); in __bpf_async_cancel_and_free()
1547 /* re-read it under lock */ in __bpf_async_cancel_and_free()
1548 cb = async->cb; in __bpf_async_cancel_and_free()
1555 WRITE_ONCE(async->cb, NULL); in __bpf_async_cancel_and_free()
1557 __bpf_spin_unlock_irqrestore(&async->lock); in __bpf_async_cancel_and_free()
1562 * by ops->map_release_uref when the user space reference to a map reaches zero.
1575 * just return -1). Though callback_fn is still running on this cpu it's in bpf_timer_cancel_and_free()
1578 * since async->cb = NULL was already done. The timer will be in bpf_timer_cancel_and_free()
1602 queue_work(system_dfl_wq, &t->cb.delete_work); in bpf_timer_cancel_and_free()
1612 if (hrtimer_try_to_cancel(&t->timer) >= 0) in bpf_timer_cancel_and_free()
1613 call_rcu(&t->cb.rcu, bpf_async_cb_rcu_free); in bpf_timer_cancel_and_free()
1615 queue_work(system_dfl_wq, &t->cb.delete_work); in bpf_timer_cancel_and_free()
1617 bpf_timer_delete_work(&t->cb.delete_work); in bpf_timer_cancel_and_free()
1622 * by ops->map_release_uref when the user space reference to a map reaches zero.
1638 schedule_work(&work->delete_work); in bpf_wq_cancel_and_free()
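
On the BPF program side, the lifecycle that these helpers and the map_release_uref path manage boils down to init/set_callback/start on a struct bpf_timer embedded in a map value. A minimal sketch, assuming the usual vmlinux.h/bpf_helpers.h scaffolding (CLOCK_MONOTONIC is open-coded because it is a macro, not a BTF type):

#define CLOCK_MONOTONIC 1

struct elem {
        struct bpf_timer t;
};

struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, 16);
        __type(key, int);
        __type(value, struct elem);
} timers SEC(".maps");

static int timer_cb(void *map, int *key, struct elem *val)
{
        return 0;       /* one-shot: returning 0 does not re-arm the timer */
}

static __always_inline int arm_timer(struct elem *val, u64 nsecs)
{
        int err;

        /* fails with -EBUSY if this timer was already initialized */
        err = bpf_timer_init(&val->t, &timers, CLOCK_MONOTONIC);
        if (err)
                return err;
        err = bpf_timer_set_callback(&val->t, timer_cb);
        if (err)
                return err;
        return bpf_timer_start(&val->t, nsecs, 0);
}
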
1663 /* Since the upper 8 bits of dynptr->size is reserved, the
1664 * maximum supported size is 2^24 - 1.
1666 #define DYNPTR_MAX_SIZE ((1UL << 24) - 1)
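
The reserved upper bits hold the dynptr type and a read-only flag next to the 24-bit size; a sketch of the companion masks consistent with the accessors below (the values mirror the definitions that sit next to DYNPTR_MAX_SIZE in this file, but double-check them against your tree):

#define DYNPTR_TYPE_SHIFT       28              /* bits 28-30: enum bpf_dynptr_type */
#define DYNPTR_SIZE_MASK        0xFFFFFF        /* bits 0-23: usable size */
#define DYNPTR_RDONLY_BIT       BIT(31)         /* bit 31: dynptr is read-only */
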
1673 return ptr->size & DYNPTR_RDONLY_BIT; in __bpf_dynptr_is_rdonly()
1678 ptr->size |= DYNPTR_RDONLY_BIT; in bpf_dynptr_set_rdonly()
1681 static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type) in bpf_dynptr_set_type()
1683 ptr->size |= type << DYNPTR_TYPE_SHIFT; in bpf_dynptr_set_type()
1686 static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr) in bpf_dynptr_get_type()
1688 return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT; in bpf_dynptr_get_type()
1693 return ptr->size & DYNPTR_SIZE_MASK; in __bpf_dynptr_size()
1698 u32 metadata = ptr->size & ~DYNPTR_SIZE_MASK; in bpf_dynptr_set_size()
1700 ptr->size = new_size | metadata; in bpf_dynptr_set_size()
1705 return size > DYNPTR_MAX_SIZE ? -E2BIG : 0; in bpf_dynptr_check_size()
1709 enum bpf_dynptr_type type, u32 offset, u32 size) in bpf_dynptr_init()
1711 ptr->data = data; in bpf_dynptr_init()
1712 ptr->offset = offset; in bpf_dynptr_init()
1713 ptr->size = size; in bpf_dynptr_init()
1734 err = -EINVAL; in BPF_CALL_4()
1760 enum bpf_dynptr_type type; in __bpf_dynptr_read()
1763 if (!src->data || flags) in __bpf_dynptr_read()
1764 return -EINVAL; in __bpf_dynptr_read()
1779 memmove(dst, src->data + src->offset + offset, len); in __bpf_dynptr_read()
1782 return __bpf_skb_load_bytes(src->data, src->offset + offset, dst, len); in __bpf_dynptr_read()
1784 return __bpf_xdp_load_bytes(src->data, src->offset + offset, dst, len); in __bpf_dynptr_read()
1786 memmove(dst, bpf_skb_meta_pointer(src->data, src->offset + offset), len); in __bpf_dynptr_read()
1790 return -EFAULT; in __bpf_dynptr_read()
1814 enum bpf_dynptr_type type; in __bpf_dynptr_write()
1817 if (!dst->data || __bpf_dynptr_is_rdonly(dst)) in __bpf_dynptr_write()
1818 return -EINVAL; in __bpf_dynptr_write()
1830 return -EINVAL; in __bpf_dynptr_write()
1835 memmove(dst->data + dst->offset + offset, src, len); in __bpf_dynptr_write()
1838 return __bpf_skb_store_bytes(dst->data, dst->offset + offset, src, len, in __bpf_dynptr_write()
1842 return -EINVAL; in __bpf_dynptr_write()
1843 return __bpf_xdp_store_bytes(dst->data, dst->offset + offset, src, len); in __bpf_dynptr_write()
1846 return -EINVAL; in __bpf_dynptr_write()
1847 memmove(bpf_skb_meta_pointer(dst->data, dst->offset + offset), src, len); in __bpf_dynptr_write()
1851 return -EFAULT; in __bpf_dynptr_write()
1874 enum bpf_dynptr_type type; in BPF_CALL_3()
1877 if (!ptr->data) in BPF_CALL_3()
1892 return (unsigned long)(ptr->data + ptr->offset + offset); in BPF_CALL_3()
1928 bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) in bpf_base_func_proto()
1985 if (!bpf_token_capable(prog->aux->token, CAP_BPF)) in bpf_base_func_proto()
2057 if (!bpf_token_capable(prog->aux->token, CAP_PERFMON)) in bpf_base_func_proto()
2100 return prog->sleepable ? &bpf_get_task_stack_sleepable_proto in bpf_base_func_proto()
2127 if (!head->next || list_empty(head)) in bpf_list_head_free()
2129 head = head->next; in bpf_list_head_free()
2137 obj -= field->graph_root.node_offset; in bpf_list_head_free()
2138 head = head->next; in bpf_list_head_free()
2142 __bpf_obj_drop_impl(obj, field->graph_root.value_rec, false); in bpf_list_head_free()
2176 obj -= field->graph_root.node_offset; in bpf_rb_root_free()
2179 __bpf_obj_drop_impl(obj, field->graph_root.value_rec, false); in bpf_rb_root_free()
2195 bpf_obj_init(meta->record, p); in bpf_obj_new_impl()
2212 if (rec && rec->refcount_off >= 0 && in __bpf_obj_drop_impl()
2213 !refcount_dec_and_test((refcount_t *)(p + rec->refcount_off))) { in __bpf_obj_drop_impl()
2235 __bpf_obj_drop_impl(p, meta ? meta->record : NULL, false); in bpf_obj_drop_impl()
2252 ref = (struct bpf_refcount *)(p__refcounted_kptr + meta->record->refcount_off); in bpf_refcount_acquire_impl()
2266 struct list_head *n = &node->list_head, *h = (void *)head; in __bpf_list_add()
2268 /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't in __bpf_list_add()
2271 if (unlikely(!h->next)) in __bpf_list_add()
2274 /* node->owner != NULL implies !list_empty(n), no need to separately in __bpf_list_add()
2277 if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) { in __bpf_list_add()
2279 __bpf_obj_drop_impl((void *)n - off, rec, false); in __bpf_list_add()
2280 return -EINVAL; in __bpf_list_add()
2284 WRITE_ONCE(node->owner, head); in __bpf_list_add()
2296 return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off); in bpf_list_push_front_impl()
2306 return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off); in bpf_list_push_back_impl()
2314 /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't in __bpf_list_del()
2317 if (unlikely(!h->next)) in __bpf_list_del()
2322 n = tail ? h->prev : h->next; in __bpf_list_del()
2324 if (WARN_ON_ONCE(READ_ONCE(node->owner) != head)) in __bpf_list_del()
2328 WRITE_ONCE(node->owner, NULL); in __bpf_list_del()
2346 if (list_empty(h) || unlikely(!h->next)) in bpf_list_front()
2349 return (struct bpf_list_node *)h->next; in bpf_list_front()
2356 if (list_empty(h) || unlikely(!h->next)) in bpf_list_back()
2359 return (struct bpf_list_node *)h->prev; in bpf_list_back()
2367 struct rb_node *n = &node_internal->rb_node; in bpf_rbtree_remove()
2369 /* node_internal->owner != root implies either RB_EMPTY_NODE(n) or in bpf_rbtree_remove()
2372 if (READ_ONCE(node_internal->owner) != root) in bpf_rbtree_remove()
2377 WRITE_ONCE(node_internal->owner, NULL); in bpf_rbtree_remove()
2388 struct rb_node **link = &((struct rb_root_cached *)root)->rb_root.rb_node; in __bpf_rbtree_add()
2389 struct rb_node *parent = NULL, *n = &node->rb_node; in __bpf_rbtree_add()
2393 /* node->owner != NULL implies !RB_EMPTY_NODE(n), no need to separately in __bpf_rbtree_add()
2396 if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) { in __bpf_rbtree_add()
2398 __bpf_obj_drop_impl((void *)n - off, rec, false); in __bpf_rbtree_add()
2399 return -EINVAL; in __bpf_rbtree_add()
2405 link = &parent->rb_left; in __bpf_rbtree_add()
2407 link = &parent->rb_right; in __bpf_rbtree_add()
2414 WRITE_ONCE(node->owner, root); in __bpf_rbtree_add()
2425 return __bpf_rbtree_add(root, n, (void *)less, meta ? meta->record : NULL, off); in bpf_rbtree_add_impl()
2439 return (struct bpf_rb_node *)r->rb_root.rb_node; in bpf_rbtree_root()
2446 if (READ_ONCE(node_internal->owner) != root) in bpf_rbtree_left()
2449 return (struct bpf_rb_node *)node_internal->rb_node.rb_left; in bpf_rbtree_left()
2456 if (READ_ONCE(node_internal->owner) != root) in bpf_rbtree_right()
2459 return (struct bpf_rb_node *)node_internal->rb_node.rb_right; in bpf_rbtree_right()
2463 * bpf_task_acquire - Acquire a reference to a task. A task acquired by this
2470 if (refcount_inc_not_zero(&p->rcu_users)) in bpf_task_acquire()
2476 * bpf_task_release - Release the reference acquired on a task.
2492 * bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by
2503 * bpf_cgroup_release - Release the reference acquired on a cgroup.
2521 * bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor
2531 if (level > cgrp->level || level < 0) in bpf_cgroup_ancestor()
2535 ancestor = cgrp->ancestors[level]; in bpf_cgroup_ancestor()
2542 * bpf_cgroup_from_id - Find a cgroup from its ID. A cgroup returned by this
2558 * bpf_task_under_cgroup - wrap task_under_cgroup_hierarchy() as a kfunc, test
2583 if (unlikely(idx >= array->map.max_entries)) in BPF_CALL_2()
2584 return -E2BIG; in BPF_CALL_2()
2586 cgrp = READ_ONCE(array->ptrs[idx]); in BPF_CALL_2()
2588 return -EAGAIN; in BPF_CALL_2()
2602 * bpf_task_get_cgroup1 - Acquires the associated cgroup of a task within a
2622 * bpf_task_from_pid - Find a struct task_struct from its pid by looking it up
2641 * bpf_task_from_vpid - Find a struct task_struct from its vpid by looking it up
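
These task and cgroup kfuncs follow a strict acquire/use/release pattern. A minimal sketch from a BPF program, assuming the kfunc declarations from vmlinux.h or bpf_experimental.h:

static __always_inline int task_in_cgroup(int pid, u64 cgid)
{
        struct task_struct *task;
        struct cgroup *cgrp;
        int ret = 0;

        task = bpf_task_from_pid(pid);
        if (!task)
                return 0;

        cgrp = bpf_cgroup_from_id(cgid);
        if (cgrp) {
                ret = bpf_task_under_cgroup(task, cgrp);
                bpf_cgroup_release(cgrp);
        }

        bpf_task_release(task);
        return ret;
}
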
2660 * bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data.
2663 * @buffer__opt: User-provided buffer to copy contents into. May be NULL
2667 * For non-skb and non-xdp type dynptrs, there is no difference between
2679 * bpf_dynptr_slice will not invalidate any ctx->data/data_end pointers in
2682 * Return: NULL if the call failed (eg invalid dynptr), pointer to a read-only
2691 enum bpf_dynptr_type type; in bpf_dynptr_slice()
2695 if (!ptr->data) in bpf_dynptr_slice()
2707 return ptr->data + ptr->offset + offset; in bpf_dynptr_slice()
2710 return skb_header_pointer(ptr->data, ptr->offset + offset, len, buffer__opt); in bpf_dynptr_slice()
2712 return skb_pointer_if_linear(ptr->data, ptr->offset + offset, len); in bpf_dynptr_slice()
2715 void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset + offset, len); in bpf_dynptr_slice()
2721 bpf_xdp_copy_buf(ptr->data, ptr->offset + offset, buffer__opt, len, false); in bpf_dynptr_slice()
2725 return bpf_skb_meta_pointer(ptr->data, ptr->offset + offset); in bpf_dynptr_slice()
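
From the program side, the buffer__opt contract means the caller always gets a usable pointer of the requested length, whether or not the underlying data was linear. A minimal sketch for a TC program, assuming the bpf_dynptr_from_skb() and bpf_dynptr_slice() kfuncs (TC_ACT_* and ETH_P_IP are open-coded since they are macros, not BTF types):

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

#define TC_ACT_OK       0
#define TC_ACT_SHOT     2
#define ETH_P_IP        0x0800

extern int bpf_dynptr_from_skb(struct __sk_buff *skb, u64 flags,
                               struct bpf_dynptr *ptr__uninit) __ksym;
extern void *bpf_dynptr_slice(const struct bpf_dynptr *p, u32 offset,
                              void *buffer__opt, u32 buffer__szk) __ksym;

SEC("tc")
int parse_eth(struct __sk_buff *skb)
{
        struct bpf_dynptr ptr;
        struct ethhdr hdr_buf, *eth;

        if (bpf_dynptr_from_skb(skb, 0, &ptr))
                return TC_ACT_SHOT;

        /* Points into the linear skb data when possible; otherwise the
         * bytes are copied into hdr_buf and &hdr_buf is returned.
         */
        eth = bpf_dynptr_slice(&ptr, 0, &hdr_buf, sizeof(hdr_buf));
        if (!eth)
                return TC_ACT_SHOT;

        return eth->h_proto == bpf_htons(ETH_P_IP) ? TC_ACT_OK : TC_ACT_SHOT;
}

char LICENSE[] SEC("license") = "GPL";
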
2733 * bpf_dynptr_slice_rdwr() - Obtain a writable pointer to the dynptr data.
2736 * @buffer__opt: User-provided buffer to copy contents into. May be NULL
2740 * For non-skb and non-xdp type dynptrs, there is no difference between
2766 * bpf_dynptr_slice_rdwr will not invalidate any ctx->data/data_end pointers in
2779 if (!ptr->data || __bpf_dynptr_is_rdonly(ptr)) in bpf_dynptr_slice_rdwr()
2784 * For skb-type dynptrs, it is safe to write into the returned pointer in bpf_dynptr_slice_rdwr()
2812 if (!ptr->data || start > end) in bpf_dynptr_adjust()
2813 return -EINVAL; in bpf_dynptr_adjust()
2818 return -ERANGE; in bpf_dynptr_adjust()
2820 ptr->offset += start; in bpf_dynptr_adjust()
2821 bpf_dynptr_set_size(ptr, end - start); in bpf_dynptr_adjust()
2830 return !ptr->data; in bpf_dynptr_is_null()
2837 if (!ptr->data) in bpf_dynptr_is_rdonly()
2847 if (!ptr->data) in bpf_dynptr_size()
2848 return -EINVAL; in bpf_dynptr_size()
2859 if (!ptr->data) { in bpf_dynptr_clone()
2861 return -EINVAL; in bpf_dynptr_clone()
2870 * bpf_dynptr_copy() - Copy data from one dynptr to another.
2871 * @dst_ptr: Destination dynptr - where data should be copied to
2873 * @src_ptr: Source dynptr - where data should be copied from
2905 return -E2BIG; in bpf_dynptr_copy()
2909 u32 chunk_sz = min_t(u32, sizeof(buf), size - off); in bpf_dynptr_copy()
2925 * bpf_dynptr_memset() - Fill dynptr memory with a constant byte.
2926 * @p: Destination dynptr - where data will be filled
2950 return -EINVAL; in bpf_dynptr_memset()
2956 /* Non-linear data under the dynptr, write from a local buffer */ in bpf_dynptr_memset()
2961 chunk_sz = min_t(u32, sizeof(buf), size - write_off); in bpf_dynptr_memset()
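
Putting the two kfuncs documented above together from a BPF program; a minimal sketch, with the argument order taken from the kernel-doc parameter lists (treat it as an assumption and check the declarations you build against):

/* zero the first 16 bytes of dst, then copy 16 bytes from src+0 to dst+16 */
static __always_inline int scrub_and_copy(struct bpf_dynptr *dst,
                                          struct bpf_dynptr *src)
{
        int err;

        err = bpf_dynptr_memset(dst, /*offset*/ 0, /*size*/ 16, /*val*/ 0);
        if (err)
                return err;
        return bpf_dynptr_copy(dst, 16, src, 0, 16);
}
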
3011 return !ctx->cnt; in bpf_stack_walker()
3012 ctx->cnt++; in bpf_stack_walker()
3015 ctx->aux = prog->aux; in bpf_stack_walker()
3016 ctx->sp = sp; in bpf_stack_walker()
3017 ctx->bp = bp; in bpf_stack_walker()
3028 WARN_ON_ONCE(!ctx.aux->exception_boundary); in bpf_throw()
3036 ctx.aux->bpf_exception_cb(cookie, ctx.sp, ctx.bp, 0, 0); in bpf_throw()
3042 struct bpf_async_kern *async = (struct bpf_async_kern *)wq; in bpf_wq_init() local
3049 return -EINVAL; in bpf_wq_init()
3051 return __bpf_async_init(async, map, flags, BPF_ASYNC_TYPE_WQ); in bpf_wq_init()
3056 struct bpf_async_kern *async = (struct bpf_async_kern *)wq; in bpf_wq_start() local
3060 return -EOPNOTSUPP; in bpf_wq_start()
3062 return -EINVAL; in bpf_wq_start()
3063 w = READ_ONCE(async->work); in bpf_wq_start()
3064 if (!w || !READ_ONCE(w->cb.prog)) in bpf_wq_start()
3065 return -EINVAL; in bpf_wq_start()
3067 schedule_work(&w->work); in bpf_wq_start()
3077 struct bpf_async_kern *async = (struct bpf_async_kern *)wq; in bpf_wq_set_callback_impl() local
3080 return -EINVAL; in bpf_wq_set_callback_impl()
3082 return __bpf_async_set_callback(async, callback_fn, aux, flags, BPF_ASYNC_TYPE_WQ); in bpf_wq_set_callback_impl()
3110 /* On 64-bit hosts, unsigned long and u64 have the same size, so passing
3112 * return the same result, as both point to the same 8-byte area.
3114 * For 32-bit little-endian hosts, using a u64 pointer or unsigned long
3116 * unsigned long is composed of bits 0-31 of the u64 and the second unsigned
3117 * long is composed of bits 32-63 of the u64.
3119 * However, for 32-bit big-endian hosts, this is not the case. The first
3120 * iterated unsigned long will be bits 32-63 of the u64, so swap these two
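
A sketch of the swap the comment above describes; it is only needed on 32-bit big-endian hosts, where the two unsigned longs packed into each u64 would otherwise be visited in the wrong order (the helper name matches the calls below, the body is reconstructed from the comment):

static void swap_ulong_in_u64(u64 *bits, unsigned int nr)
{
#if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN)
        unsigned int i;

        for (i = 0; i < nr; i++)
                bits[i] = (bits[i] >> 32) | ((u64)(u32)bits[i] << 32);
#endif
}
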
3134 * bpf_iter_bits_new() - Initialize a new bits iterator for a given memory area
3137 * @nr_words: The size of the specified memory area, measured in 8-byte units.
3160 kit->nr_bits = 0; in bpf_iter_bits_new()
3161 kit->bits_copy = 0; in bpf_iter_bits_new()
3162 kit->bit = -1; in bpf_iter_bits_new()
3165 return -EINVAL; in bpf_iter_bits_new()
3167 return -E2BIG; in bpf_iter_bits_new()
3171 err = bpf_probe_read_kernel_common(&kit->bits_copy, nr_bytes, unsafe_ptr__ign); in bpf_iter_bits_new()
3173 return -EFAULT; in bpf_iter_bits_new()
3175 swap_ulong_in_u64(&kit->bits_copy, nr_words); in bpf_iter_bits_new()
3177 kit->nr_bits = nr_bits; in bpf_iter_bits_new()
3182 return -E2BIG; in bpf_iter_bits_new()
3185 kit->bits = bpf_mem_alloc(&bpf_global_ma, nr_bytes); in bpf_iter_bits_new()
3186 if (!kit->bits) in bpf_iter_bits_new()
3187 return -ENOMEM; in bpf_iter_bits_new()
3189 err = bpf_probe_read_kernel_common(kit->bits, nr_bytes, unsafe_ptr__ign); in bpf_iter_bits_new()
3191 bpf_mem_free(&bpf_global_ma, kit->bits); in bpf_iter_bits_new()
3195 swap_ulong_in_u64(kit->bits, nr_words); in bpf_iter_bits_new()
3197 kit->nr_bits = nr_bits; in bpf_iter_bits_new()
3202 * bpf_iter_bits_next() - Get the next bit in a bpf_iter_bits
3213 int bit = kit->bit, nr_bits = kit->nr_bits; in bpf_iter_bits_next()
3219 bits = nr_bits == 64 ? &kit->bits_copy : kit->bits; in bpf_iter_bits_next()
3222 kit->bit = bit; in bpf_iter_bits_next()
3226 kit->bit = bit; in bpf_iter_bits_next()
3227 return &kit->bit; in bpf_iter_bits_next()
3231 * bpf_iter_bits_destroy() - Destroy a bpf_iter_bits
3240 if (kit->nr_bits <= 64) in bpf_iter_bits_destroy()
3242 bpf_mem_free(&bpf_global_ma, kit->bits); in bpf_iter_bits_destroy()
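
Open-coded use of the bits iterator from a BPF program mirrors the new/next/destroy triple above; a minimal sketch, assuming the usual vmlinux.h/bpf_helpers.h scaffolding:

static __always_inline int count_set_bits(const u64 *mask, u32 nr_words)
{
        struct bpf_iter_bits it;
        int *bit, cnt = 0;

        bpf_iter_bits_new(&it, mask, nr_words);
        while ((bit = bpf_iter_bits_next(&it)))
                cnt++;
        bpf_iter_bits_destroy(&it);
        return cnt;
}
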
3246 * bpf_copy_from_user_str() - Copy a string from an unsafe user address
3253 * Copies a NUL-terminated string from userspace to BPF space. If user string is
3265 return -EINVAL; in bpf_copy_from_user_str()
3270 ret = strncpy_from_user(dst, unsafe_ptr__ign, dst__sz - 1); in bpf_copy_from_user_str()
3279 memset((char *)dst + ret, 0, dst__sz - ret); in bpf_copy_from_user_str()
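
Call pattern from a sleepable BPF program; a minimal sketch, where BPF_F_PAD_ZEROS asks for the remainder of dst to be zero-filled (flag name as used with this kfunc; verify it against the uapi header you build with):

static __always_inline int read_user_name(const void *user_ptr,
                                          char *dst, u32 dst_sz)
{
        int len;

        len = bpf_copy_from_user_str(dst, dst_sz, user_ptr, BPF_F_PAD_ZEROS);
        if (len < 0)
                return len;     /* fault, or dst_sz == 0 */
        /* len counts the copied bytes including the trailing NUL */
        return 0;
}
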
3287 * bpf_copy_from_user_task_str() - Copy a string from an task's address space
3312 return -EINVAL; in bpf_copy_from_user_task_str()
3325 memset(dst + ret, 0, dst__sz - ret); in bpf_copy_from_user_task_str()
3332 * unsigned long always points to 8-byte region on stack, the kernel may only
3333 * read and write the 4-bytes on 32-bit.
3352 * Since strings are not necessarily %NUL-terminated, we cannot directly call
3353 * in-kernel implementations. Instead, we open-code the implementations using
3364 return -ERANGE; in __bpf_strcasecmp()
3376 return c1 < c2 ? -1 : 1; in __bpf_strcasecmp()
3382 return -E2BIG; in __bpf_strcasecmp()
3384 return -EFAULT; in __bpf_strcasecmp()
3388 * bpf_strcmp - Compare two strings
3393 * * %0 - Strings are equal
3394 * * %-1 - @s1__ign is smaller
3395 * * %1 - @s2__ign is smaller
3396 * * %-EFAULT - Cannot read one of the strings
3397 * * %-E2BIG - One of strings is too large
3398 * * %-ERANGE - One of strings is outside of kernel address space
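
In a BPF program the comparison reads as below; string literals live in .rodata, which these kfuncs can read. A minimal sketch:

static __always_inline bool is_passwd(const char *name)
{
        /* 0: equal; -1/1: ordering; other negative values: read error */
        return bpf_strcmp(name, "/etc/passwd") == 0;
}
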
3406 * bpf_strcasecmp - Compare two strings, ignoring the case of the characters
3411 * * %0 - Strings are equal
3412 * * %-1 - @s1__ign is smaller
3413 * * %1 - @s2__ign is smaller
3414 * * %-EFAULT - Cannot read one of the strings
3415 * * %-E2BIG - One of strings is too large
3416 * * %-ERANGE - One of strings is outside of kernel address space
3424 * bpf_strnchr - Find a character in a length limited string
3429 * Note that the %NUL-terminator is considered part of the string, and can
3433 * * >=0 - Index of the first occurrence of @c within @s__ign
3434 * * %-ENOENT - @c not found in the first @count characters of @s__ign
3435 * * %-EFAULT - Cannot read @s__ign
3436 * * %-E2BIG - @s__ign is too large
3437 * * %-ERANGE - @s__ign is outside of kernel address space
3445 return -ERANGE; in bpf_strnchr()
3453 return -ENOENT; in bpf_strnchr()
3456 return i == XATTR_SIZE_MAX ? -E2BIG : -ENOENT; in bpf_strnchr()
3458 return -EFAULT; in bpf_strnchr()
3462 * bpf_strchr - Find the first occurrence of a character in a string
3466 * Note that the %NUL-terminator is considered part of the string, and can
3470 * * >=0 - The index of the first occurrence of @c within @s__ign
3471 * * %-ENOENT - @c not found in @s__ign
3472 * * %-EFAULT - Cannot read @s__ign
3473 * * %-E2BIG - @s__ign is too large
3474 * * %-ERANGE - @s__ign is outside of kernel address space
3482 * bpf_strchrnul - Find and return a character in a string, or end of string
3487 * * >=0 - Index of the first occurrence of @c within @s__ign or index of
3489 * * %-EFAULT - Cannot read @s__ign
3490 * * %-E2BIG - @s__ign is too large
3491 * * %-ERANGE - @s__ign is outside of kernel address space
3499 return -ERANGE; in bpf_strchrnul()
3508 return -E2BIG; in bpf_strchrnul()
3510 return -EFAULT; in bpf_strchrnul()
3514 * bpf_strrchr - Find the last occurrence of a character in a string
3519 * * >=0 - Index of the last occurrence of @c within @s__ign
3520 * * %-ENOENT - @c not found in @s__ign
3521 * * %-EFAULT - Cannot read @s__ign
3522 * * %-E2BIG - @s__ign is too large
3523 * * %-ERANGE - @s__ign is outside of kernel address space
3528 int i, last = -ENOENT; in bpf_strrchr()
3531 return -ERANGE; in bpf_strrchr()
3542 return -E2BIG; in bpf_strrchr()
3544 return -EFAULT; in bpf_strrchr()
3548 * bpf_strnlen - Calculate the length of a length-limited string
3553 * * >=0 - The length of @s__ign
3554 * * %-EFAULT - Cannot read @s__ign
3555 * * %-E2BIG - @s__ign is too large
3556 * * %-ERANGE - @s__ign is outside of kernel address space
3564 return -ERANGE; in bpf_strnlen()
3573 return i == XATTR_SIZE_MAX ? -E2BIG : i; in bpf_strnlen()
3575 return -EFAULT; in bpf_strnlen()
3579 * bpf_strlen - Calculate the length of a string
3583 * * >=0 - The length of @s__ign
3584 * * %-EFAULT - Cannot read @s__ign
3585 * * %-E2BIG - @s__ign is too large
3586 * * %-ERANGE - @s__ign is outside of kernel address space
3594 * bpf_strspn - Calculate the length of the initial substring of @s__ign which
3600 * * >=0 - The length of the initial substring of @s__ign which only
3602 * * %-EFAULT - Cannot read one of the strings
3603 * * %-E2BIG - One of the strings is too large
3604 * * %-ERANGE - One of the strings is outside of kernel address space
3613 return -ERANGE; in bpf_strspn()
3627 return -E2BIG; in bpf_strspn()
3632 return -E2BIG; in bpf_strspn()
3634 return -EFAULT; in bpf_strspn()
3638 * bpf_strcspn - Calculate the length of the initial substring of @s__ign which
3644 * * >=0 - The length of the initial substring of @s__ign which does not
3646 * * %-EFAULT - Cannot read one of the strings
3647 * * %-E2BIG - One of the strings is too large
3648 * * %-ERANGE - One of the strings is outside of kernel address space
3657 return -ERANGE; in bpf_strcspn()
3671 return -E2BIG; in bpf_strcspn()
3676 return -E2BIG; in bpf_strcspn()
3678 return -EFAULT; in bpf_strcspn()
3682 * bpf_strnstr - Find the first substring in a length-limited string
3688 * * >=0 - Index of the first character of the first occurrence of @s2__ign
3690 * * %-ENOENT - @s2__ign not found in the first @len characters of @s1__ign
3691 * * %-EFAULT - Cannot read one of the strings
3692 * * %-E2BIG - One of the strings is too large
3693 * * %-ERANGE - One of the strings is outside of kernel address space
3702 return -ERANGE; in bpf_strnstr()
3720 return -ENOENT; in bpf_strnstr()
3725 return -E2BIG; in bpf_strnstr()
3727 return -ENOENT; in bpf_strnstr()
3730 return -E2BIG; in bpf_strnstr()
3732 return -EFAULT; in bpf_strnstr()
3736 * bpf_strstr - Find the first substring in a string
3741 * * >=0 - Index of the first character of the first occurrence of @s2__ign
3743 * * %-ENOENT - @s2__ign is not a substring of @s1__ign
3744 * * %-EFAULT - Cannot read one of the strings
3745 * * %-E2BIG - One of the strings is too large
3746 * * %-ERANGE - One of the strings is outside of kernel address space
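
The length-limited variant bounds how much of the haystack is examined; a minimal sketch using bpf_strnstr():

static __always_inline bool mentions_ssh(const char *cmdline)
{
        /* only the first 64 bytes of cmdline are searched */
        return bpf_strnstr(cmdline, "ssh", 64) >= 0;
}
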
3754 * bpf_lookup_user_key - lookup a key by its serial
3756 * @flags: lookup-specific flags
3767 * one of the available key-specific kfuncs.
3800 bkey->key = key_ref_to_ptr(key_ref); in bpf_lookup_user_key()
3801 bkey->has_ref = true; in bpf_lookup_user_key()
3807 * bpf_lookup_system_key - lookup a key by a system-defined ID
3825 * pre-determined ID on success, a NULL pointer otherwise
3838 bkey->key = (struct key *)(unsigned long)id; in bpf_lookup_system_key()
3839 bkey->has_ref = false; in bpf_lookup_system_key()
3845 * bpf_key_put - decrement key reference count if key is valid and free bpf_key
3853 if (bkey->has_ref) in bpf_key_put()
3854 key_put(bkey->key); in bpf_key_put()
3860 * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
3881 if (trusted_keyring->has_ref) { in bpf_verify_pkcs7_signature()
3890 ret = key_validate(trusted_keyring->key); in bpf_verify_pkcs7_signature()
3901 trusted_keyring->key, in bpf_verify_pkcs7_signature()
3905 return -EOPNOTSUPP; in bpf_verify_pkcs7_signature()
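
The intended flow from a sleepable BPF (e.g. LSM) program, per the kernel-doc above: look up the keyring, verify the PKCS#7 signature over data and signature already wrapped in dynptrs, drop the reference. A minimal sketch:

static __always_inline int verify_blob(struct bpf_dynptr *data,
                                       struct bpf_dynptr *sig,
                                       u32 keyring_serial)
{
        struct bpf_key *kr;
        int ret;

        kr = bpf_lookup_user_key(keyring_serial, 0);
        if (!kr)
                return -1;      /* keyring not found or not permitted */

        ret = bpf_verify_pkcs7_signature(data, sig, kr);

        bpf_key_put(kr);
        return ret;
}
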
3912 enum bpf_task_work_state {
3928 enum bpf_task_work_state state;
3939 enum task_work_notify_mode mode;
3951 if (ctx->prog) { in bpf_task_work_ctx_reset()
3952 bpf_prog_put(ctx->prog); in bpf_task_work_ctx_reset()
3953 ctx->prog = NULL; in bpf_task_work_ctx_reset()
3955 if (ctx->task) { in bpf_task_work_ctx_reset()
3956 bpf_task_release(ctx->task); in bpf_task_work_ctx_reset()
3957 ctx->task = NULL; in bpf_task_work_ctx_reset()
3963 return refcount_inc_not_zero(&ctx->refcnt); in bpf_task_work_ctx_tryget()
3968 if (!refcount_dec_and_test(&ctx->refcnt)) in bpf_task_work_ctx_put()
3987 if (task_work_cancel(ctx->task, &ctx->work)) in bpf_task_work_cancel()
3994 enum bpf_task_work_state state; in bpf_task_work_callback()
4002 * SCHEDULED state, so handle both transition variants SCHEDULING|SCHEDULED -> RUNNING. in bpf_task_work_callback()
4004 state = cmpxchg(&ctx->state, BPF_TW_SCHEDULING, BPF_TW_RUNNING); in bpf_task_work_callback()
4006 state = cmpxchg(&ctx->state, BPF_TW_SCHEDULED, BPF_TW_RUNNING); in bpf_task_work_callback()
4012 key = (void *)map_key_from_value(ctx->map, ctx->map_val, &idx); in bpf_task_work_callback()
4015 ctx->callback_fn(ctx->map, key, ctx->map_val); in bpf_task_work_callback()
4019 (void)cmpxchg(&ctx->state, BPF_TW_RUNNING, BPF_TW_STANDBY); in bpf_task_work_callback()
4027 enum bpf_task_work_state state; in bpf_task_work_irq()
4032 if (cmpxchg(&ctx->state, BPF_TW_PENDING, BPF_TW_SCHEDULING) != BPF_TW_PENDING) { in bpf_task_work_irq()
4037 err = task_work_add(ctx->task, &ctx->work, ctx->mode); in bpf_task_work_irq()
4044 (void)cmpxchg(&ctx->state, BPF_TW_SCHEDULING, BPF_TW_STANDBY); in bpf_task_work_irq()
4051 * complete running by now, going SCHEDULING -> RUNNING and then in bpf_task_work_irq()
4053 * protected below ctx->state access, we rely on RCU protection to in bpf_task_work_irq()
4054 * perform below SCHEDULING -> SCHEDULED attempt. in bpf_task_work_irq()
4056 state = cmpxchg(&ctx->state, BPF_TW_SCHEDULING, BPF_TW_SCHEDULED); in bpf_task_work_irq()
4067 ctx = READ_ONCE(twk->ctx); in bpf_task_work_fetch_ctx()
4073 return ERR_PTR(-ENOMEM); in bpf_task_work_fetch_ctx()
4076 refcount_set(&ctx->refcnt, 1); /* map's own ref */ in bpf_task_work_fetch_ctx()
4077 ctx->state = BPF_TW_STANDBY; in bpf_task_work_fetch_ctx()
4079 old_ctx = cmpxchg(&twk->ctx, NULL, ctx); in bpf_task_work_fetch_ctx()
4082 * tw->ctx is set by concurrent BPF program, release allocated in bpf_task_work_fetch_ctx()
4103 return ERR_PTR(-EBUSY); in bpf_task_work_acquire_ctx()
4105 if (cmpxchg(&ctx->state, BPF_TW_STANDBY, BPF_TW_PENDING) != BPF_TW_STANDBY) { in bpf_task_work_acquire_ctx()
4108 return ERR_PTR(-EBUSY); in bpf_task_work_acquire_ctx()
4116 if (!atomic64_read(&map->usercnt)) { in bpf_task_work_acquire_ctx()
4121 return ERR_PTR(-EBUSY); in bpf_task_work_acquire_ctx()
4129 struct bpf_prog_aux *aux, enum task_work_notify_mode mode) in bpf_task_work_schedule()
4137 prog = bpf_prog_inc_not_zero(aux->prog); in bpf_task_work_schedule()
4139 return -EBADF; in bpf_task_work_schedule()
4142 err = -EBADF; in bpf_task_work_schedule()
4152 ctx->task = task; in bpf_task_work_schedule()
4153 ctx->callback_fn = callback_fn; in bpf_task_work_schedule()
4154 ctx->prog = prog; in bpf_task_work_schedule()
4155 ctx->mode = mode; in bpf_task_work_schedule()
4156 ctx->map = map; in bpf_task_work_schedule()
4157 ctx->map_val = (void *)tw - map->record->task_work_off; in bpf_task_work_schedule()
4158 init_task_work(&ctx->work, bpf_task_work_callback); in bpf_task_work_schedule()
4159 init_irq_work(&ctx->irq_work, bpf_task_work_irq); in bpf_task_work_schedule()
4161 irq_work_queue(&ctx->irq_work); in bpf_task_work_schedule()
4172 * bpf_task_work_schedule_signal - Schedule BPF callback using task_work_add with TWA_SIGNAL mode
4189 * bpf_task_work_schedule_resume - Schedule BPF callback using task_work_add with TWA_RESUME mode
4219 enum bpf_task_work_state state; in bpf_task_work_cancel_and_free()
4221 ctx = xchg(&twk->ctx, NULL); in bpf_task_work_cancel_and_free()
4225 state = xchg(&ctx->state, BPF_TW_FREED); in bpf_task_work_cancel_and_free()
4228 init_irq_work(&ctx->irq_work, bpf_task_work_cancel_scheduled); in bpf_task_work_cancel_and_free()
4229 irq_work_queue(&ctx->irq_work); in bpf_task_work_cancel_and_free()