/linux/tools/testing/selftests/bpf/progs/

verifier_ref_tracking.c
    10   /* struct bpf_sock_tuple tuple = {} */ \
    121  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))  in reference_tracking_leak_potential_reference()
    136  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))  in potential_reference_to_sock_common_1()
    154  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))  in leak_potential_reference_on_stack()
    174  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))  in potential_reference_on_stack_2()
    189  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))  in reference_tracking_zero_potential_reference()
    204  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))  in potential_reference_to_sock_common_2()
    221  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))  in copy_and_zero_potential_references()
    360  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))  in tracking_release_reference_without_check()
    379  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))  in to_sock_common_without_check()
    [all …]

test_sk_lookup_kern.c
    21   static struct bpf_sock_tuple *get_tuple(void *data, __u64 nh_off,  in get_tuple()
    25   struct bpf_sock_tuple *result;  in get_tuple()
    37   result = (struct bpf_sock_tuple *)&iph->saddr;  in get_tuple()
    46   result = (struct bpf_sock_tuple *)&ip6h->saddr;  in get_tuple()
    61   struct bpf_sock_tuple *tuple;  in sk_lookup_success()
    84   struct bpf_sock_tuple tuple = {};  in sk_lookup_success_simple()
    96   struct bpf_sock_tuple tuple = {};  in err_use_after_free()
    111  struct bpf_sock_tuple tuple = {};  in err_modify_sk_pointer()
    125  struct bpf_sock_tuple tuple = {};  in err_modify_sk_or_null_pointer()
    138  struct bpf_sock_tuple tuple = {};  in err_no_release()
    [all …]

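The "struct bpf_sock_tuple tuple = {};" hits above show the simplest usage pattern: fill the tuple on the stack and hand it to a socket-lookup helper, then release the acquired socket reference. A minimal sketch of that flow follows; it is not code from test_sk_lookup_kern.c, and the addresses, ports and section name are placeholder assumptions.

/* Hedged sketch of the stack-tuple lookup pattern (not copied from the tree).
 * Addresses, ports and the section name are placeholder assumptions. */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("tc")
int sk_lookup_sketch(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};
	struct bpf_sock *sk;

	tuple.ipv4.saddr = bpf_htonl(0x7f000001);	/* 127.0.0.1, assumed */
	tuple.ipv4.daddr = bpf_htonl(0x7f000001);
	tuple.ipv4.sport = bpf_htons(40000);		/* assumed ports */
	tuple.ipv4.dport = bpf_htons(80);

	/* Pass sizeof(tuple.ipv4) so the helper treats this as an IPv4 key. */
	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
			       BPF_F_CURRENT_NETNS, 0);
	if (sk)
		bpf_sk_release(sk);	/* acquired references must be released */
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";
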
test_bpf_nf_fail.c
    17   struct nf_conn *bpf_skb_ct_alloc(struct __sk_buff *, struct bpf_sock_tuple *, u32,
    19   struct nf_conn *bpf_skb_ct_lookup(struct __sk_buff *, struct bpf_sock_tuple *, u32,
    32   struct bpf_sock_tuple tup = {};  in alloc_release()
    46   struct bpf_sock_tuple tup = {};  in insert_insert()
    63   struct bpf_sock_tuple tup = {};  in lookup_insert()
    77   struct bpf_sock_tuple tup = {};  in write_not_allowlisted_field()
    91   struct bpf_sock_tuple tup = {};  in set_timeout_after_insert()
    108  struct bpf_sock_tuple tup = {};  in set_status_after_insert()
    125  struct bpf_sock_tuple tup = {};  in change_timeout_after_alloc()
    139  struct bpf_sock_tuple tup = {};  in change_status_after_alloc()

test_sk_assign.c
    54   static inline struct bpf_sock_tuple *
    59   struct bpf_sock_tuple *result;  in get_tuple()
    79   result = (struct bpf_sock_tuple *)&iph->saddr;  in get_tuple()
    88   result = (struct bpf_sock_tuple *)&ip6h->saddr;  in get_tuple()
    90   return (struct bpf_sock_tuple *)data;  in get_tuple()
    102  handle_udp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4)  in handle_udp()
    133  handle_tcp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4)  in handle_tcp()
    174  struct bpf_sock_tuple *tuple;  in bpf_sk_assign_test()

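test_sk_assign.c and several of the files above also show the second common pattern: instead of copying fields, the program points a struct bpf_sock_tuple * directly at the saddr field of the IP header, because saddr/daddr plus the immediately following L4 ports line up with the ipv4 member. Below is a hedged sketch of that pattern, assuming a plain IPv4+TCP frame with no IP options; the program and section names are illustrative, not taken from the file.

/* Hedged sketch of the in-packet tuple pattern (cast &iph->saddr).
 * Assumes Ethernet + IPv4 with no options + TCP; names are illustrative. */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("tc")
int in_packet_tuple_sketch(struct __sk_buff *skb)
{
	void *data = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;
	struct ethhdr *eth = data;
	struct iphdr *iph = data + sizeof(*eth);
	struct bpf_sock_tuple *tuple;
	struct bpf_sock *sk;

	if ((void *)(eth + 1) > data_end || eth->h_proto != bpf_htons(ETH_P_IP))
		return TC_ACT_OK;
	if ((void *)(iph + 1) > data_end || iph->protocol != IPPROTO_TCP ||
	    iph->ihl != 5)	/* no IP options, so ports follow daddr */
		return TC_ACT_OK;

	/* saddr/daddr in the IP header are followed by the TCP source/dest
	 * ports, so together they match bpf_sock_tuple.ipv4. */
	tuple = (struct bpf_sock_tuple *)&iph->saddr;
	if ((void *)tuple + sizeof(tuple->ipv4) > data_end)
		return TC_ACT_OK;

	sk = bpf_sk_lookup_tcp(skb, tuple, sizeof(tuple->ipv4),
			       BPF_F_CURRENT_NETNS, 0);
	if (sk)
		bpf_sk_release(sk);
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";
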
test_bpf_nf.c
    72   struct nf_conn *bpf_xdp_ct_alloc(struct xdp_md *, struct bpf_sock_tuple *, u32,
    74   struct nf_conn *bpf_xdp_ct_lookup(struct xdp_md *, struct bpf_sock_tuple *, u32,
    76   struct nf_conn *bpf_skb_ct_alloc(struct __sk_buff *, struct bpf_sock_tuple *, u32,
    78   struct nf_conn *bpf_skb_ct_lookup(struct __sk_buff *, struct bpf_sock_tuple *, u32,
    90   nf_ct_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32,  in nf_ct_test() argument
    92   struct nf_conn *(*alloc_fn)(void *, struct bpf_sock_tuple *, u32,  in nf_ct_test()
    97   struct bpf_sock_tuple bpf_tuple;  in nf_ct_test()
    245  nf_ct_opts_new_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32,  in nf_ct_opts_new_test() argument
    247  struct nf_conn *(*alloc_fn)(void *, struct bpf_sock_tuple *, u32,  in nf_ct_opts_new_test()
    252  struct bpf_sock_tuple bpf_tuple;  in nf_ct_opts_new_test()

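The conntrack selftests above pass the tuple to kfuncs rather than helpers. The sketch below models the lookup-and-release flow on test_bpf_nf.c; the locally declared bpf_ct_opts layout and its 12-byte size are assumptions of this sketch (the authoritative definition lives in net/netfilter/nf_conntrack_bpf.c), and the addresses and ports are placeholders.

/* Hedged sketch of a conntrack lookup via kfuncs, modeled on test_bpf_nf.c.
 * The ___local suffix lets the program-local opts struct match the kernel's
 * struct bpf_ct_opts by name; its exact layout/size is an assumption here. */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

struct bpf_ct_opts___local {
	s32 netns_id;
	s32 error;
	u8 l4proto;
	u8 dir;
	u8 reserved[2];
};

struct nf_conn *bpf_skb_ct_lookup(struct __sk_buff *skb_ctx,
				  struct bpf_sock_tuple *bpf_tuple, u32 tuple__sz,
				  struct bpf_ct_opts___local *opts, u32 opts__sz) __ksym;
void bpf_ct_release(struct nf_conn *ct) __ksym;

SEC("tc")
int ct_lookup_sketch(struct __sk_buff *skb)
{
	struct bpf_ct_opts___local opts = { .netns_id = -1, .l4proto = IPPROTO_TCP };
	struct bpf_sock_tuple tup = {};
	struct nf_conn *ct;

	tup.ipv4.saddr = bpf_htonl(0x0a000001);	/* 10.0.0.1, assumed */
	tup.ipv4.daddr = bpf_htonl(0x0a000002);	/* 10.0.0.2, assumed */
	tup.ipv4.sport = bpf_htons(32768);
	tup.ipv4.dport = bpf_htons(5201);

	ct = bpf_skb_ct_lookup(skb, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
	if (ct)
		bpf_ct_release(ct);	/* acquired nf_conn must be released */
	return 0;
}

char _license[] SEC("license") = "GPL";
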
test_cls_redirect.c
    94   offsetofend(struct bpf_sock_tuple, ipv4.dport) -
    95   offsetof(struct bpf_sock_tuple, ipv4.sport) - 1,
    99   offsetofend(struct bpf_sock_tuple, ipv6.dport) -
    100  offsetof(struct bpf_sock_tuple, ipv6.sport) - 1,
    621  static INLINING uint64_t fill_tuple(struct bpf_sock_tuple *tuple, void *iph,  in fill_tuple()
    651  struct bpf_sock_tuple *tuple, uint64_t tuplen,  in classify_tcp()
    684  struct bpf_sock_tuple *tuple, uint64_t tuplen)  in classify_udp()
    702  struct bpf_sock_tuple *tuple, uint64_t tuplen,  in classify_icmp()
    752  struct bpf_sock_tuple tuple;  in process_icmpv4()
    804  struct bpf_sock_tuple tuple;  in process_icmpv6()
    [all …]

test_cls_redirect_dynptr.c
    88   offsetofend(struct bpf_sock_tuple, ipv4.dport) -
    89   offsetof(struct bpf_sock_tuple, ipv4.sport) - 1,
    93   offsetofend(struct bpf_sock_tuple, ipv6.dport) -
    94   offsetof(struct bpf_sock_tuple, ipv6.sport) - 1,
    511  static uint64_t fill_tuple(struct bpf_sock_tuple *tuple, void *iph,  in fill_tuple()
    540  static verdict_t classify_tcp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple,  in classify_tcp()
    572  static verdict_t classify_udp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, uint64_t tuplen)  in classify_udp()
    589  static verdict_t classify_icmp(struct __sk_buff *skb, uint8_t proto, struct bpf_sock_tuple *tuple,  in classify_icmp()
    641  struct bpf_sock_tuple tuple;  in process_icmpv4()
    657  struct bpf_sock_tuple tuple;  in process_icmpv6()
    [all …]

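Both cls_redirect programs size their tuple accesses with offsetof/offsetofend arithmetic on struct bpf_sock_tuple. The short userspace C program below (an illustration written for this note, not code from either file) prints the quantities that arithmetic produces: each address family's half of the tuple ends at its dport field, so offsetofend(..., dport) is the length to hand to the socket-lookup helpers.

/* Illustration of the offsetof/offsetofend arithmetic (userspace C, assumed
 * helper macro below mirrors the kernel's offsetofend definition). */
#include <stdio.h>
#include <stddef.h>
#include <linux/bpf.h>

#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

int main(void)
{
	printf("IPv4 tuple length: %zu\n",
	       offsetofend(struct bpf_sock_tuple, ipv4.dport));	/* 12 */
	printf("IPv6 tuple length: %zu\n",
	       offsetofend(struct bpf_sock_tuple, ipv6.dport));	/* 36 */
	printf("port pair span (v4): %zu\n",
	       offsetofend(struct bpf_sock_tuple, ipv4.dport) -
	       offsetof(struct bpf_sock_tuple, ipv4.sport));	/* 4 */
	return 0;
}
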
verifier_unpriv.c
    434  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))  in pointers_stx_ctx_and_sock()
    470  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))  in different_pointers_stx_leak_sock()
    507  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))  in stx_sock_and_ctx_read()
    547  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))  in stx_sock_and_ctx_write()

vrf_socket_lookup.c
    21   struct bpf_sock_tuple *tp;  in socket_lookup()
    36   tp = (struct bpf_sock_tuple *)&iph->saddr;  in socket_lookup()

cgroup_skb_sk_lookup_kern.c
    29   static inline void set_tuple(struct bpf_sock_tuple *tuple,  in set_tuple()
    44   struct bpf_sock_tuple tuple;  in is_allowed_peer_cg()

test_btf_skc_cls_ingress.c
    68   struct bpf_sock_tuple *tuple = NULL;  in handle_ip6_tcp()
    93   tuple = (struct bpf_sock_tuple *)&ip4h->saddr;  in handle_ip6_tcp()
    110  tuple = (struct bpf_sock_tuple *)&ip6h->saddr;  in handle_ip6_tcp()

connect6_prog.c
    30   struct bpf_sock_tuple tuple = {};  in connect_v6_prog()

test_tcp_check_syncookie_kern.c

connect4_prog.c
    151  struct bpf_sock_tuple tuple = {};  in connect_v4_prog()

xdp_synproxy_kern.c
    99   struct bpf_sock_tuple *bpf_tuple,
    105  struct bpf_sock_tuple *bpf_tuple,
    436  struct bpf_sock_tuple tup = {};  in tcp_lookup()

test_tcp_custom_syncookie.c
    507  struct bpf_sock_tuple tuple;  in tcp_handle_ack()

/linux/net/netfilter/

nf_conntrack_bpf.c
    67   static int bpf_nf_ct_tuple_parse(struct bpf_sock_tuple *bpf_tuple,  in bpf_nf_ct_tuple_parse()
    108  __bpf_nf_ct_alloc_entry(struct net *net, struct bpf_sock_tuple *bpf_tuple,  in __bpf_nf_ct_alloc_entry()
    173  struct bpf_sock_tuple *bpf_tuple,  in __bpf_nf_ct_lookup()
    293  bpf_xdp_ct_alloc(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple,  in bpf_xdp_ct_alloc()
    327  bpf_xdp_ct_lookup(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple,  in bpf_xdp_ct_lookup()
    360  bpf_skb_ct_alloc(struct __sk_buff *skb_ctx, struct bpf_sock_tuple *bpf_tuple,  in bpf_skb_ct_alloc()
    395  bpf_skb_ct_lookup(struct __sk_buff *skb_ctx, struct bpf_sock_tuple *bpf_tuple,  in bpf_skb_ct_lookup()

/linux/net/core/

filter.c
    6683  static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,  in sk_lookup()
    6733  __bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,  in __bpf_skc_lookup()
    6774  __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,  in __bpf_sk_lookup()
    6803  bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,  in bpf_skc_lookup()
    6822  bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,  in bpf_sk_lookup()
    6849  struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)  in BPF_CALL_5() argument
    6868  struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)  in BPF_CALL_5() argument
    6887  struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)  in BPF_CALL_5() argument
    6906  struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)  in BPF_CALL_5() argument
    6930  struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)  in BPF_CALL_5() argument
    [all …]

/linux/include/uapi/linux/

bpf.h
    6397  struct bpf_sock_tuple {  struct

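For reference, the definition behind the two bpf.h hits is a small union of an IPv4 and an IPv6 key (the L4 protocol is implied by which helper or kfunc the tuple is passed to). The sketch below reflects the UAPI layout; consult include/uapi/linux/bpf.h itself for the authoritative definition.

/* struct bpf_sock_tuple as exposed by the BPF UAPI: a union of an IPv4 and
 * an IPv6 address/port key. Check include/uapi/linux/bpf.h for the
 * authoritative definition. */
#include <linux/types.h>

struct bpf_sock_tuple {
	union {
		struct {
			__be32 saddr;
			__be32 daddr;
			__be16 sport;
			__be16 dport;
		} ipv4;
		struct {
			__be32 saddr[4];
			__be32 daddr[4];
			__be16 sport;
			__be16 dport;
		} ipv6;
	};
};
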
/linux/tools/include/uapi/linux/

bpf.h
    6397  struct bpf_sock_tuple {  struct

/linux/tools/testing/selftests/bpf/

test_verifier.c
    463  /* struct bpf_sock_tuple tuple = {} */ \  in bpf_fill_big_prog_with_loop_1()
    474  BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)), \