/linux/samples/bpf/
tcp_dumpstats_kern.c
    28  struct bpf_tcp_sock *tcp_sk;  in _sockops() local
    56  tcp_sk = bpf_tcp_sock(sk);  in _sockops()
    57  if (!tcp_sk)  in _sockops()
    63  tcp_sk->dsack_dups, tcp_sk->delivered);  in _sockops()
    65  tcp_sk->delivered_ce, tcp_sk->icsk_retransmits);  in _sockops()

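This sample and the tcp_rtt.c selftest below share one sockops pattern: take the socket pointer out of the context, convert it with the bpf_tcp_sock() helper, and read fields from the read-only struct bpf_tcp_sock mirror. A minimal sketch of that pattern, assuming standard libbpf headers; the real sample additionally rate-limits its output through a socket-storage map:

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sockops")
int _sockops(struct bpf_sock_ops *ctx)
{
	struct bpf_tcp_sock *tcp_sk;
	struct bpf_sock *sk = ctx->sk;

	if (!sk)
		return 1;

	/* NULL unless sk is a full (non-request, non-timewait) TCP socket */
	tcp_sk = bpf_tcp_sock(sk);
	if (!tcp_sk)
		return 1;

	bpf_printk("dsack_dups %u delivered %u",
		   tcp_sk->dsack_dups, tcp_sk->delivered);
	bpf_printk("delivered_ce %u retransmits %u",
		   tcp_sk->delivered_ce, tcp_sk->icsk_retransmits);
	return 1;
}

char _license[] SEC("license") = "GPL";

bpf_tcp_sock() returns NULL for request and timewait sockets, which is why every hit above is immediately followed by a NULL check.
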
/linux/tools/testing/selftests/bpf/progs/
cgrp_ls_attach_cgroup.c
    27  struct tcp_sock *tcp_sk;  in set_cookie() local
    37  tcp_sk = bpf_skc_to_tcp_sock(sk);  in set_cookie()
    38  if (!tcp_sk)  in set_cookie()
    42  tcp_sk->inet_conn.icsk_inet.sk.sk_cgrp_data.cgroup, 0,  in set_cookie()
    56  struct tcp_sock *tcp_sk;  in update_cookie_sockops() local
    66  tcp_sk = bpf_skc_to_tcp_sock(sk);  in update_cookie_sockops()
    67  if (!tcp_sk)  in update_cookie_sockops()
    71  tcp_sk->inet_conn.icsk_inet.sk.sk_cgrp_data.cgroup, 0, 0);  in update_cookie_sockops()

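Where bpf_tcp_sock() yields only the read-only mirror, the bpf_skc_to_tcp_sock() hits above produce a BTF-typed pointer to the kernel's real struct tcp_sock, so the program can walk embedded structures all the way down to the socket's cgroup. A sketch under the same assumptions as the selftest: a vmlinux.h generated from the running kernel's BTF, and a cgroup-storage map (the name socket_cookies is an assumption here, modeled on the code surrounding the hits):

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

/* Cgroup storage keyed by the socket's cgroup */
struct {
	__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, __u64);
} socket_cookies SEC(".maps");

SEC("sockops")
int set_cookie(struct bpf_sock_ops *ctx)
{
	struct tcp_sock *tcp_sk;
	__u64 *cookie;

	if (!ctx->sk)
		return 1;

	tcp_sk = bpf_skc_to_tcp_sock(ctx->sk);
	if (!tcp_sk)
		return 1;

	/* Walk tcp_sock -> inet_connection_sock -> inet_sock -> sock,
	 * the chain the hits above spell out, to reach the cgroup. */
	cookie = bpf_cgrp_storage_get(&socket_cookies,
			tcp_sk->inet_conn.icsk_inet.sk.sk_cgrp_data.cgroup,
			0, BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (cookie)
		*cookie = bpf_get_socket_cookie(ctx);
	return 1;
}

char _license[] SEC("license") = "GPL";

The long member chain is legal because the verifier treats the helper's result as a trusted kernel pointer and checks each access against BTF.
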
tcp_rtt.c
    29  struct bpf_tcp_sock *tcp_sk;  in _sockops() local
    50  tcp_sk = bpf_tcp_sock(sk);  in _sockops()
    51  if (!tcp_sk)  in _sockops()
    56  storage->dsack_dups = tcp_sk->dsack_dups;  in _sockops()
    57  storage->delivered = tcp_sk->delivered;  in _sockops()
    58  storage->delivered_ce = tcp_sk->delivered_ce;  in _sockops()
    59  storage->icsk_retransmits = tcp_sk->icsk_retransmits;  in _sockops()

bpf_dctcp.c
    70  const struct tcp_sock *tp = tcp_sk(sk);  in BPF_PROG()
   118  struct tcp_sock *tp = tcp_sk(sk);  in BPF_PROG()
   127  const struct tcp_sock *tp = tcp_sk(sk);  in BPF_PROG()
   157  struct tcp_sock *tp = tcp_sk(sk);  in dctcp_react_to_loss()
   176  struct tcp_sock *tp = tcp_sk(sk);  in dctcp_ece_ack_cwr()
   205  *prior_rcv_nxt = tcp_sk(sk)->rcv_nxt;  in dctcp_ece_ack_update()
   234  return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);  in BPF_PROG()

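In bpf_dctcp.c and the other struct_ops congestion-control programs listed below (bpf_cubic.c, bpf_cc_cubic.c, the tcp_ca_* tests), tcp_sk() is not a helper call at all: the selftests' shared TCP header defines it as a plain cast, mirroring the kernel's own accessor, and the verifier permits the dereference because sk arrives as a trusted BTF pointer. A sketch of that arrangement; the op name sketch_ssthresh is hypothetical, and a complete program would also declare a SEC(".struct_ops") struct tcp_congestion_ops map wiring up the mandatory callbacks:

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/* Plain-cast accessor, as defined in the selftests' shared TCP header */
static __always_inline struct tcp_sock *tcp_sk(const struct sock *sk)
{
	return (struct tcp_sock *)sk;
}

SEC("struct_ops/sketch_ssthresh")
__u32 BPF_PROG(sketch_ssthresh, struct sock *sk)
{
	/* Same shape as the tcp_ca_update.c hits: read a field and return */
	return tcp_sk(sk)->snd_ssthresh;
}

char _license[] SEC("license") = "GPL";
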
sock_destroy_prog.c
    79  struct tcp6_sock *tcp_sk;  in iter_tcp6_server() local
    88  tcp_sk = bpf_skc_to_tcp6_sock(sk_common);  in iter_tcp6_server()
    89  if (!tcp_sk)  in iter_tcp6_server()
    92  icsk = &tcp_sk->tcp.inet_conn;  in iter_tcp6_server()

tcp_ca_write_sk_pacing.c
    38  struct tcp_sock *tp = tcp_sk(sk);  in BPF_PROG()
    49  return tcp_sk(sk)->snd_ssthresh;  in BPF_PROG()
    55  return tcp_sk(sk)->snd_cwnd;  in BPF_PROG()

bpf_cubic.c
   166  return tcp_sk(sk)->tcp_mstamp;  in bictcp_clock_us()
   171  struct tcp_sock *tp = tcp_sk(sk);  in bictcp_hystart_reset()
   191  tcp_sk(sk)->snd_ssthresh = initial_ssthresh;  in BPF_PROG()
   202  delta = now - tcp_sk(sk)->lsndtime;  in BPF_PROG()
   388  struct tcp_sock *tp = tcp_sk(sk);  in BPF_PROG()
   408  const struct tcp_sock *tp = tcp_sk(sk);  in BPF_PROG()
   456  struct tcp_sock *tp = tcp_sk(sk);  in hystart_update()
   505  const struct tcp_sock *tp = tcp_sk(sk);  in BPF_PROG()

tcp_ca_incompl_cong_ops.c
    12  return tcp_sk(sk)->snd_ssthresh;  in BPF_PROG()
    18  return tcp_sk(sk)->snd_cwnd;  in BPF_PROG()

bpf_cc_cubic.c
    49  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_update_pacing_rate()
    79  struct tcp_sock *tp = tcp_sk(sk);  in tcp_cwnd_reduction()
   107  if (tcp_sk(sk)->reordering > TCP_REORDERING)  in tcp_may_raise_cwnd()
   129  struct tcp_sock *tp = tcp_sk(sk);  in BPF_PROG()

tcp_ca_update.c
    33  return tcp_sk(sk)->snd_ssthresh;  in BPF_PROG()
    39  return tcp_sk(sk)->snd_cwnd;  in BPF_PROG()

/linux/net/ipv4/
tcp_timer.c
    30  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_clamp_rto_to_user_timeout()
   107  struct tcp_sock *tp = tcp_sk(sk);  in tcp_out_of_resources()
   217  struct tcp_sock *tp = tcp_sk(sk);  in retransmits_timed_out()
   244  struct tcp_sock *tp = tcp_sk(sk);  in tcp_write_timeout()
   310  struct tcp_sock *tp = tcp_sk(sk);  in tcp_delack_timer_handler()
   381  struct tcp_sock *tp = tcp_sk(sk);  in tcp_probe_timer()
   430  struct tcp_sock *tp = tcp_sk(sk);  in tcp_update_rto_stats()
   447  struct tcp_sock *tp = tcp_sk(sk);  in tcp_fastopen_synack_timer()
   485  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_rtx_probe0_timed_out()
   522  struct tcp_sock *tp = tcp_sk(sk);  in tcp_retransmit_timer()
    [all …]

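Every net/ipv4 hit in this section goes through the same accessor. struct sock sits at offset zero of struct tcp_sock (through the embedded inet_connection_sock and inet_sock), so the conversion is effectively a pointer cast. The classic definition in include/linux/tcp.h is the one-liner below; newer trees express the same relationship with container_of_const() so that a const struct sock * yields a const struct tcp_sock *:

static inline struct tcp_sock *tcp_sk(const struct sock *sk)
{
	return (struct tcp_sock *)sk;
}
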
tcp_bbr.c
   246  unsigned int mss = tcp_sk(sk)->mss_cache;  in bbr_rate_bytes_per_sec()
   268  struct tcp_sock *tp = tcp_sk(sk);  in bbr_init_pacing_rate_from_rtt()
   288  struct tcp_sock *tp = tcp_sk(sk);  in bbr_set_pacing_rate()
   306  struct tcp_sock *tp = tcp_sk(sk);  in bbr_tso_segs_goal()
   323  struct tcp_sock *tp = tcp_sk(sk);  in bbr_save_cwnd()
   334  struct tcp_sock *tp = tcp_sk(sk);  in bbr_cwnd_event()
   439  struct tcp_sock *tp = tcp_sk(sk);  in bbr_packets_in_net_at_edt()
   483  struct tcp_sock *tp = tcp_sk(sk);  in bbr_set_cwnd_to_recover_or_restore()
   522  struct tcp_sock *tp = tcp_sk(sk);  in bbr_set_cwnd()
   557  struct tcp_sock *tp = tcp_sk(sk);  in bbr_is_next_cycle_phase()
    [all …]

tcp_input.c
   146  bool unknown_opt = tcp_sk(sk)->rx_opt.saw_unknown &&  in bpf_skops_parse_hdr()
   147  BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk),  in bpf_skops_parse_hdr()
   149  bool parse_all_opt = BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk),  in bpf_skops_parse_hdr()
   241  u8 old_ratio = tcp_sk(sk)->scaling_ratio;  in tcp_measure_rcv_mss()
   244  tcp_sk(sk)->scaling_ratio = val ? val : 1;  in tcp_measure_rcv_mss()
   246  if (old_ratio != tcp_sk(sk)->scaling_ratio)  in tcp_measure_rcv_mss()
   247  WRITE_ONCE(tcp_sk(sk)->window_clamp,  in tcp_measure_rcv_mss()
   251  tcp_sk(sk)->advmss);  in tcp_measure_rcv_mss()
   287  len -= tcp_sk(sk)->tcp_header_len;  in tcp_measure_rcv_mss()
   303  unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);  in tcp_incr_quickack()
    [all …]

tcp_recovery.c
     7  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_rack_reo_wnd()
    60  struct tcp_sock *tp = tcp_sk(sk);  in tcp_rack_detect_loss()
    97  struct tcp_sock *tp = tcp_sk(sk);  in tcp_rack_mark_lost()
   151  struct tcp_sock *tp = tcp_sk(sk);  in tcp_rack_reo_timeout()
   189  struct tcp_sock *tp = tcp_sk(sk);  in tcp_rack_update_reo_wnd()
   220  struct tcp_sock *tp = tcp_sk(sk);  in tcp_newreno_mark_lost()

tcp_westwood.c
    73  w->snd_una = tcp_sk(sk)->snd_una;  in tcp_westwood_init()
   127  w->snd_una = tcp_sk(sk)->snd_una;  in westwood_update_window()
   165  const struct tcp_sock *tp = tcp_sk(sk);  in westwood_fast_bw()
   182  const struct tcp_sock *tp = tcp_sk(sk);  in westwood_acked_count()
   219  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_westwood_bw_rttmin()
   242  struct tcp_sock *tp = tcp_sk(sk);  in tcp_westwood_event()

tcp_output.c
    70  struct tcp_sock *tp = tcp_sk(sk);  in tcp_event_new_data_sent()
    99  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_acceptable_seq()
   125  struct tcp_sock *tp = tcp_sk(sk);  in tcp_advertise_mss()
   146  struct tcp_sock *tp = tcp_sk(sk);  in tcp_cwnd_restart()
   184  struct tcp_sock *tp = tcp_sk(sk);  in tcp_event_ack_sent()
   262  struct tcp_sock *tp = tcp_sk(sk);  in tcp_select_window()
   322  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_ecn_send_synack()
   335  struct tcp_sock *tp = tcp_sk(sk);  in tcp_ecn_send_syn()
   379  struct tcp_sock *tp = tcp_sk(sk);  in tcp_ecn_send()
   493  if (likely(!BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk),  in bpf_skops_hdr_opt_len()
    [all …]

tcp_dctcp.c
    91  const struct tcp_sock *tp = tcp_sk(sk);  in dctcp_init()
   121  struct tcp_sock *tp = tcp_sk(sk);  in dctcp_update_alpha()
   129  const struct tcp_sock *tp = tcp_sk(sk);  in dctcp_update_alpha()
   177  struct tcp_sock *tp = tcp_sk(sk);  in dctcp_state()
   219  const struct tcp_sock *tp = tcp_sk(sk);  in dctcp_get_info()
   246  struct tcp_sock *tp = tcp_sk(sk);

tcp_cubic.c
   115  return tcp_sk(sk)->tcp_mstamp;  in bictcp_clock_us()
   120  struct tcp_sock *tp = tcp_sk(sk);  in bictcp_hystart_reset()
   139  tcp_sk(sk)->snd_ssthresh = initial_ssthresh;  in cubictcp_init()
   149  delta = now - tcp_sk(sk)->lsndtime;  in cubictcp_cwnd_event()
   326  struct tcp_sock *tp = tcp_sk(sk);  in cubictcp_cong_avoid()
   343  const struct tcp_sock *tp = tcp_sk(sk);  in cubictcp_recalc_ssthresh()
   388  struct tcp_sock *tp = tcp_sk(sk);  in hystart_update()
   450  const struct tcp_sock *tp = tcp_sk(sk);  in cubictcp_acked()

tcp_cdg.c
   143  struct tcp_sock *tp = tcp_sk(sk);  in tcp_cdg_hystart_update()
   244  struct tcp_sock *tp = tcp_sk(sk);  in tcp_cdg_backoff()
   265  struct tcp_sock *tp = tcp_sk(sk);  in tcp_cdg_cong_avoid()
   302  struct tcp_sock *tp = tcp_sk(sk);  in tcp_cdg_acked()
   331  struct tcp_sock *tp = tcp_sk(sk);  in tcp_cdg_ssthresh()
   348  struct tcp_sock *tp = tcp_sk(sk);  in tcp_cdg_cwnd_event()
   376  struct tcp_sock *tp = tcp_sk(sk);  in tcp_cdg_init()

tcp_rate.c
    42  struct tcp_sock *tp = tcp_sk(sk);  in tcp_rate_skb_sent()
    83  struct tcp_sock *tp = tcp_sk(sk);  in tcp_rate_skb_delivered()
   120  struct tcp_sock *tp = tcp_sk(sk);  in tcp_rate_gen()
   196  struct tcp_sock *tp = tcp_sk(sk);  in tcp_rate_check_app_limited()

tcp_highspeed.c
   102  struct tcp_sock *tp = tcp_sk(sk);  in hstcp_init()
   114  struct tcp_sock *tp = tcp_sk(sk);  in hstcp_cong_avoid()
   153  const struct tcp_sock *tp = tcp_sk(sk);  in hstcp_ssthresh()

tcp_fastopen.c
   172  struct tcp_sock *tp = tcp_sk(sk);  in tcp_fastopen_add_skb()
   261  tp = tcp_sk(child);  in tcp_fastopen_create_child()
   340  tcp_sk(sk)->fastopen_no_cookie ||  in tcp_fastopen_no_cookie()
   436  tcp_sk(sk)->fastopen_client_fail = TFO_COOKIE_UNAVAILABLE;  in tcp_fastopen_cookie_check()
   450  struct tcp_sock *tp = tcp_sk(sk);  in tcp_fastopen_defer_connect()
   545  tcp_sk(sk)->syn_fastopen_ch = 1;  in tcp_fastopen_active_should_disable()
   557  struct tcp_sock *tp = tcp_sk(sk);  in tcp_fastopen_active_disable_ofo_check()
   584  struct tcp_sock *tp = tcp_sk(sk);  in tcp_fastopen_active_detect_blackhole()

tcp_dctcp.h
     6  struct tcp_sock *tp = tcp_sk(sk);  in dctcp_ece_ack_cwr()
    35  *prior_rcv_nxt = tcp_sk(sk)->rcv_nxt;  in dctcp_ece_ack_update()

tcp_scalable.c
    20  struct tcp_sock *tp = tcp_sk(sk);  in tcp_scalable_cong_avoid()
    36  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_scalable_ssthresh()

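Most of the files above (tcp_westwood.c, tcp_cubic.c, tcp_cdg.c, tcp_highspeed.c, tcp_scalable.c, tcp_dctcp.c, tcp_bbr.c) are congestion-control modules, and nearly every hit is a callback opening with the same move: derive the tcp_sock from the struct sock argument. A hypothetical skeleton of that shape, not a module from the tree; the ssthresh and cong_avoid bodies are bare Reno-style placeholders:

// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <net/tcp.h>

static void sketch_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);	/* the ubiquitous opener */

	if (!tcp_is_cwnd_limited(sk))
		return;
	tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked);
}

static u32 sketch_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	/* Halve the window on loss, never below two segments */
	return max(tcp_snd_cwnd(tp) >> 1U, 2U);
}

static struct tcp_congestion_ops sketch_ops __read_mostly = {
	.name		= "sketch",
	.owner		= THIS_MODULE,
	.ssthresh	= sketch_ssthresh,
	.cong_avoid	= sketch_cong_avoid,
	.undo_cwnd	= tcp_reno_undo_cwnd,
};

static int __init sketch_register(void)
{
	return tcp_register_congestion_control(&sketch_ops);
}

static void __exit sketch_unregister(void)
{
	tcp_unregister_congestion_control(&sketch_ops);
}

module_init(sketch_register);
module_exit(sketch_unregister);
MODULE_LICENSE("GPL");

ssthresh, undo_cwnd, and one of cong_avoid or cong_control are the callbacks the kernel requires before tcp_register_congestion_control() will accept the ops.
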
/linux/include/net/
tcp.h
   547  last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);  in tcp_synq_overflow()
   570  last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);  in tcp_synq_no_recent_overflow()
   690  if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)  in tcp_clear_xmit_timers()
   693  if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1)  in tcp_clear_xmit_timers()
   771  struct tcp_sock *tp = tcp_sk(sk);  in tcp_fast_path_check()
  1345  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_current_ssthresh()
  1390  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_is_cwnd_limited()
  1418  s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;  in tcp_pacing_delay()
  1456  if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)  in tcp_check_probe_timer()
  1506  struct tcp_sock *tp = tcp_sk(sk);  in tcp_slow_start_after_idle_check()
    [all …]

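The include/net/tcp.h hits are static inline helpers that compute derived state straight from tcp_sock fields. The hit at source line 1418, inside tcp_pacing_delay(), is typical; filled out as a sketch (the _sketch suffix flags that the return path is reconstructed, not quoted, while the subtraction is verbatim from the hit):

static inline s64 tcp_pacing_delay_sketch(const struct sock *sk)
{
	/* How far in the future is the next pacing release time? */
	s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;

	return delay > 0 ? delay : 0;
}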