Lines matching refs:sk
(identifier cross-reference: every line that references `sk`, the struct sock pointer, in the kernel's TCP header. Each entry shows the source line number, the matching text, and the containing function for matches inside inline helpers; a trailing "argument" marks uses of sk as a function parameter.)

58 void tcp_time_wait(struct sock *sk, int state, int timeo);
275 static inline bool tcp_under_memory_pressure(const struct sock *sk) in tcp_under_memory_pressure() argument
277 if (mem_cgroup_sockets_enabled && sk->sk_memcg && in tcp_under_memory_pressure()
278 mem_cgroup_under_socket_pressure(sk->sk_memcg)) in tcp_under_memory_pressure()
300 static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb) in tcp_wmem_free_skb() argument
302 sk_wmem_queued_add(sk, -skb->truesize); in tcp_wmem_free_skb()
304 sk_mem_uncharge(sk, skb->truesize); in tcp_wmem_free_skb()
306 sk_mem_uncharge(sk, SKB_TRUESIZE(skb_end_offset(skb))); in tcp_wmem_free_skb()
310 void sk_forced_mem_schedule(struct sock *sk, int size);
312 bool tcp_check_oom(const struct sock *sk, int shift);
326 void tcp_shutdown(struct sock *sk, int how);
331 void tcp_remove_empty_skb(struct sock *sk);
332 int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
333 int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
334 int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
337 int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
338 int tcp_wmem_schedule(struct sock *sk, int copy);
339 void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
341 void tcp_release_cb(struct sock *sk);
343 void tcp_write_timer_handler(struct sock *sk);
344 void tcp_delack_timer_handler(struct sock *sk);
345 int tcp_ioctl(struct sock *sk, int cmd, int *karg);
346 enum skb_drop_reason tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
347 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
348 void tcp_rcv_space_adjust(struct sock *sk);
349 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
350 void tcp_twsk_destructor(struct sock *sk);
352 ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
355 struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
358 static inline void tcp_dec_quickack_mode(struct sock *sk) in tcp_dec_quickack_mode() argument
360 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_dec_quickack_mode()
364 const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0; in tcp_dec_quickack_mode()
392 struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
397 void tcp_enter_loss(struct sock *sk);
398 void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
400 void tcp_update_metrics(struct sock *sk);
401 void tcp_init_metrics(struct sock *sk);
404 void __tcp_close(struct sock *sk, long timeout);
405 void tcp_close(struct sock *sk, long timeout);
406 void tcp_init_sock(struct sock *sk);
407 void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
410 int do_tcp_getsockopt(struct sock *sk, int level,
412 int tcp_getsockopt(struct sock *sk, int level, int optname,
415 int do_tcp_setsockopt(struct sock *sk, int level, int optname,
417 int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
419 void tcp_set_keepalive(struct sock *sk, int val);
421 int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
423 int tcp_set_rcvlowat(struct sock *sk, int val);
424 int tcp_set_window_clamp(struct sock *sk, int val);
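
The setsockopt/getsockopt entry points above (lines 410-424) are reached from ordinary userspace socket calls. A minimal sketch, assuming an already-created TCP socket fd; the option values are illustrative only:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    static void tune_tcp_socket(int fd)
    {
            int one = 1, clamp = 65535, lowat = 4096;

            /* IPPROTO_TCP options land in do_tcp_setsockopt() */
            setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
            /* SOL_SOCKET keepalive toggles tcp_set_keepalive() */
            setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &one, sizeof(one));
            /* serviced by tcp_set_window_clamp() */
            setsockopt(fd, IPPROTO_TCP, TCP_WINDOW_CLAMP, &clamp, sizeof(clamp));
            /* serviced by tcp_set_rcvlowat() */
            setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &lowat, sizeof(lowat));
    }
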
427 void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
429 void tcp_data_ready(struct sock *sk);
441 u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
443 u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
448 struct sock *sk, struct tcphdr *th);
453 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
454 void tcp_v4_mtu_reduced(struct sock *sk);
455 void tcp_req_err(struct sock *sk, u32 seq, bool abort);
456 void tcp_ld_RTO_revert(struct sock *sk, u32 seq);
457 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
458 struct sock *tcp_create_openreq_child(const struct sock *sk,
461 void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
462 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
467 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
468 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
469 int tcp_connect(struct sock *sk);
475 struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
480 int tcp_disconnect(struct sock *sk, int flags);
482 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
483 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
484 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
487 struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
491 struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
493 struct sock *sk, struct sk_buff *skb,
530 static inline void tcp_synq_overflow(const struct sock *sk) in tcp_synq_overflow() argument
535 if (sk->sk_reuseport) { in tcp_synq_overflow()
538 reuse = rcu_dereference(sk->sk_reuseport_cb); in tcp_synq_overflow()
548 last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp); in tcp_synq_overflow()
550 WRITE_ONCE(tcp_sk_rw(sk)->rx_opt.ts_recent_stamp, now); in tcp_synq_overflow()
554 static inline bool tcp_synq_no_recent_overflow(const struct sock *sk) in tcp_synq_no_recent_overflow() argument
559 if (sk->sk_reuseport) { in tcp_synq_no_recent_overflow()
562 reuse = rcu_dereference(sk->sk_reuseport_cb); in tcp_synq_no_recent_overflow()
571 last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp); in tcp_synq_no_recent_overflow()
617 return skb->sk; in cookie_bpf_ok()
620 struct request_sock *cookie_bpf_check(struct sock *sk, struct sk_buff *skb);
627 static inline struct request_sock *cookie_bpf_check(struct net *net, struct sock *sk, in cookie_bpf_check() argument
636 struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
644 void tcp_skb_entail(struct sock *sk, struct sk_buff *skb);
646 void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
648 int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
649 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
650 void tcp_retransmit_timer(struct sock *sk);
653 void tcp_enter_recovery(struct sock *sk, bool ece_ack);
659 int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
665 void tcp_send_fin(struct sock *sk);
666 void tcp_send_active_reset(struct sock *sk, gfp_t priority,
670 void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
671 void tcp_send_ack(struct sock *sk);
672 void tcp_send_delayed_ack(struct sock *sk);
673 void tcp_send_loss_probe(struct sock *sk);
674 bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
679 void tcp_rearm_rto(struct sock *sk);
680 void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
681 void tcp_done_with_error(struct sock *sk, int err);
682 void tcp_reset(struct sock *sk, struct sk_buff *skb);
683 void tcp_fin(struct sock *sk);
684 void tcp_check_space(struct sock *sk);
685 void tcp_sack_compress_send_ack(struct sock *sk);
693 static inline void tcp_add_receive_queue(struct sock *sk, struct sk_buff *skb) in tcp_add_receive_queue() argument
697 __skb_queue_tail(&sk->sk_receive_queue, skb); in tcp_add_receive_queue()
702 static inline void tcp_clear_xmit_timers(struct sock *sk) in tcp_clear_xmit_timers() argument
704 if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1) in tcp_clear_xmit_timers()
705 __sock_put(sk); in tcp_clear_xmit_timers()
707 if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1) in tcp_clear_xmit_timers()
708 __sock_put(sk); in tcp_clear_xmit_timers()
710 inet_csk_clear_xmit_timers(sk); in tcp_clear_xmit_timers()
713 unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
714 unsigned int tcp_current_mss(struct sock *sk);
715 u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);
744 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
746 int tcp_read_sock_noack(struct sock *sk, read_descriptor_t *desc,
749 int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
750 struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off);
751 void tcp_read_done(struct sock *sk, size_t len);
753 void tcp_initialize_rcv_mss(struct sock *sk);
755 int tcp_mtu_to_mss(struct sock *sk, int pmtu);
756 int tcp_mss_to_mtu(struct sock *sk, int mss);
757 void tcp_mtup_init(struct sock *sk);
759 static inline void tcp_bound_rto(struct sock *sk) in tcp_bound_rto() argument
761 if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX) in tcp_bound_rto()
762 inet_csk(sk)->icsk_rto = TCP_RTO_MAX; in tcp_bound_rto()
786 static inline void tcp_fast_path_check(struct sock *sk) in tcp_fast_path_check() argument
788 struct tcp_sock *tp = tcp_sk(sk); in tcp_fast_path_check()
792 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf && in tcp_fast_path_check()
797 u32 tcp_delack_max(const struct sock *sk);
800 static inline u32 tcp_rto_min(const struct sock *sk) in tcp_rto_min() argument
802 const struct dst_entry *dst = __sk_dst_get(sk); in tcp_rto_min()
803 u32 rto_min = inet_csk(sk)->icsk_rto_min; in tcp_rto_min()
810 static inline u32 tcp_rto_min_us(const struct sock *sk) in tcp_rto_min_us() argument
812 return jiffies_to_usecs(tcp_rto_min(sk)); in tcp_rto_min_us()
843 u32 __tcp_select_window(struct sock *sk);
845 void tcp_send_window_probe(struct sock *sk);
1038 INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
1171 u32 (*ssthresh)(struct sock *sk);
1174 void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
1177 void (*set_state)(struct sock *sk, u8 new_state);
1180 void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
1183 void (*in_ack_event)(struct sock *sk, u32 flags);
1186 void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
1189 u32 (*min_tso_segs)(struct sock *sk);
1194 void (*cong_control)(struct sock *sk, u32 ack, int flag, const struct rate_sample *rs);
1198 u32 (*undo_cwnd)(struct sock *sk);
1200 u32 (*sndbuf_expand)(struct sock *sk);
1204 size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
1214 void (*init)(struct sock *sk);
1216 void (*release)(struct sock *sk);
1225 void tcp_assign_congestion_control(struct sock *sk);
1226 void tcp_init_congestion_control(struct sock *sk);
1227 void tcp_cleanup_congestion_control(struct sock *sk);
1233 int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
1238 u32 tcp_reno_ssthresh(struct sock *sk);
1239 u32 tcp_reno_undo_cwnd(struct sock *sk);
1240 void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
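
The tcp_congestion_ops hooks (lines 1171-1216) are the contract a pluggable congestion-control algorithm fills in, and the tcp_reno_* exports just above let a module fall back to NewReno behaviour for individual hooks. A minimal module sketch built purely from those helpers (the name "example" is hypothetical):

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/module.h>
    #include <net/tcp.h>

    static struct tcp_congestion_ops tcp_example __read_mostly = {
            .ssthresh   = tcp_reno_ssthresh,    /* halve cwnd on loss */
            .cong_avoid = tcp_reno_cong_avoid,  /* slow start + AIMD */
            .undo_cwnd  = tcp_reno_undo_cwnd,   /* revert spurious reductions */
            .name       = "example",
            .owner      = THIS_MODULE,
    };

    static int __init tcp_example_init(void)
    {
            return tcp_register_congestion_control(&tcp_example);
    }

    static void __exit tcp_example_exit(void)
    {
            tcp_unregister_congestion_control(&tcp_example);
    }

    module_init(tcp_example_init);
    module_exit(tcp_example_exit);
    MODULE_LICENSE("GPL");

Once loaded, it can be selected per socket through tcp_set_congestion_control(), i.e. setsockopt(TCP_CONGESTION, "example").
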
1255 static inline bool tcp_ca_needs_ecn(const struct sock *sk) in tcp_ca_needs_ecn() argument
1257 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_needs_ecn()
1262 static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event) in tcp_ca_event() argument
1264 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_event()
1267 icsk->icsk_ca_ops->cwnd_event(sk, event); in tcp_ca_event()
1271 void tcp_set_ca_state(struct sock *sk, const u8 ca_state);
1274 void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
1275 void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
1277 void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
1279 void tcp_rate_check_app_limited(struct sock *sk);
1350 static inline bool tcp_in_cwnd_reduction(const struct sock *sk) in tcp_in_cwnd_reduction() argument
1353 (1 << inet_csk(sk)->icsk_ca_state); in tcp_in_cwnd_reduction()
1360 static inline __u32 tcp_current_ssthresh(const struct sock *sk) in tcp_current_ssthresh() argument
1362 const struct tcp_sock *tp = tcp_sk(sk); in tcp_current_ssthresh()
1364 if (tcp_in_cwnd_reduction(sk)) in tcp_current_ssthresh()
1375 void tcp_enter_cwr(struct sock *sk);
1405 static inline bool tcp_is_cwnd_limited(const struct sock *sk) in tcp_is_cwnd_limited() argument
1407 const struct tcp_sock *tp = tcp_sk(sk); in tcp_is_cwnd_limited()
1425 static inline bool tcp_needs_internal_pacing(const struct sock *sk) in tcp_needs_internal_pacing() argument
1427 return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED; in tcp_needs_internal_pacing()
1433 static inline unsigned long tcp_pacing_delay(const struct sock *sk) in tcp_pacing_delay() argument
1435 s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache; in tcp_pacing_delay()
1440 static inline void tcp_reset_xmit_timer(struct sock *sk, in tcp_reset_xmit_timer() argument
1445 inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk), in tcp_reset_xmit_timer()
1455 static inline unsigned long tcp_probe0_base(const struct sock *sk) in tcp_probe0_base() argument
1457 return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN); in tcp_probe0_base()
1461 static inline unsigned long tcp_probe0_when(const struct sock *sk, in tcp_probe0_when() argument
1465 inet_csk(sk)->icsk_backoff); in tcp_probe0_when()
1466 u64 when = (u64)tcp_probe0_base(sk) << backoff; in tcp_probe0_when()
1471 static inline void tcp_check_probe_timer(struct sock *sk) in tcp_check_probe_timer() argument
1473 if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending) in tcp_check_probe_timer()
1474 tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, in tcp_check_probe_timer()
1475 tcp_probe0_base(sk), TCP_RTO_MAX); in tcp_check_probe_timer()
1503 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
1507 int tcp_filter(struct sock *sk, struct sk_buff *skb);
1508 void tcp_set_state(struct sock *sk, int state);
1509 void tcp_done(struct sock *sk);
1510 int tcp_abort(struct sock *sk, int err);
1518 void tcp_cwnd_restart(struct sock *sk, s32 delta);
1520 static inline void tcp_slow_start_after_idle_check(struct sock *sk) in tcp_slow_start_after_idle_check() argument
1522 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; in tcp_slow_start_after_idle_check()
1523 struct tcp_sock *tp = tcp_sk(sk); in tcp_slow_start_after_idle_check()
1526 if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) || in tcp_slow_start_after_idle_check()
1530 if (delta > inet_csk(sk)->icsk_rto) in tcp_slow_start_after_idle_check()
1531 tcp_cwnd_restart(sk, delta); in tcp_slow_start_after_idle_check()
1535 void tcp_select_initial_window(const struct sock *sk, int __space,
1547 static inline int tcp_win_from_space(const struct sock *sk, int space) in tcp_win_from_space() argument
1549 return __tcp_win_from_space(tcp_sk(sk)->scaling_ratio, space); in tcp_win_from_space()
1561 static inline int tcp_space_from_win(const struct sock *sk, int win) in tcp_space_from_win() argument
1563 return __tcp_space_from_win(tcp_sk(sk)->scaling_ratio, win); in tcp_space_from_win()
1571 static inline void tcp_scaling_ratio_init(struct sock *sk) in tcp_scaling_ratio_init() argument
1573 tcp_sk(sk)->scaling_ratio = TCP_DEFAULT_SCALING_RATIO; in tcp_scaling_ratio_init()
1577 static inline int tcp_space(const struct sock *sk) in tcp_space() argument
1579 return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) - in tcp_space()
1580 READ_ONCE(sk->sk_backlog.len) - in tcp_space()
1581 atomic_read(&sk->sk_rmem_alloc)); in tcp_space()
1584 static inline int tcp_full_space(const struct sock *sk) in tcp_full_space() argument
1586 return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf)); in tcp_full_space()
1589 static inline void __tcp_adjust_rcv_ssthresh(struct sock *sk, u32 new_ssthresh) in __tcp_adjust_rcv_ssthresh() argument
1591 int unused_mem = sk_unused_reserved_mem(sk); in __tcp_adjust_rcv_ssthresh()
1592 struct tcp_sock *tp = tcp_sk(sk); in __tcp_adjust_rcv_ssthresh()
1597 tcp_win_from_space(sk, unused_mem)); in __tcp_adjust_rcv_ssthresh()
1600 static inline void tcp_adjust_rcv_ssthresh(struct sock *sk) in tcp_adjust_rcv_ssthresh() argument
1602 __tcp_adjust_rcv_ssthresh(sk, 4U * tcp_sk(sk)->advmss); in tcp_adjust_rcv_ssthresh()
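
tcp_win_from_space()/tcp_space_from_win() (lines 1547-1563) convert between receive-buffer bytes and advertisable window through the per-socket scaling_ratio, an estimate of how much of each skb's truesize is actual payload. A userspace sketch of the arithmetic, assuming the kernel's fixed-point shift is 8 bits (so the ratio is a fraction of 256):

    #include <stdint.h>
    #include <stdio.h>

    /* mirrors __tcp_win_from_space(): window = space * ratio / 256 */
    static int win_from_space(uint8_t scaling_ratio, int space)
    {
            return (int)(((int64_t)space * scaling_ratio) >> 8);
    }

    int main(void)
    {
            /* ratio 128/256: a 1 MiB rcvbuf yields a ~512 KiB window */
            printf("%d\n", win_from_space(128, 1 << 20));
            return 0;
    }
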
1605 void tcp_cleanup_rbuf(struct sock *sk, int copied);
1606 void __tcp_cleanup_rbuf(struct sock *sk, int copied);
1614 static inline bool tcp_rmem_pressure(const struct sock *sk) in tcp_rmem_pressure() argument
1618 if (tcp_under_memory_pressure(sk)) in tcp_rmem_pressure()
1621 rcvbuf = READ_ONCE(sk->sk_rcvbuf); in tcp_rmem_pressure()
1624 return atomic_read(&sk->sk_rmem_alloc) > threshold; in tcp_rmem_pressure()
1627 static inline bool tcp_epollin_ready(const struct sock *sk, int target) in tcp_epollin_ready() argument
1629 const struct tcp_sock *tp = tcp_sk(sk); in tcp_epollin_ready()
1635 return (avail >= target) || tcp_rmem_pressure(sk) || in tcp_epollin_ready()
1636 (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss); in tcp_epollin_ready()
1643 void tcp_enter_memory_pressure(struct sock *sk);
1644 void tcp_leave_memory_pressure(struct sock *sk);
1691 static inline int tcp_fin_time(const struct sock *sk) in tcp_fin_time() argument
1693 int fin_timeout = tcp_sk(sk)->linger2 ? : in tcp_fin_time()
1694 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout); in tcp_fin_time()
1695 const int rto = inet_csk(sk)->icsk_rto; in tcp_fin_time()
1848 const struct sock *sk, const struct sk_buff *skb);
1849 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1852 int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr,
1856 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1858 void tcp_clear_md5_list(struct sock *sk);
1859 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1863 struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
1867 tcp_md5_do_lookup(const struct sock *sk, int l3index, in tcp_md5_do_lookup() argument
1872 return __tcp_md5_do_lookup(sk, l3index, addr, family, false); in tcp_md5_do_lookup()
1876 tcp_md5_do_lookup_any_l3index(const struct sock *sk, in tcp_md5_do_lookup_any_l3index() argument
1881 return __tcp_md5_do_lookup(sk, 0, addr, family, true); in tcp_md5_do_lookup_any_l3index()
1887 tcp_md5_do_lookup(const struct sock *sk, int l3index, in tcp_md5_do_lookup() argument
1894 tcp_md5_do_lookup_any_l3index(const struct sock *sk, in tcp_md5_do_lookup_any_l3index() argument
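
The TCP-MD5 (RFC 2385) helpers above manage per-peer signing keys; userspace installs them with the TCP_MD5SIG socket option, whose handler ultimately calls tcp_md5_do_add(). A sketch with a placeholder peer address and key:

    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>

    static int enable_tcp_md5(int fd, const struct sockaddr_in *peer)
    {
            struct tcp_md5sig sig;

            memset(&sig, 0, sizeof(sig));
            memcpy(&sig.tcpm_addr, peer, sizeof(*peer));
            sig.tcpm_keylen = 6;                    /* placeholder key */
            memcpy(sig.tcpm_key, "secret", 6);
            return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &sig, sizeof(sig));
    }
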
1912 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
1914 void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
1926 void tcp_fastopen_destroy_cipher(struct sock *sk);
1928 int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
1932 void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
1933 struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
1938 bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
1940 bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
1953 void tcp_fastopen_active_disable(struct sock *sk);
1954 bool tcp_fastopen_active_should_disable(struct sock *sk);
1955 void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
1956 void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
1960 struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk) in tcp_fastopen_get_ctx() argument
1964 ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx); in tcp_fastopen_get_ctx()
1966 ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx); in tcp_fastopen_get_ctx()
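
The tcp_fastopen_* API above implements TCP Fast Open (RFC 7413). From userspace, a server opts in with the TCP_FASTOPEN option and a client sends data in the SYN via MSG_FASTOPEN; a sketch, with the sockets and server address assumed set up elsewhere:

    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>

    static void tfo_listener(int listen_fd)
    {
            int qlen = 16;  /* max queued SYN-with-data requests */

            setsockopt(listen_fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
    }

    static void tfo_connect_send(int fd, const struct sockaddr_in *srv)
    {
            /* data rides in the SYN once a cookie is cached; without one this
             * transparently falls back to a normal three-way handshake */
            sendto(fd, "ping", 4, MSG_FASTOPEN,
                   (const struct sockaddr *)srv, sizeof(*srv));
    }
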
1998 void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
1999 void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
2018 void tcp_write_queue_purge(struct sock *sk);
2020 static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk) in tcp_rtx_queue_head() argument
2022 return skb_rb_first(&sk->tcp_rtx_queue); in tcp_rtx_queue_head()
2025 static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk) in tcp_rtx_queue_tail() argument
2027 return skb_rb_last(&sk->tcp_rtx_queue); in tcp_rtx_queue_tail()
2030 static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk) in tcp_write_queue_tail() argument
2032 return skb_peek_tail(&sk->sk_write_queue); in tcp_write_queue_tail()
2035 #define tcp_for_write_queue_from_safe(skb, tmp, sk) \ argument
2036 skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
2038 static inline struct sk_buff *tcp_send_head(const struct sock *sk) in tcp_send_head() argument
2040 return skb_peek(&sk->sk_write_queue); in tcp_send_head()
2043 static inline bool tcp_skb_is_last(const struct sock *sk, in tcp_skb_is_last() argument
2046 return skb_queue_is_last(&sk->sk_write_queue, skb); in tcp_skb_is_last()
2056 static inline bool tcp_write_queue_empty(const struct sock *sk) in tcp_write_queue_empty() argument
2058 const struct tcp_sock *tp = tcp_sk(sk); in tcp_write_queue_empty()
2063 static inline bool tcp_rtx_queue_empty(const struct sock *sk) in tcp_rtx_queue_empty() argument
2065 return RB_EMPTY_ROOT(&sk->tcp_rtx_queue); in tcp_rtx_queue_empty()
2068 static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk) in tcp_rtx_and_write_queues_empty() argument
2070 return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk); in tcp_rtx_and_write_queues_empty()
2073 static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb) in tcp_add_write_queue_tail() argument
2075 __skb_queue_tail(&sk->sk_write_queue, skb); in tcp_add_write_queue_tail()
2078 if (sk->sk_write_queue.next == skb) in tcp_add_write_queue_tail()
2079 tcp_chrono_start(sk, TCP_CHRONO_BUSY); in tcp_add_write_queue_tail()
2085 struct sock *sk) in tcp_insert_write_queue_before() argument
2087 __skb_queue_before(&sk->sk_write_queue, skb, new); in tcp_insert_write_queue_before()
2090 static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk) in tcp_unlink_write_queue() argument
2093 __skb_unlink(skb, &sk->sk_write_queue); in tcp_unlink_write_queue()
2098 static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk) in tcp_rtx_queue_unlink() argument
2101 rb_erase(&skb->rbnode, &sk->tcp_rtx_queue); in tcp_rtx_queue_unlink()
2104 static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk) in tcp_rtx_queue_unlink_and_free() argument
2107 tcp_rtx_queue_unlink(skb, sk); in tcp_rtx_queue_unlink_and_free()
2108 tcp_wmem_free_skb(sk, skb); in tcp_rtx_queue_unlink_and_free()
2111 static inline void tcp_write_collapse_fence(struct sock *sk) in tcp_write_collapse_fence() argument
2113 struct sk_buff *skb = tcp_write_queue_tail(sk); in tcp_write_collapse_fence()
2119 static inline void tcp_push_pending_frames(struct sock *sk) in tcp_push_pending_frames() argument
2121 if (tcp_send_head(sk)) { in tcp_push_pending_frames()
2122 struct tcp_sock *tp = tcp_sk(sk); in tcp_push_pending_frames()
2124 __tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle); in tcp_push_pending_frames()
2143 static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb) in tcp_advance_highest_sack() argument
2145 tcp_sk(sk)->highest_sack = skb_rb_next(skb); in tcp_advance_highest_sack()
2148 static inline struct sk_buff *tcp_highest_sack(struct sock *sk) in tcp_highest_sack() argument
2150 return tcp_sk(sk)->highest_sack; in tcp_highest_sack()
2153 static inline void tcp_highest_sack_reset(struct sock *sk) in tcp_highest_sack_reset() argument
2155 tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk); in tcp_highest_sack_reset()
2159 static inline void tcp_highest_sack_replace(struct sock *sk, in tcp_highest_sack_replace() argument
2163 if (old == tcp_highest_sack(sk)) in tcp_highest_sack_replace()
2164 tcp_sk(sk)->highest_sack = new; in tcp_highest_sack_replace()
2168 static inline bool inet_sk_transparent(const struct sock *sk) in inet_sk_transparent() argument
2170 switch (sk->sk_state) { in inet_sk_transparent()
2172 return inet_twsk(sk)->tw_transparent; in inet_sk_transparent()
2174 return inet_rsk(inet_reqsk(sk))->no_srccheck; in inet_sk_transparent()
2176 return inet_test_bit(TRANSPARENT, sk); in inet_sk_transparent()
2212 void tcp_v4_destroy_sock(struct sock *sk);
2242 bool tcp_stream_memory_free(const struct sock *sk, int wake);
2249 int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
2252 struct sock *sk, struct sk_buff *skb);
2257 struct tcp_md5sig_key *(*md5_lookup) (const struct sock *sk,
2261 const struct sock *sk,
2263 int (*md5_parse)(struct sock *sk,
2269 int (*ao_parse)(struct sock *sk, int optname, sockptr_t optval, int optlen);
2270 struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
2274 const struct sock *sk,
2277 const struct sock *sk, const struct sk_buff *skb,
2285 struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
2289 const struct sock *sk,
2293 struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
2296 int (*ao_calc_key)(struct tcp_ao_key *mkt, u8 *key, struct request_sock *sk);
2305 struct dst_entry *(*route_req)(const struct sock *sk,
2312 int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
2326 const struct sock *sk, struct sk_buff *skb, in cookie_init_sequence() argument
2329 tcp_synq_overflow(sk); in cookie_init_sequence()
2330 __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); in cookie_init_sequence()
2335 const struct sock *sk, struct sk_buff *skb, in cookie_init_sequence() argument
2359 static inline void tcp_get_current_key(const struct sock *sk, in tcp_get_current_key() argument
2363 const struct tcp_sock *tp = tcp_sk(sk); in tcp_get_current_key()
2371 lockdep_sock_is_held(sk)); in tcp_get_current_key()
2382 out->md5_key = tp->af_specific->md5_lookup(sk, sk); in tcp_get_current_key()
2412 void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
2413 void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
2416 extern bool tcp_rack_mark_lost(struct sock *sk);
2419 extern void tcp_rack_reo_timeout(struct sock *sk);
2420 extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs);
2439 static inline void tcp_plb_init(const struct sock *sk, in tcp_plb_init() argument
2445 void tcp_plb_update_state(const struct sock *sk, struct tcp_plb_state *plb,
2447 void tcp_plb_check_rehash(struct sock *sk, struct tcp_plb_state *plb);
2448 void tcp_plb_update_state_upon_rto(struct sock *sk, struct tcp_plb_state *plb);
2450 static inline void tcp_warn_once(const struct sock *sk, bool cond, const char *str) in tcp_warn_once() argument
2455 tcp_snd_cwnd(tcp_sk(sk)), in tcp_warn_once()
2456 tcp_sk(sk)->packets_out, tcp_sk(sk)->sacked_out, in tcp_warn_once()
2457 tcp_sk(sk)->lost_out, tcp_sk(sk)->retrans_out, in tcp_warn_once()
2458 tcp_sk(sk)->tlp_high_seq, sk->sk_state, in tcp_warn_once()
2459 inet_csk(sk)->icsk_ca_state, in tcp_warn_once()
2460 tcp_sk(sk)->advmss, tcp_sk(sk)->mss_cache, in tcp_warn_once()
2461 inet_csk(sk)->icsk_pmtu_cookie); in tcp_warn_once()
2465 static inline s64 tcp_rto_delta_us(const struct sock *sk) in tcp_rto_delta_us() argument
2467 const struct sk_buff *skb = tcp_rtx_queue_head(sk); in tcp_rto_delta_us()
2468 u32 rto = inet_csk(sk)->icsk_rto; in tcp_rto_delta_us()
2473 return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp; in tcp_rto_delta_us()
2475 tcp_warn_once(sk, 1, "rtx queue empty: "); in tcp_rto_delta_us()
2517 static inline int tcp_inq(struct sock *sk) in tcp_inq() argument
2519 struct tcp_sock *tp = tcp_sk(sk); in tcp_inq()
2522 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { in tcp_inq()
2524 } else if (sock_flag(sk, SOCK_URGINLINE) || in tcp_inq()
2532 if (answ && sock_flag(sk, SOCK_DONE)) in tcp_inq()
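
tcp_inq() computes the unread bytes sitting in the receive queue; the same count is what userspace obtains from the SIOCINQ ioctl (FIONREAD on TCP sockets):

    #include <sys/ioctl.h>
    #include <linux/sockios.h>      /* SIOCINQ */

    static int tcp_unread_bytes(int fd)
    {
            int n = 0;

            return ioctl(fd, SIOCINQ, &n) ? -1 : n;
    }
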
2564 static inline void tcp_listendrop(const struct sock *sk) in tcp_listendrop() argument
2566 atomic_inc(&((struct sock *)sk)->sk_drops); in tcp_listendrop()
2567 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS); in tcp_listendrop()
2584 int (*init)(struct sock *sk);
2586 void (*update)(struct sock *sk, struct proto *p,
2587 void (*write_space)(struct sock *sk));
2589 void (*release)(struct sock *sk);
2591 int (*get_info)(struct sock *sk, struct sk_buff *skb);
2592 size_t (*get_info_size)(const struct sock *sk);
2602 int tcp_set_ulp(struct sock *sk, const char *name);
2604 void tcp_cleanup_ulp(struct sock *sk);
2605 void tcp_update_ulp(struct sock *sk, struct proto *p,
2606 void (*write_space)(struct sock *sk));
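
tcp_set_ulp() attaches a registered upper-layer protocol by name and is reached through the TCP_ULP socket option; the canonical user is kTLS. A sketch, assuming a connected socket and a kernel with the tls module available:

    #include <sys/socket.h>
    #include <netinet/tcp.h>

    /* tcp_set_ulp(sk, "tls") looks up the "tls" tcp_ulp_ops and runs
     * its init() hook on this socket */
    static int enable_ktls(int fd)
    {
            return setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
    }
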
2617 int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
2618 void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
2627 void tcp_eat_skb(struct sock *sk, struct sk_buff *skb);
2629 static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb) in tcp_eat_skb() argument
2634 int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
2639 static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk) in tcp_bpf_clone() argument
2666 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args) in tcp_call_bpf() argument
2672 if (sk_fullsock(sk)) { in tcp_call_bpf()
2674 sock_owned_by_me(sk); in tcp_call_bpf()
2677 sock_ops.sk = sk; in tcp_call_bpf()
2690 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2) in tcp_call_bpf_2arg() argument
2694 return tcp_call_bpf(sk, op, 2, args); in tcp_call_bpf_2arg()
2697 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2, in tcp_call_bpf_3arg() argument
2702 return tcp_call_bpf(sk, op, 3, args); in tcp_call_bpf_3arg()
2706 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args) in tcp_call_bpf() argument
2711 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2) in tcp_call_bpf_2arg() argument
2716 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2, in tcp_call_bpf_3arg() argument
2724 static inline u32 tcp_timeout_init(struct sock *sk) in tcp_timeout_init() argument
2728 timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL); in tcp_timeout_init()
2735 static inline u32 tcp_rwnd_init_bpf(struct sock *sk) in tcp_rwnd_init_bpf() argument
2739 rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL); in tcp_rwnd_init_bpf()
2746 static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk) in tcp_bpf_ca_needs_ecn() argument
2748 return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1); in tcp_bpf_ca_needs_ecn()
2751 static inline void tcp_bpf_rtt(struct sock *sk, long mrtt, u32 srtt) in tcp_bpf_rtt() argument
2753 if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG)) in tcp_bpf_rtt()
2754 tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_RTT_CB, mrtt, srtt); in tcp_bpf_rtt()
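
tcp_call_bpf() and its 2-arg/3-arg wrappers dispatch to cgroup-attached BPF_PROG_TYPE_SOCK_OPS programs; the program hands a result back through skops->reply, which tcp_timeout_init(), tcp_rwnd_init_bpf() and tcp_bpf_ca_needs_ecn() above then consume. A sketch of such a program (reply values illustrative):

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("sockops")
    int tcp_tuner(struct bpf_sock_ops *skops)
    {
            switch (skops->op) {
            case BPF_SOCK_OPS_RWND_INIT:
                    skops->reply = 40;      /* initial rwnd, in packets */
                    break;
            case BPF_SOCK_OPS_NEEDS_ECN:
                    skops->reply = 1;       /* make tcp_bpf_ca_needs_ecn() true */
                    break;
            }
            return 1;   /* success: tcp_call_bpf() returns skops->reply */
    }

    char _license[] SEC("license") = "GPL";
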
2763 void (*cad)(struct sock *sk, u32 ack_seq));
2779 static inline u64 tcp_transmit_time(const struct sock *sk) in tcp_transmit_time() argument
2782 u32 delay = (sk->sk_state == TCP_TIME_WAIT) ? in tcp_transmit_time()
2783 tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay; in tcp_transmit_time()
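
tcp_transmit_time() folds in the per-socket tcp_tx_delay, which userspace can set with the TCP_TX_DELAY option (in microseconds). A brief sketch, assuming TCP_TX_DELAY is present in the toolchain's headers:

    #include <sys/socket.h>
    #include <netinet/tcp.h>

    static int set_tx_delay(int fd, unsigned int usec)
    {
            /* stored in tp->tcp_tx_delay and added to pacing timestamps */
            return setsockopt(fd, IPPROTO_TCP, TCP_TX_DELAY, &usec, sizeof(usec));
    }
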
2813 static inline bool tcp_ao_required(struct sock *sk, const void *saddr, in tcp_ao_required() argument
2823 ao_info = rcu_dereference_check(tcp_sk(sk)->ao_info, in tcp_ao_required()
2824 lockdep_sock_is_held(sk)); in tcp_ao_required()
2828 ao_key = tcp_ao_do_lookup(sk, l3index, saddr, family, -1, -1); in tcp_ao_required()
2831 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOREQUIRED); in tcp_ao_required()
2840 enum skb_drop_reason tcp_inbound_hash(struct sock *sk,