Searched refs:inet_csk (Results 1 – 25 of 48) sorted by relevance

/linux/include/net/
inet_connection_sock.h
151 #define inet_csk(ptr) container_of_const(ptr, struct inet_connection_sock, icsk_inet.sk) macro
155 return (void *)inet_csk(sk)->icsk_ca_priv; in inet_csk_ca()
180 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED; in inet_csk_schedule_ack()
185 return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED; in inet_csk_ack_scheduled()
190 memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack)); in inet_csk_delack_init()
206 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_clear_xmit_timer()
231 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_reset_xmit_timer()
282 reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue); in inet_csk_reqsk_queue_added()
287 return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue); in inet_csk_reqsk_queue_len()
307 return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ? in inet_csk_listen_poll()
[all …]
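
The header hits above all funnel through the inet_csk() accessor defined at line 151: a container_of_const() cast that recovers the enclosing struct inet_connection_sock from a struct sock pointer, which works because the sock is embedded via icsk_inet as the first member. Below is a minimal user-space sketch of the same pattern; the struct layouts are simplified stand-ins, not the real kernel definitions.

#include <stddef.h>
#include <stdio.h>

struct sock { int sk_state; };           /* placeholder fields */
struct inet_sock { struct sock sk; };    /* sock must stay first */
struct inet_connection_sock {
	struct inet_sock icsk_inet;
	unsigned long icsk_rto;          /* example icsk field */
};

/* Same idea as the kernel macro: recover the containing struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define inet_csk(sk) \
	container_of(sk, struct inet_connection_sock, icsk_inet.sk)

int main(void)
{
	struct inet_connection_sock icsk = { .icsk_rto = 200 };
	struct sock *sk = &icsk.icsk_inet.sk;

	/* icsk_inet.sk sits at offset 0, so the subtraction is a no-op. */
	printf("rto=%lu\n", inet_csk(sk)->icsk_rto);
	return 0;
}

Because the embedded sock sits at offset zero, the cast compiles down to a plain pointer conversion, which is also why the BPF selftests further down can reimplement inet_csk() as a direct cast.
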
tcp.h
414 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_dec_quickack_mode()
875 return READ_ONCE(inet_csk(sk)->icsk_rto_max); in tcp_rto_max()
880 inet_csk(sk)->icsk_rto = min(inet_csk(sk)->icsk_rto, tcp_rto_max(sk)); in tcp_bound_rto()
902 u32 rto_min = READ_ONCE(inet_csk(sk)->icsk_rto_min); in tcp_rto_min()
1417 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_needs_ecn()
1424 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_needs_accecn()
1431 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_ect_1_negotiation()
1438 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_no_fallback_rfc3168()
1445 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_event()
1532 (1 << inet_csk(sk)->icsk_ca_state); in tcp_in_cwnd_reduction()
[all …]
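
The hit at line 1532, inside tcp_in_cwnd_reduction(), shows the common idiom of testing icsk_ca_state as a bitmask: the state value is shifted into a one-hot bit and ANDed against a mask of the states of interest. A hedged, self-contained sketch of that test follows; the TCP_CA_* ordering mirrors the kernel's enum, but the helper name is invented.

enum { TCP_CA_Open, TCP_CA_Disorder, TCP_CA_CWR, TCP_CA_Recovery, TCP_CA_Loss };

#define TCPF_CA_CWR      (1 << TCP_CA_CWR)
#define TCPF_CA_Recovery (1 << TCP_CA_Recovery)

/* Mirrors the shape of tcp_in_cwnd_reduction(): true while the
 * sender is in the CWR or Recovery congestion state. */
static int in_cwnd_reduction(unsigned char ca_state)
{
	return (TCPF_CA_CWR | TCPF_CA_Recovery) & (1 << ca_state);
}
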
espintcp.h
35 const struct inet_connection_sock *icsk = inet_csk(sk); in espintcp_getctx()
/linux/net/ipv4/
tcp_timer.c
30 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clamp_rto_to_user_timeout()
52 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clamp_probe0_to_user_timeout()
222 if (!inet_csk(sk)->icsk_retransmits) in retransmits_timed_out()
245 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_write_timeout()
311 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_delack_timer_handler()
391 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_probe_timer()
442 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_update_rto_stats()
459 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_fastopen_synack_timer()
498 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_rtx_probe0_timed_out()
539 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_retransmit_timer()
[all …]
inet_connection_sock.c
577 if (!inet_csk(sk)->icsk_bind_hash) in inet_csk_get_port()
579 WARN_ON(inet_csk(sk)->icsk_bind_hash != tb); in inet_csk_get_port()
580 WARN_ON(inet_csk(sk)->icsk_bind2_hash != tb2); in inet_csk_get_port()
603 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_wait_for_connect()
651 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_accept()
723 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_init_xmit_timers()
733 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_clear_xmit_timers()
745 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_clear_xmit_timers_sync()
1006 reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req); in __inet_csk_reqsk_queue_drop()
1052 reqsk_queue_migrated(&inet_csk(nsk)->icsk_accept_queue, req); in reqsk_timer_handler()
[all …]
tcp_ulp.c
106 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_update_ulp()
114 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_cleanup_ulp()
132 struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_set_ulp()
tcp_fastopen.c
57 fastopenq = &inet_csk(lsk)->icsk_accept_queue.fastopenq; in reqsk_fastopen_remove()
130 inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1); in tcp_fastopen_destroy_cipher()
169 q = &inet_csk(sk)->icsk_accept_queue.fastopenq; in tcp_fastopen_reset_cipher()
331 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; in tcp_fastopen_create_child()
335 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL, in tcp_fastopen_create_child()
402 fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq; in tcp_fastopen_queue_check()
676 u32 timeouts = inet_csk(sk)->icsk_retransmits; in tcp_fastopen_active_detect_blackhole()
inet_hashtables.c
209 inet_csk(sk)->icsk_bind_hash = tb; in inet_bind_hash()
210 inet_csk(sk)->icsk_bind2_hash = tb2; in inet_bind_hash()
230 tb = inet_csk(sk)->icsk_bind_hash; in __inet_put_port()
231 inet_csk(sk)->icsk_bind_hash = NULL; in __inet_put_port()
236 if (inet_csk(sk)->icsk_bind2_hash) { in __inet_put_port()
237 struct inet_bind2_bucket *tb2 = inet_csk(sk)->icsk_bind2_hash; in __inet_put_port()
240 inet_csk(sk)->icsk_bind2_hash = NULL; in __inet_put_port()
275 tb = inet_csk(sk)->icsk_bind_hash; in __inet_inherit_port()
276 tb2 = inet_csk(sk)->icsk_bind2_hash; in __inet_inherit_port()
765 struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash; in inet_reuseport_add_sock()
[all …]
tcp_input.c
229 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_measure_rcv_mss()
313 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_incr_quickack()
325 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_enter_quickack_mode()
338 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_in_quickack_mode()
538 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; in __tcp_accecn_process()
555 ((1 << inet_csk(sk)->icsk_ca_state) & in __tcp_accecn_process()
608 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; in tcp_sndbuf_expand()
673 return 2 * inet_csk(sk)->icsk_ack.rcv_mss; in __tcp_grow_window()
727 inet_csk(sk)->icsk_ack.quick |= 1; in tcp_grow_window()
781 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clamp_window()
[all …]
tcp_minisocks.c
328 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_time_wait()
500 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_openreq_child()
562 newicsk = inet_csk(newsk); in tcp_create_openreq_child()
913 if (req->num_timeout < READ_ONCE(inet_csk(sk)->icsk_accept_queue.rskq_defer_accept) && in tcp_check_req()
926 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL, in tcp_check_req()
936 reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req); in tcp_check_req()
tcp_diag.c
118 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_diag_get_aux()
148 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_diag_get_aux_size()
284 return inet_sk_diag_fill(sk, inet_csk(sk), skb, cb, r, nlmsg_flags, in sk_diag_fill()
369 if (inet_sk_diag_fill(sk, inet_csk(sk), skb, in tcp_diag_dump()
tcp_htcp.c
84 const struct inet_connection_sock *icsk = inet_csk(sk); in measure_rtt()
104 const struct inet_connection_sock *icsk = inet_csk(sk); in measure_achieved_throughput()
tcp.c
426 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_init_sock()
1561 const struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_cleanup_rbuf()
3019 if (inet_csk(sk)->icsk_bind_hash && in tcp_set_state()
3363 inet_csk(sk)->icsk_backoff = 0; in tcp_write_queue_purge()
3369 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_disconnect()
3702 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_PUSHED; in __tcp_sock_set_quickack()
3722 WRITE_ONCE(inet_csk(sk)->icsk_syn_retries, val); in tcp_sock_set_syncnt()
3735 WRITE_ONCE(inet_csk(sk)->icsk_user_timeout, val); in tcp_sock_set_user_timeout()
3847 struct inet_connection_sock *icsk = inet_csk(sk); in do_tcp_setsockopt()
3948 WRITE_ONCE(inet_csk(sk)->icsk_rto_max, msecs_to_jiffies(val)); in do_tcp_setsockopt()
[all …]
/linux/net/dccp/
output.c
minisocks.c
timer.c
ipv6.c
proto.c
diag.c
/linux/tools/testing/selftests/bpf/progs/
bpf_dctcp.c
161 new_state != BPF_CORE_READ_BITFIELD(inet_csk(sk), icsk_ca_state)) in dctcp_react_to_loss()
193 if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) { in dctcp_ece_ack_update()
197 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; in dctcp_ece_ack_update()
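
In the CO-RE selftest above, icsk_ca_state is read with BPF_CORE_READ_BITFIELD() because it is a C bitfield and cannot be dereferenced directly in a relocatable way. A hedged sketch of that access pattern; the helper name is invented, and a BTF-generated vmlinux.h is assumed.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

/* Hypothetical helper: read the icsk_ca_state bitfield through a
 * CO-RE relocation instead of a direct member access. */
static __always_inline __u8 read_ca_state(struct sock *sk)
{
	struct inet_connection_sock *icsk =
		(struct inet_connection_sock *)sk;

	return BPF_CORE_READ_BITFIELD(icsk, icsk_ca_state);
}
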
bpf_tracing_net.h
163 static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
170 return (void *)inet_csk(sk)->icsk_ca_priv; in tcp_is_cwnd_limited()
144 static inline struct inet_connection_sock *inet_csk(const struct sock *sk) inet_csk() function
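
BPF programs cannot use the kernel's container_of_const() macro, so bpf_tracing_net.h redefines inet_csk() at line 144 as a plain cast, valid because icsk_inet.sk is the first member and therefore sits at offset zero. A sketch of that redefinition, assuming struct definitions from vmlinux.h:

static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
{
	/* The sock is embedded at offset 0, so a direct cast is
	 * equivalent to the kernel's container_of_const() version. */
	return (struct inet_connection_sock *)sk;
}
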
bpf_cc_cubic.c
123 (1 << inet_csk(sk)->icsk_ca_state)) { in BPF_PROG()
130 inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) { in BPF_PROG()
/linux/net/tls/
tls_toe.c
48 struct inet_connection_sock *icsk = inet_csk(sk); in tls_toe_sk_destruct()
/linux/net/mptcp/
diag.c
32 sf = rcu_dereference(inet_csk(sk)->icsk_ulp_data); in subflow_get_info()
subflow.c
770 struct inet_connection_sock *icsk = inet_csk(sk); in subflow_ulp_fallback()
788 if (inet_csk(ssk)->icsk_ulp_ops) { in mptcp_subflow_drop_ctx()
1520 if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue)) in subflow_data_ready()
1537 (tcp_sk(sk)->rcv_nxt - tcp_sk(sk)->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss) in subflow_data_ready()
1538 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; in subflow_data_ready()
1566 struct inet_connection_sock *icsk = inet_csk(sk); in mptcpv6_handle_mapped()
1844 struct inet_connection_sock *icsk = inet_csk(sk); in subflow_create_ctx()
1895 struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue; in mptcp_subflow_queue_clean()
1966 struct inet_connection_sock *icsk = inet_csk(sk); in subflow_ulp_init()
