/linux/tools/testing/selftests/ublk/
test_generic_02.sh
     33  fio --name=write_seq \
|
test_generic_01.sh
     33  fio --name=write_seq \
|
test_generic_12.sh
     35  fio --name=write_seq \
|
/linux/net/mptcp/
protocol.c
     452  msk->write_seq == READ_ONCE(msk->snd_una);   in mptcp_pending_data_fin_ack()
     988  static bool mptcp_skb_can_collapse_to(u64 write_seq,   in mptcp_skb_can_collapse_to() argument
     998  return mpext && mpext->data_seq + mpext->data_len == write_seq &&   in mptcp_skb_can_collapse_to()
    1014  df->data_seq + df->data_len == msk->write_seq;   in mptcp_frag_can_collapse_to()
    1081  if (snd_una == msk->snd_nxt && snd_una == msk->write_seq) {   in __mptcp_clean_una()
    1148  dfrag->data_seq = msk->write_seq;   in mptcp_carve_data_frag()
    1375  WRITE_ONCE(tcp_sk(ssk)->write_seq, tcp_sk(ssk)->write_seq + copy);   in mptcp_sendmsg_frag()
    1749  msk->snd_nxt + 1 == msk->write_seq)   in __mptcp_subflow_push_pending()
    1843  not_sent = msk->write_seq - msk->snd_nxt;   in mptcp_send_limit()
    1954  WRITE_ONCE(msk->write_seq, msk->write_seq + psize);   in mptcp_sendmsg()
    [all …]
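
Read together, these hits sketch the msk-level bookkeeping: write_seq grows as mptcp_sendmsg() copies data in, snd_nxt follows as data is pushed to subflows, snd_una follows the peer's DATA_ACKs, and mptcp_send_limit() derives the unsent amount as write_seq - snd_nxt. Below is a toy userspace model of that invariant only, not kernel code; struct toy_msk and the toy_* helpers are made-up names for illustration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model of the msk-level counters seen above (not kernel code):
 * snd_una <= snd_nxt <= write_seq must always hold. */
struct toy_msk {
	uint64_t snd_una;   /* data-level sequence acked by the peer  */
	uint64_t snd_nxt;   /* next data-level sequence to transmit   */
	uint64_t write_seq; /* end of data queued by the application  */
};

/* queueing 'copied' bytes advances write_seq, as in mptcp_sendmsg() */
static void toy_queue(struct toy_msk *msk, uint64_t copied)
{
	msk->write_seq += copied;
}

/* push path: transmitting advances snd_nxt, never past write_seq */
static void toy_push(struct toy_msk *msk, uint64_t len)
{
	assert(msk->snd_nxt + len <= msk->write_seq);
	msk->snd_nxt += len;
}

/* an incoming DATA_ACK advances snd_una, never past snd_nxt */
static void toy_ack(struct toy_msk *msk, uint64_t acked)
{
	assert(msk->snd_una + acked <= msk->snd_nxt);
	msk->snd_una += acked;
}

/* the "not yet sent" amount, as in mptcp_send_limit() */
static uint64_t toy_not_sent(const struct toy_msk *msk)
{
	return msk->write_seq - msk->snd_nxt;
}

int main(void)
{
	struct toy_msk msk = { 1, 1, 1 };

	toy_queue(&msk, 3000);
	toy_push(&msk, 2000);
	toy_ack(&msk, 1500);
	printf("unsent=%llu unacked=%llu\n",
	       (unsigned long long)toy_not_sent(&msk),
	       (unsigned long long)(msk.write_seq - msk.snd_una));
	return 0;
}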
|
protocol.h
     279  u64 write_seq;   member
     942  READ_ONCE(msk->write_seq) == READ_ONCE(msk->snd_nxt);   in mptcp_data_fin_enabled()
     959  notsent_bytes = READ_ONCE(msk->write_seq) - READ_ONCE(msk->snd_nxt);   in mptcp_stream_memory_free()
|
subflow.c
     472  WRITE_ONCE(msk->write_seq, subflow->idsn + 1);   in __mptcp_sync_state()
     473  WRITE_ONCE(msk->snd_nxt, msk->write_seq);   in __mptcp_sync_state()
|
sockopt.c
     998  info->mptcpi_write_seq = msk->write_seq;   in mptcp_diag_fill_info()
|
options.c
     540  u64 data_fin_tx_seq = READ_ONCE(mptcp_sk(subflow->conn)->write_seq) - 1;   in mptcp_write_data_fin()
|
/linux/Documentation/translations/zh_CN/networking/
timestamping.rst
     203  The difference is due to being based on snd_una versus write_seq. snd_una is the offset of the stream
     204  acknowledged by the peer. This depends on external factors, such as network RTT. write_seq is the last byte written by the process
|
/linux/net/tls/
tls_device.c
     290  record->end_seq = tp->write_seq + record->len;   in tls_push_record()
     295  tls_device_resync_tx(sk, ctx, tp->write_seq);   in tls_push_record()
    1129  start_marker_record->end_seq = tcp_sk(sk)->write_seq;   in tls_set_device_offload()
    1160  tcp_sk(sk)->write_seq);   in tls_set_device_offload()
    1162  tcp_sk(sk)->write_seq, rec_seq, rc);   in tls_set_device_offload()
|
/linux/net/ipv4/
tcp.c
     667  answ = READ_ONCE(tp->write_seq) - tp->snd_una;   in tcp_ioctl()
     676  answ = READ_ONCE(tp->write_seq) -   in tcp_ioctl()
     691  tp->pushed_seq = tp->write_seq;   in tcp_mark_push()
     696  return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));   in forced_push()
     704  tcb->seq = tcb->end_seq = tp->write_seq;   in tcp_skb_entail()
     720  tp->snd_up = tp->write_seq;   in tcp_mark_urg()
    1337  WRITE_ONCE(tp->write_seq, tp->write_seq + copy);   in tcp_sendmsg_locked()
    3369  } else if (tp->snd_nxt != tp->write_seq &&   in tcp_disconnect()
    3399  seq = tp->write_seq   in tcp_disconnect()
    [all …]
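
The first two tcp_ioctl() hits are the values behind the SIOCOUTQ and SIOCOUTQNSD socket ioctls: write_seq - snd_una is data queued but not yet acknowledged, and write_seq - snd_nxt is data queued but not yet sent. A minimal userspace sketch that reads both counters; it assumes fd is an already connected TCP socket, and print_outq is just an illustrative helper name.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>   /* SIOCOUTQ, SIOCOUTQNSD */

/* Print the send-queue counters that tcp_ioctl() derives from write_seq.
 * 'fd' is assumed to be a connected TCP socket. */
static int print_outq(int fd)
{
	int unacked = 0;  /* write_seq - snd_una: queued, not yet acked */
	int unsent  = 0;  /* write_seq - snd_nxt: queued, not yet sent  */

	if (ioctl(fd, SIOCOUTQ, &unacked) < 0 ||
	    ioctl(fd, SIOCOUTQNSD, &unsent) < 0) {
		perror("ioctl");
		return -1;
	}
	printf("not yet acked: %d bytes, not yet sent: %d bytes\n",
	       unacked, unsent);
	return 0;
}
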
tcp_rate.c
     199  tp->write_seq - tp->snd_nxt < tp->mss_cache &&   in tcp_rate_check_app_limited()
|
tcp_output.c
    1663  WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq);   in tcp_queue_skb()
    2647  if (tp->write_seq - tp->snd_nxt < size_needed)   in tcp_mtu_probe()
    3782  tp->write_seq++;   in tcp_send_fin()
    3804  tcp_init_nondata_skb(skb, sk, tp->write_seq,   in tcp_send_fin()
    4123  tp->snd_una = tp->write_seq;   in tcp_connect_init()
    4124  tp->snd_sml = tp->write_seq;   in tcp_connect_init()
    4125  tp->snd_up = tp->write_seq;   in tcp_connect_init()
    4126  WRITE_ONCE(tp->snd_nxt, tp->write_seq);   in tcp_connect_init()
    4149  WRITE_ONCE(tp->write_seq, tcb->end_seq);   in tcp_connect_queue_skb()
    4329  tcp_init_nondata_skb(buff, sk, tp->write_seq, TCPHDR_SYN);   in tcp_connect()
    [all …]
|
tcp_input.c
    7089  if (tp->snd_una != tp->write_seq)   in tcp_rcv_state_process()
    7137  if (tp->snd_una == tp->write_seq) {   in tcp_rcv_state_process()
    7144  if (tp->snd_una == tp->write_seq) {   in tcp_rcv_state_process()
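
The tcp_output.c and tcp_input.c hits belong together: tcp_send_fin() advances write_seq by one because the FIN consumes a sequence number, and tcp_rcv_state_process() then uses snd_una == write_seq as the test that everything queued, including the FIN, has been acknowledged. A toy userspace model of just that arithmetic, illustration only and not kernel code; the toy_* names are made up.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct toy_tcp {
	uint32_t snd_una;   /* oldest unacknowledged sequence number */
	uint32_t write_seq; /* next sequence number to be queued     */
};

static void toy_send_data(struct toy_tcp *tp, uint32_t len)
{
	tp->write_seq += len;   /* queueing data advances write_seq */
}

static void toy_send_fin(struct toy_tcp *tp)
{
	tp->write_seq++;        /* the FIN consumes one sequence number */
}

static int toy_all_sent_acked(const struct toy_tcp *tp)
{
	return tp->snd_una == tp->write_seq;
}

int main(void)
{
	struct toy_tcp tp = { .snd_una = 1000, .write_seq = 1000 };

	toy_send_data(&tp, 100);
	toy_send_fin(&tp);
	tp.snd_una += 100;                 /* peer ACKs the data only */
	assert(!toy_all_sent_acked(&tp));  /* FIN still unacknowledged */
	tp.snd_una += 1;                   /* peer ACKs the FIN */
	assert(toy_all_sent_acked(&tp));
	puts("FIN acknowledged once snd_una == write_seq");
	return 0;
}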
|
/linux/tools/testing/selftests/bpf/progs/
bpf_iter_tcp4.c
     127  tp->write_seq - tp->snd_una, rx_queue,   in dump_tcp_sock()
|
bpf_iter_tcp6.c
     131  tp->tcp.write_seq - tp->tcp.snd_una, rx_queue,   in dump_tcp6_sock()
|
/linux/drivers/target/iscsi/cxgbit/
cxgbit.h
     223  u32 write_seq;   member
|
cxgbit_target.c
     257  (!before(csk->write_seq,   in cxgbit_push_tx_frames()
     327  csk->write_seq += skb->len +   in cxgbit_queue_skb()
|
/linux/net/ipv6/
tcp_ipv6.c
     211  WRITE_ONCE(tp->write_seq, 0);   in tcp_v6_connect()
     321  if (!tp->write_seq)   in tcp_v6_connect()
     322  WRITE_ONCE(tp->write_seq,   in tcp_v6_connect()
    2197  READ_ONCE(tp->write_seq) - tp->snd_una,
|
/linux/drivers/scsi/cxgbi/
libcxgbi.h
     169  u32 write_seq;   member
|
libcxgbi.c
     833  csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn;   in cxgbi_sock_established()
    1117  cxgbi_skcb_tcp_seq(skb) = csk->write_seq;   in cxgbi_sock_skb_entail()
    2296  before((csk->snd_win + csk->snd_una), csk->write_seq)) {   in cxgbi_sock_tx_queue_up()
    2299  csk, csk->state, csk->flags, csk->tid, csk->write_seq,   in cxgbi_sock_tx_queue_up()
    2337  csk->write_seq += (skb->len + extra_len);   in cxgbi_sock_tx_queue_up()
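
These cxgbi hits show the offload driver keeping its own write_seq: it is advanced as skbs are queued and, before more data is queued, compared via the wrap-safe before() helper against snd_una plus the send window. A small sketch of that wrap-safe check, assuming 32-bit sequence numbers as in the driver; seq_before and tx_window_full are illustrative names, not driver functions.

#include <stdbool.h>
#include <stdint.h>

/* Wrap-safe "seq1 is before seq2" test for 32-bit sequence numbers,
 * the same idea as the kernel's before() helper: the signed difference
 * is negative when seq1 precedes seq2, even across a 2^32 wrap. */
static inline bool seq_before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

/* cxgbi_sock_tx_queue_up()-style check: stop queueing once write_seq
 * has already moved past snd_una + snd_win. */
static bool tx_window_full(uint32_t snd_una, uint32_t snd_win,
			   uint32_t write_seq)
{
	return seq_before(snd_una + snd_win, write_seq);
}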
|
/linux/net/rds/
tcp.c
      96  return tcp_sk(tc->t_sock->sk)->write_seq;   in rds_tcp_write_seq()
|
/linux/drivers/block/drbd/
drbd_debugfs.c
     392  answ = tp->write_seq - tp->snd_una;   in in_flight_summary_show()
|
/linux/Documentation/networking/
timestamping.rst
     238  The difference is due to being based on snd_una versus write_seq.
     241  write_seq is the last byte written by the process. This offset is
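
These lines come from the SOF_TIMESTAMPING_OPT_ID discussion: an identifier derived from write_seq depends only on what the process has written, while one derived from snd_una also depends on the peer and the network. As a hedged illustration, the sketch below requests TCP ACK timestamps with SOF_TIMESTAMPING_OPT_ID_TCP so the identifier is keyed off write_seq; it assumes a connected TCP socket fd and a kernel recent enough to provide that flag, and enable_tcp_byte_ids is just an illustrative name.

#include <stdio.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>   /* SOF_TIMESTAMPING_* flags */

/* Request TX ACK timestamps whose identifier is based on write_seq
 * (bytes written by the process) rather than snd_una.  'fd' is assumed
 * to be a connected TCP socket; SOF_TIMESTAMPING_OPT_ID_TCP needs a
 * reasonably recent kernel. */
static int enable_tcp_byte_ids(int fd)
{
	unsigned int flags = SOF_TIMESTAMPING_TX_ACK |
			     SOF_TIMESTAMPING_SOFTWARE |
			     SOF_TIMESTAMPING_OPT_ID |
			     SOF_TIMESTAMPING_OPT_ID_TCP;

	if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
		       &flags, sizeof(flags)) < 0) {
		perror("setsockopt(SO_TIMESTAMPING)");
		return -1;
	}
	return 0;
}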
|
/linux/drivers/scsi/cxgbi/cxgb3i/
cxgb3i.c
     215  req->rsvd = htonl(csk->write_seq);   in send_close_req()