--- tcp.c (2d074918fb1568f398777343ff9a28049fb86337)
+++ tcp.c (8934ce2fd08171e8605f7fada91ee7619fe17ab8)
 /*
  * INET		An implementation of the TCP/IP protocol suite for the LINUX
  *		operating system. INET is implemented using the BSD Socket
  *		interface as the means of communication with the user level.
  *
  *		Implementation of the Transmission Control Protocol(TCP).
  *
  * Authors:	Ross Biro

--- 439 unchanged lines hidden ---

 	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

 	icsk->icsk_sync_mss = tcp_sync_mss;

 	sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1];
 	sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];

 	sk_sockets_allocated_inc(sk);
+	sk->sk_route_forced_caps = NETIF_F_GSO;
 }
 EXPORT_SYMBOL(tcp_init_sock);

 void tcp_init_transfer(struct sock *sk, int bpf_op)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);

 	tcp_mtup_init(sk);

--- 15 unchanged lines hidden ---

 		sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags);
 		if (tsflags & SOF_TIMESTAMPING_TX_ACK)
 			tcb->txstamp_ack = 1;
 		if (tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
 			shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
 	}
 }

+static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
+					  int target, struct sock *sk)
+{
+	return (tp->rcv_nxt - tp->copied_seq >= target) ||
+		(sk->sk_prot->stream_memory_read ?
+		sk->sk_prot->stream_memory_read(sk) : false);
+}
+
 /*
  *	Wait for a TCP event.
  *
  *	Note that we don't need to lock the socket, as the upper poll layers
  *	take care of normal races (between the test and the event) and we don't
  *	go look at any of the socket buffers directly.
  */
 __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)

--- 53 unchanged lines hidden ---

 	    (state != TCP_SYN_RECV || tp->fastopen_rsk)) {
 		int target = sock_rcvlowat(sk, 0, INT_MAX);

 		if (tp->urg_seq == tp->copied_seq &&
 		    !sock_flag(sk, SOCK_URGINLINE) &&
 		    tp->urg_data)
 			target++;

-		if (tp->rcv_nxt - tp->copied_seq >= target)
+		if (tcp_stream_is_readable(tp, target, sk))
 			mask |= EPOLLIN | EPOLLRDNORM;

 		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
 			if (sk_stream_is_writeable(sk)) {
 				mask |= EPOLLOUT | EPOLLWRNORM;
 			} else {	/* send SIGIO later */
 				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

--- 327 unchanged lines hidden ---
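The tcp_poll() change above delegates the readability test to the new tcp_stream_is_readable() helper, so an upper layer that implements ->stream_memory_read() (e.g. BPF sockmap ingress) can signal EPOLLIN even when no bytes sit between copied_seq and rcv_nxt; ordinary sockets still compare against the SO_RCVLOWAT-derived target. A minimal userspace sketch of that target in action (hypothetical helper name; assumes fd is a connected TCP socket):

/* Hedged sketch (userspace): SO_RCVLOWAT sets the "target" that
 * tcp_poll() compares against, so poll() reports POLLIN only once
 * at least lowat bytes are queued. Error handling trimmed.
 */
#include <poll.h>
#include <sys/socket.h>

static int wait_for_lowat(int fd, int lowat, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &lowat, sizeof(lowat));
	return poll(&pfd, 1, timeout_ms);	/* > 0 once readable */
}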

 }

 static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
 				       int large_allowed)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	u32 new_size_goal, size_goal;

-	if (!large_allowed || !sk_can_gso(sk))
+	if (!large_allowed)
 		return mss_now;

 	/* Note : tcp_tso_autosize() will eventually split this later */
 	new_size_goal = sk->sk_gso_max_size - 1 - MAX_TCP_HEADER;
 	new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal);

 	/* We try hard to avoid divides here */
 	size_goal = tp->gso_segs * mss_now;

--- 79 unchanged lines hidden ---

 			goto wait_for_memory;

 		if (can_coalesce) {
 			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
 		} else {
 			get_page(page);
 			skb_fill_page_desc(skb, i, page, offset, copy);
 		}
-		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
+
+		if (!(flags & MSG_NO_SHARED_FRAGS))
+			skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;

 		skb->len += copy;
 		skb->data_len += copy;
 		skb->truesize += copy;
 		sk->sk_wmem_queued += copy;
 		sk_mem_charge(sk, copy);
 		skb->ip_summed = CHECKSUM_PARTIAL;
 		tp->write_seq += copy;
 		TCP_SKB_CB(skb)->end_seq += copy;

--- 51 unchanged lines hidden ---

 	}
 	return sk_stream_error(sk, flags, err);
 }
 EXPORT_SYMBOL_GPL(do_tcp_sendpages);

 int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
 			size_t size, int flags)
 {
-	if (!(sk->sk_route_caps & NETIF_F_SG) ||
-	    !sk_check_csum_caps(sk))
+	if (!(sk->sk_route_caps & NETIF_F_SG))
 		return sock_no_sendpage_locked(sk, page, offset, size, flags);

 	tcp_rate_check_app_limited(sk);	/* is sending application-limited? */

 	return do_tcp_sendpages(sk, page, offset, size, flags);
 }
 EXPORT_SYMBOL_GPL(tcp_sendpage_locked);


--- 22 unchanged lines hidden ---

  */
 static int linear_payload_sz(bool first_skb)
 {
 	if (first_skb)
 		return SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
 	return 0;
 }

-static int select_size(const struct sock *sk, bool sg, bool first_skb, bool zc)
+static int select_size(bool first_skb, bool zc)
 {
-	const struct tcp_sock *tp = tcp_sk(sk);
-	int tmp = tp->mss_cache;
-
-	if (sg) {
-		if (zc)
-			return 0;
-
-		if (sk_can_gso(sk)) {
-			tmp = linear_payload_sz(first_skb);
-		} else {
-			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
-
-			if (tmp >= pgbreak &&
-			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
-				tmp = pgbreak;
-		}
-	}
-
-	return tmp;
+	if (zc)
+		return 0;
+	return linear_payload_sz(first_skb);
 }

 void tcp_free_fastopen_req(struct tcp_sock *tp)
 {
 	if (tp->fastopen_req) {
 		kfree(tp->fastopen_req);
 		tp->fastopen_req = NULL;
 	}

--- 48 unchanged lines hidden ---

 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct ubuf_info *uarg = NULL;
 	struct sk_buff *skb;
 	struct sockcm_cookie sockc;
 	int flags, err, copied = 0;
 	int mss_now = 0, size_goal, copied_syn = 0;
 	bool process_backlog = false;
-	bool sg, zc = false;
+	bool zc = false;
 	long timeo;

 	flags = msg->msg_flags;

 	if (flags & MSG_ZEROCOPY && size) {
 		if (sk->sk_state != TCP_ESTABLISHED) {
 			err = -EINVAL;
 			goto out_err;
 		}

 		skb = tcp_write_queue_tail(sk);
 		uarg = sock_zerocopy_realloc(sk, size, skb_zcopy(skb));
 		if (!uarg) {
 			err = -ENOBUFS;
 			goto out_err;
 		}

-		zc = sk_check_csum_caps(sk) && sk->sk_route_caps & NETIF_F_SG;
+		zc = sk->sk_route_caps & NETIF_F_SG;
 		if (!zc)
 			uarg->zerocopy = 0;
 	}

 	if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect)) {
 		err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
 		if (err == -EINPROGRESS && copied_syn > 0)
 			goto out;

--- 46 unchanged lines hidden ---
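With sk_check_csum_caps() gone, MSG_ZEROCOPY eligibility above reduces to NETIF_F_SG on the route; when even that is absent, uarg->zerocopy = 0 quietly degrades the send to a copy. For orientation, a hedged userspace sketch of the zerocopy flow this branch serves (hypothetical helper; assumes a connected TCP socket fd; completion reaping via MSG_ERRQUEUE is omitted):

/* Hedged sketch (userspace): the MSG_ZEROCOPY send path exercised
 * above. Real code must reap zerocopy completions from the error
 * queue (recvmsg with MSG_ERRQUEUE) before reusing buf.
 */
#include <sys/socket.h>
#include <sys/types.h>

#ifndef SO_ZEROCOPY
#define SO_ZEROCOPY	60
#endif
#ifndef MSG_ZEROCOPY
#define MSG_ZEROCOPY	0x4000000
#endif

static ssize_t send_zerocopy(int fd, const void *buf, size_t len)
{
	int one = 1;

	/* Opt in once per socket; plain sends are unaffected. */
	if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)) < 0)
		return -1;

	/* Pages of buf are pinned rather than copied; the kernel falls
	 * back to copying when the route lacks NETIF_F_SG (the
	 * uarg->zerocopy = 0 case above). */
	return send(fd, buf, len, MSG_ZEROCOPY);
}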


 restart:
 	mss_now = tcp_send_mss(sk, &size_goal, flags);

 	err = -EPIPE;
 	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
 		goto do_error;

-	sg = !!(sk->sk_route_caps & NETIF_F_SG);
-
 	while (msg_data_left(msg)) {
 		int copy = 0;
-		int max = size_goal;

 		skb = tcp_write_queue_tail(sk);
-		if (skb) {
-			if (skb->ip_summed == CHECKSUM_NONE)
-				max = mss_now;
-			copy = max - skb->len;
-		}
+		if (skb)
+			copy = size_goal - skb->len;

 		if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) {
 			bool first_skb;
 			int linear;

 new_segment:
 			/* Allocate new segment. If the interface is SG,
 			 * allocate skb fitting to single page.
 			 */
 			if (!sk_stream_memory_free(sk))
 				goto wait_for_sndbuf;

 			if (process_backlog && sk_flush_backlog(sk)) {
 				process_backlog = false;
 				goto restart;
 			}
 			first_skb = tcp_rtx_and_write_queues_empty(sk);
-			linear = select_size(sk, sg, first_skb, zc);
+			linear = select_size(first_skb, zc);
 			skb = sk_stream_alloc_skb(sk, linear, sk->sk_allocation,
 						  first_skb);
 			if (!skb)
 				goto wait_for_memory;

 			process_backlog = true;
-			/*
-			 * Check whether we can use HW checksum.
-			 */
-			if (sk_check_csum_caps(sk))
-				skb->ip_summed = CHECKSUM_PARTIAL;
+			skb->ip_summed = CHECKSUM_PARTIAL;

 			skb_entail(sk, skb);
 			copy = size_goal;
-			max = size_goal;

 			/* All packets are restored as if they have
 			 * already been sent. skb_mstamp isn't set to
 			 * avoid wrong rtt estimation.
 			 */
 			if (tp->repair)
 				TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED;
 		}

--- 14 unchanged lines hidden ---

 			int i = skb_shinfo(skb)->nr_frags;
 			struct page_frag *pfrag = sk_page_frag(sk);

 			if (!sk_page_frag_refill(sk, pfrag))
 				goto wait_for_memory;

 			if (!skb_can_coalesce(skb, i, pfrag->page,
 					      pfrag->offset)) {
-				if (i >= sysctl_max_skb_frags || !sg) {
+				if (i >= sysctl_max_skb_frags) {
 					tcp_mark_push(tp, skb);
 					goto new_segment;
 				}
 				merge = false;
 			}

 			copy = min_t(int, copy, pfrag->size - pfrag->offset);

--- 36 unchanged lines hidden ---


 		copied += copy;
 		if (!msg_data_left(msg)) {
 			if (unlikely(flags & MSG_EOR))
 				TCP_SKB_CB(skb)->eor = 1;
 			goto out;
 		}

-		if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair))
+		if (skb->len < size_goal || (flags & MSG_OOB) || unlikely(tp->repair))
 			continue;

 		if (forced_push(tp)) {
 			tcp_mark_push(tp, skb);
 			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
 		} else if (skb == tcp_send_head(sk))
 			tcp_push_one(sk, mss_now);
 		continue;

--- 1645 unchanged lines hidden ---

 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *stats;
 	struct tcp_info info;
 	u64 rate64;
 	u32 rate;

 	stats = alloc_skb(7 * nla_total_size_64bit(sizeof(u64)) +
-			  3 * nla_total_size(sizeof(u32)) +
-			  2 * nla_total_size(sizeof(u8)), GFP_ATOMIC);
+			  5 * nla_total_size(sizeof(u32)) +
+			  3 * nla_total_size(sizeof(u8)), GFP_ATOMIC);
 	if (!stats)
 		return NULL;

 	tcp_get_info_chrono_stats(tp, &info);
 	nla_put_u64_64bit(stats, TCP_NLA_BUSY,
 			  info.tcpi_busy_time, TCP_NLA_PAD);
 	nla_put_u64_64bit(stats, TCP_NLA_RWND_LIMITED,
 			  info.tcpi_rwnd_limited, TCP_NLA_PAD);

--- 12 unchanged lines hidden ---

 	nla_put_u64_64bit(stats, TCP_NLA_DELIVERY_RATE, rate64, TCP_NLA_PAD);

 	nla_put_u32(stats, TCP_NLA_SND_CWND, tp->snd_cwnd);
 	nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering);
 	nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp));

 	nla_put_u8(stats, TCP_NLA_RECUR_RETRANS, inet_csk(sk)->icsk_retransmits);
 	nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited);
+	nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, tp->snd_ssthresh);
+
+	nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una);
+	nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state);
 	return stats;
 }

 static int do_tcp_getsockopt(struct sock *sk, int level,
 			     int optname, char __user *optval, int __user *optlen)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);

--- 586 unchanged lines hidden ---
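The tail of the diff widens the SOF_TIMESTAMPING_OPT_STATS allocation from 3 to 5 u32 slots and 2 to 3 u8 slots, matching the three attributes added above: TCP_NLA_SND_SSTHRESH (u32), TCP_NLA_SNDQ_SIZE (u32, write_seq - snd_una, i.e. bytes not yet acked) and TCP_NLA_CA_STATE (u8). A hedged sketch of consuming them (hypothetical function; assumes uapi headers new enough to define these TCP_NLA_* values, and that data/len point at the nlattr blob taken from the timestamping error-queue cmsg):

/* Hedged sketch (userspace): walk an OPT_STATS nlattr blob and pick
 * out the three attributes added by this diff.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

static void dump_new_tcp_stats(const void *data, int len)
{
	const struct nlattr *a = data;

	while (len >= (int)sizeof(*a) &&
	       a->nla_len >= sizeof(*a) && (int)a->nla_len <= len) {
		const void *p = (const char *)a + NLA_HDRLEN;
		uint32_t v;

		switch (a->nla_type) {
		case TCP_NLA_SND_SSTHRESH:	/* new: u32 */
		case TCP_NLA_SNDQ_SIZE:		/* new: u32, write_seq - snd_una */
			memcpy(&v, p, sizeof(v));
			printf("attr %u = %u\n", a->nla_type, v);
			break;
		case TCP_NLA_CA_STATE:		/* new: u8 */
			printf("ca_state = %u\n", *(const uint8_t *)p);
			break;
		}
		len -= NLA_ALIGN(a->nla_len);
		a = (const void *)((const char *)a + NLA_ALIGN(a->nla_len));
	}
}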