tcp_output.c (before: 516d5f8b04ce2bcd24f03323fc743ae25b81373d) | tcp_output.c (after: 7faee5c0d514162853a343d93e4a0b6bb8bfec21) |
---|---|
1/* 2 * INET An implementation of the TCP/IP protocol suite for the LINUX 3 * operating system. INET is implemented using the BSD Socket 4 * interface as the means of communication with the user level. 5 * 6 * Implementation of the Transmission Control Protocol(TCP). 7 * 8 * Authors: Ross Biro --- 536 unchanged lines hidden (view full) --- 545 * should, and thus we won't abide by the delayed ACK rules correctly. 546 * SACKs don't matter, we never delay an ACK when we have any of those 547 * going out. */ 548 opts->mss = tcp_advertise_mss(sk); 549 remaining -= TCPOLEN_MSS_ALIGNED; 550 551 if (likely(sysctl_tcp_timestamps && *md5 == NULL)) { 552 opts->options |= OPTION_TS; | 1/* 2 * INET An implementation of the TCP/IP protocol suite for the LINUX 3 * operating system. INET is implemented using the BSD Socket 4 * interface as the means of communication with the user level. 5 * 6 * Implementation of the Transmission Control Protocol(TCP). 7 * 8 * Authors: Ross Biro --- 536 unchanged lines hidden (view full) --- 545 * should, and thus we won't abide by the delayed ACK rules correctly. 546 * SACKs don't matter, we never delay an ACK when we have any of those 547 * going out. */ 548 opts->mss = tcp_advertise_mss(sk); 549 remaining -= TCPOLEN_MSS_ALIGNED; 550 551 if (likely(sysctl_tcp_timestamps && *md5 == NULL)) { 552 opts->options |= OPTION_TS; |
553 opts->tsval = TCP_SKB_CB(skb)->when + tp->tsoffset; | 553 opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset; |
554 opts->tsecr = tp->rx_opt.ts_recent; 555 remaining -= TCPOLEN_TSTAMP_ALIGNED; 556 } 557 if (likely(sysctl_tcp_window_scaling)) { 558 opts->ws = tp->rx_opt.rcv_wscale; 559 opts->options |= OPTION_WSCALE; 560 remaining -= TCPOLEN_WSCALE_ALIGNED; 561 } --- 51 unchanged lines hidden (view full) --- 613 614 if (likely(ireq->wscale_ok)) { 615 opts->ws = ireq->rcv_wscale; 616 opts->options |= OPTION_WSCALE; 617 remaining -= TCPOLEN_WSCALE_ALIGNED; 618 } 619 if (likely(ireq->tstamp_ok)) { 620 opts->options |= OPTION_TS; | 554 opts->tsecr = tp->rx_opt.ts_recent; 555 remaining -= TCPOLEN_TSTAMP_ALIGNED; 556 } 557 if (likely(sysctl_tcp_window_scaling)) { 558 opts->ws = tp->rx_opt.rcv_wscale; 559 opts->options |= OPTION_WSCALE; 560 remaining -= TCPOLEN_WSCALE_ALIGNED; 561 } --- 51 unchanged lines hidden (view full) --- 613 614 if (likely(ireq->wscale_ok)) { 615 opts->ws = ireq->rcv_wscale; 616 opts->options |= OPTION_WSCALE; 617 remaining -= TCPOLEN_WSCALE_ALIGNED; 618 } 619 if (likely(ireq->tstamp_ok)) { 620 opts->options |= OPTION_TS; |
621 opts->tsval = TCP_SKB_CB(skb)->when; | 621 opts->tsval = tcp_skb_timestamp(skb); |
622 opts->tsecr = req->ts_recent; 623 remaining -= TCPOLEN_TSTAMP_ALIGNED; 624 } 625 if (likely(ireq->sack_ok)) { 626 opts->options |= OPTION_SACK_ADVERTISE; 627 if (unlikely(!ireq->tstamp_ok)) 628 remaining -= TCPOLEN_SACKPERM_ALIGNED; 629 } --- 12 unchanged lines hidden (view full) --- 642 643/* Compute TCP options for ESTABLISHED sockets. This is not the 644 * final wire format yet. 645 */ 646static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb, 647 struct tcp_out_options *opts, 648 struct tcp_md5sig_key **md5) 649{ | 622 opts->tsecr = req->ts_recent; 623 remaining -= TCPOLEN_TSTAMP_ALIGNED; 624 } 625 if (likely(ireq->sack_ok)) { 626 opts->options |= OPTION_SACK_ADVERTISE; 627 if (unlikely(!ireq->tstamp_ok)) 628 remaining -= TCPOLEN_SACKPERM_ALIGNED; 629 } --- 12 unchanged lines hidden (view full) --- 642 643/* Compute TCP options for ESTABLISHED sockets. This is not the 644 * final wire format yet. 645 */ 646static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb, 647 struct tcp_out_options *opts, 648 struct tcp_md5sig_key **md5) 649{ |
650 struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL; | |
651 struct tcp_sock *tp = tcp_sk(sk); 652 unsigned int size = 0; 653 unsigned int eff_sacks; 654 655 opts->options = 0; 656 657#ifdef CONFIG_TCP_MD5SIG 658 *md5 = tp->af_specific->md5_lookup(sk, sk); 659 if (unlikely(*md5)) { 660 opts->options |= OPTION_MD5; 661 size += TCPOLEN_MD5SIG_ALIGNED; 662 } 663#else 664 *md5 = NULL; 665#endif 666 667 if (likely(tp->rx_opt.tstamp_ok)) { 668 opts->options |= OPTION_TS; | 650 struct tcp_sock *tp = tcp_sk(sk); 651 unsigned int size = 0; 652 unsigned int eff_sacks; 653 654 opts->options = 0; 655 656#ifdef CONFIG_TCP_MD5SIG 657 *md5 = tp->af_specific->md5_lookup(sk, sk); 658 if (unlikely(*md5)) { 659 opts->options |= OPTION_MD5; 660 size += TCPOLEN_MD5SIG_ALIGNED; 661 } 662#else 663 *md5 = NULL; 664#endif 665 666 if (likely(tp->rx_opt.tstamp_ok)) { 667 opts->options |= OPTION_TS; |
669 opts->tsval = tcb ? tcb->when + tp->tsoffset : 0; | 668 opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0; |
670 opts->tsecr = tp->rx_opt.ts_recent; 671 size += TCPOLEN_TSTAMP_ALIGNED; 672 } 673 674 eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack; 675 if (unlikely(eff_sacks)) { 676 const unsigned int remaining = MAX_TCP_OPTION_SPACE - size; 677 opts->num_sack_blocks = --- 203 unchanged lines hidden (view full) --- 881 skb_mstamp_get(&skb->skb_mstamp); 882 883 if (unlikely(skb_cloned(skb))) 884 skb = pskb_copy(skb, gfp_mask); 885 else 886 skb = skb_clone(skb, gfp_mask); 887 if (unlikely(!skb)) 888 return -ENOBUFS; | 669 opts->tsecr = tp->rx_opt.ts_recent; 670 size += TCPOLEN_TSTAMP_ALIGNED; 671 } 672 673 eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack; 674 if (unlikely(eff_sacks)) { 675 const unsigned int remaining = MAX_TCP_OPTION_SPACE - size; 676 opts->num_sack_blocks = --- 203 unchanged lines hidden (view full) --- 880 skb_mstamp_get(&skb->skb_mstamp); 881 882 if (unlikely(skb_cloned(skb))) 883 skb = pskb_copy(skb, gfp_mask); 884 else 885 skb = skb_clone(skb, gfp_mask); 886 if (unlikely(!skb)) 887 return -ENOBUFS; |
889 /* Our usage of tstamp should remain private */ 890 skb->tstamp.tv64 = 0; | |
891 } 892 893 inet = inet_sk(sk); 894 tp = tcp_sk(sk); 895 tcb = TCP_SKB_CB(skb); 896 memset(&opts, 0, sizeof(opts)); 897 898 if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) --- 71 unchanged lines hidden (view full) --- 970 971 if (skb->len != tcp_header_size) 972 tcp_event_data_sent(tp, sk); 973 974 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) 975 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, 976 tcp_skb_pcount(skb)); 977 | 888 } 889 890 inet = inet_sk(sk); 891 tp = tcp_sk(sk); 892 tcb = TCP_SKB_CB(skb); 893 memset(&opts, 0, sizeof(opts)); 894 895 if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) --- 71 unchanged lines hidden (view full) --- 967 968 if (skb->len != tcp_header_size) 969 tcp_event_data_sent(tp, sk); 970 971 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) 972 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, 973 tcp_skb_pcount(skb)); 974 |
 | 975 /* Our usage of tstamp should remain private */ 976 skb->tstamp.tv64 = 0; |
978 err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl); | 977 err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl); 978 |
979 if (likely(err <= 0)) 980 return err; 981 982 tcp_enter_cwr(sk); 983 984 return net_xmit_eval(err); 985} 986 --- 157 unchanged lines hidden (view full) --- 1144 skb_split(skb, buff, len); 1145 } 1146 1147 buff->ip_summed = skb->ip_summed; 1148 1149 /* Looks stupid, but our code really uses when of 1150 * skbs, which it never sent before. --ANK 1151 */ | 979 if (likely(err <= 0)) 980 return err; 981 982 tcp_enter_cwr(sk); 983 984 return net_xmit_eval(err); 985} 986 --- 157 unchanged lines hidden (view full) --- 1144 skb_split(skb, buff, len); 1145 } 1146 1147 buff->ip_summed = skb->ip_summed; 1148 1149 /* Looks stupid, but our code really uses when of 1150 * skbs, which it never sent before. --ANK 1151 */ |
1152 TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when; | |
1153 buff->tstamp = skb->tstamp; 1154 tcp_fragment_tstamp(skb, buff); 1155 1156 old_factor = tcp_skb_pcount(skb); 1157 1158 /* Fix up tso_factor for both original and new SKB. */ 1159 tcp_set_skb_tso_segs(sk, skb, mss_now); 1160 tcp_set_skb_tso_segs(sk, buff, mss_now); --- 708 unchanged lines hidden (view full) --- 1869 len += copy; 1870 1871 if (len >= probe_size) 1872 break; 1873 } 1874 tcp_init_tso_segs(sk, nskb, nskb->len); 1875 1876 /* We're ready to send. If this fails, the probe will | 1152 buff->tstamp = skb->tstamp; 1153 tcp_fragment_tstamp(skb, buff); 1154 1155 old_factor = tcp_skb_pcount(skb); 1156 1157 /* Fix up tso_factor for both original and new SKB. */ 1158 tcp_set_skb_tso_segs(sk, skb, mss_now); 1159 tcp_set_skb_tso_segs(sk, buff, mss_now); --- 708 unchanged lines hidden (view full) --- 1868 len += copy; 1869 1870 if (len >= probe_size) 1871 break; 1872 } 1873 tcp_init_tso_segs(sk, nskb, nskb->len); 1874 1875 /* We're ready to send. If this fails, the probe will |
1877 * be resegmented into mss-sized pieces by tcp_write_xmit(). */ 1878 TCP_SKB_CB(nskb)->when = tcp_time_stamp; | 1876 * be resegmented into mss-sized pieces by tcp_write_xmit(). 1877 */ |
1879 if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) { 1880 /* Decrement cwnd here because we are sending 1881 * effectively two packets. */ 1882 tp->snd_cwnd--; 1883 tcp_event_new_data_sent(sk, nskb); 1884 1885 icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); 1886 tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; --- 43 unchanged lines hidden (view full) --- 1930 1931 while ((skb = tcp_send_head(sk))) { 1932 unsigned int limit; 1933 1934 tso_segs = tcp_init_tso_segs(sk, skb, mss_now); 1935 BUG_ON(!tso_segs); 1936 1937 if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) { | 1878 if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) { 1879 /* Decrement cwnd here because we are sending 1880 * effectively two packets. */ 1881 tp->snd_cwnd--; 1882 tcp_event_new_data_sent(sk, nskb); 1883 1884 icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); 1885 tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; --- 43 unchanged lines hidden (view full) --- 1929 1930 while ((skb = tcp_send_head(sk))) { 1931 unsigned int limit; 1932 1933 tso_segs = tcp_init_tso_segs(sk, skb, mss_now); 1934 BUG_ON(!tso_segs); 1935 1936 if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) { |
1938 /* "when" is used as a start point for the retransmit timer */ 1939 TCP_SKB_CB(skb)->when = tcp_time_stamp; | 1937 /* "skb_mstamp" is used as a start point for the retransmit timer */ 1938 skb_mstamp_get(&skb->skb_mstamp); |
1940 goto repair; /* Skip network transmission */ 1941 } 1942 1943 cwnd_quota = tcp_cwnd_test(tp, skb); 1944 if (!cwnd_quota) { 1945 is_cwnd_limited = true; 1946 if (push_one == 2) 1947 /* Force out a loss probe pkt. */ --- 47 unchanged lines hidden (view full) --- 1995 cwnd_quota, 1996 sk->sk_gso_max_segs), 1997 nonagle); 1998 1999 if (skb->len > limit && 2000 unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) 2001 break; 2002 | 1939 goto repair; /* Skip network transmission */ 1940 } 1941 1942 cwnd_quota = tcp_cwnd_test(tp, skb); 1943 if (!cwnd_quota) { 1944 is_cwnd_limited = true; 1945 if (push_one == 2) 1946 /* Force out a loss probe pkt. */ --- 47 unchanged lines hidden (view full) --- 1994 cwnd_quota, 1995 sk->sk_gso_max_segs), 1996 nonagle); 1997 1998 if (skb->len > limit && 1999 unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) 2000 break; 2001 |
2003 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2004 | |
2005 if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) 2006 break; 2007 2008repair: 2009 /* Advance the send_head. This one is sent out. 2010 * This call will increment packets_out. 2011 */ 2012 tcp_event_new_data_sent(sk, skb); --- 481 unchanged lines hidden (view full) --- 2494 } 2495 } 2496 2497 tcp_retrans_try_collapse(sk, skb, cur_mss); 2498 2499 /* Make a copy, if the first transmission SKB clone we made 2500 * is still in somebody's hands, else make a clone. 2501 */ | 2002 if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) 2003 break; 2004 2005repair: 2006 /* Advance the send_head. This one is sent out. 2007 * This call will increment packets_out. 2008 */ 2009 tcp_event_new_data_sent(sk, skb); --- 481 unchanged lines hidden (view full) --- 2491 } 2492 } 2493 2494 tcp_retrans_try_collapse(sk, skb, cur_mss); 2495 2496 /* Make a copy, if the first transmission SKB clone we made 2497 * is still in somebody's hands, else make a clone. 2498 */ |
2502 TCP_SKB_CB(skb)->when = tcp_time_stamp; | |
2503 2504 /* make sure skb->data is aligned on arches that require it 2505 * and check if ack-trimming & collapsing extended the headroom 2506 * beyond what csum_start can cover. 2507 */ 2508 if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) || 2509 skb_headroom(skb) >= 0xFFFF)) { 2510 struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER, --- 28 unchanged lines hidden (view full) --- 2539#endif 2540 if (!tp->retrans_out) 2541 tp->lost_retrans_low = tp->snd_nxt; 2542 TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS; 2543 tp->retrans_out += tcp_skb_pcount(skb); 2544 2545 /* Save stamp of the first retransmit. */ 2546 if (!tp->retrans_stamp) | 2499 2500 /* make sure skb->data is aligned on arches that require it 2501 * and check if ack-trimming & collapsing extended the headroom 2502 * beyond what csum_start can cover. 2503 */ 2504 if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) || 2505 skb_headroom(skb) >= 0xFFFF)) { 2506 struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER, --- 28 unchanged lines hidden (view full) --- 2535#endif 2536 if (!tp->retrans_out) 2537 tp->lost_retrans_low = tp->snd_nxt; 2538 TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS; 2539 tp->retrans_out += tcp_skb_pcount(skb); 2540 2541 /* Save stamp of the first retransmit. */ 2542 if (!tp->retrans_stamp) |
2547 tp->retrans_stamp = TCP_SKB_CB(skb)->when; | 2543 tp->retrans_stamp = tcp_skb_timestamp(skb); |
2548 2549 /* snd_nxt is stored to detect loss of retransmitted segment, 2550 * see tcp_input.c tcp_sacktag_write_queue(). 2551 */ 2552 TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt; 2553 } else if (err != -EBUSY) { 2554 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL); 2555 } --- 191 unchanged lines hidden (view full) --- 2747 return; 2748 } 2749 2750 /* Reserve space for headers and prepare control bits. */ 2751 skb_reserve(skb, MAX_TCP_HEADER); 2752 tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), 2753 TCPHDR_ACK | TCPHDR_RST); 2754 /* Send it off. */ | 2544 2545 /* snd_nxt is stored to detect loss of retransmitted segment, 2546 * see tcp_input.c tcp_sacktag_write_queue(). 2547 */ 2548 TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt; 2549 } else if (err != -EBUSY) { 2550 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL); 2551 } --- 191 unchanged lines hidden (view full) --- 2743 return; 2744 } 2745 2746 /* Reserve space for headers and prepare control bits. */ 2747 skb_reserve(skb, MAX_TCP_HEADER); 2748 tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), 2749 TCPHDR_ACK | TCPHDR_RST); 2750 /* Send it off. */ |
2755 TCP_SKB_CB(skb)->when = tcp_time_stamp; | |
2756 if (tcp_transmit_skb(sk, skb, 0, priority)) 2757 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 2758 2759 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS); 2760} 2761 2762/* Send a crossed SYN-ACK during socket establishment. 2763 * WARNING: This routine must only be called when we have already sent --- 22 unchanged lines hidden (view full) --- 2786 sk->sk_wmem_queued += nskb->truesize; 2787 sk_mem_charge(sk, nskb->truesize); 2788 skb = nskb; 2789 } 2790 2791 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK; 2792 TCP_ECN_send_synack(tcp_sk(sk), skb); 2793 } | 2751 if (tcp_transmit_skb(sk, skb, 0, priority)) 2752 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 2753 2754 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS); 2755} 2756 2757/* Send a crossed SYN-ACK during socket establishment. 2758 * WARNING: This routine must only be called when we have already sent --- 22 unchanged lines hidden (view full) --- 2781 sk->sk_wmem_queued += nskb->truesize; 2782 sk_mem_charge(sk, nskb->truesize); 2783 skb = nskb; 2784 } 2785 2786 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK; 2787 TCP_ECN_send_synack(tcp_sk(sk), skb); 2788 } |
2794 TCP_SKB_CB(skb)->when = tcp_time_stamp; | |
2795 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2796} 2797 2798/** 2799 * tcp_make_synack - Prepare a SYN-ACK. 2800 * sk: listener socket 2801 * dst: dst entry attached to the SYNACK 2802 * req: request_sock pointer --- 27 unchanged lines hidden (view full) --- 2830 2831 mss = dst_metric_advmss(dst); 2832 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) 2833 mss = tp->rx_opt.user_mss; 2834 2835 memset(&opts, 0, sizeof(opts)); 2836#ifdef CONFIG_SYN_COOKIES 2837 if (unlikely(req->cookie_ts)) | 2789 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2790} 2791 2792/** 2793 * tcp_make_synack - Prepare a SYN-ACK. 2794 * sk: listener socket 2795 * dst: dst entry attached to the SYNACK 2796 * req: request_sock pointer --- 27 unchanged lines hidden (view full) --- 2824 2825 mss = dst_metric_advmss(dst); 2826 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) 2827 mss = tp->rx_opt.user_mss; 2828 2829 memset(&opts, 0, sizeof(opts)); 2830#ifdef CONFIG_SYN_COOKIES 2831 if (unlikely(req->cookie_ts)) |
2838 TCP_SKB_CB(skb)->when = cookie_init_timestamp(req); | 2832 skb->skb_mstamp.stamp_jiffies = cookie_init_timestamp(req); |
2839 else 2840#endif | 2833 else 2834#endif |
2841 TCP_SKB_CB(skb)->when = tcp_time_stamp; | 2835 skb_mstamp_get(&skb->skb_mstamp); |
2842 tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5, 2843 foc) + sizeof(*th); 2844 2845 skb_push(skb, tcp_header_size); 2846 skb_reset_transport_header(skb); 2847 2848 th = tcp_hdr(skb); 2849 memset(th, 0, sizeof(struct tcphdr)); --- 231 unchanged lines hidden (view full) --- 3081 buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation); 3082 if (unlikely(buff == NULL)) 3083 return -ENOBUFS; 3084 3085 /* Reserve space for headers. */ 3086 skb_reserve(buff, MAX_TCP_HEADER); 3087 3088 tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); | 2836 tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5, 2837 foc) + sizeof(*th); 2838 2839 skb_push(skb, tcp_header_size); 2840 skb_reset_transport_header(skb); 2841 2842 th = tcp_hdr(skb); 2843 memset(th, 0, sizeof(struct tcphdr)); --- 231 unchanged lines hidden (view full) --- 3075 buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation); 3076 if (unlikely(buff == NULL)) 3077 return -ENOBUFS; 3078 3079 /* Reserve space for headers. */ 3080 skb_reserve(buff, MAX_TCP_HEADER); 3081 3082 tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); |
3089 tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp; | 3083 tp->retrans_stamp = tcp_time_stamp; |
3090 tcp_connect_queue_skb(sk, buff); 3091 TCP_ECN_send_syn(sk, buff); 3092 3093 /* Send off SYN; include data in Fast Open. */ 3094 err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) : 3095 tcp_transmit_skb(sk, buff, 1, sk->sk_allocation); 3096 if (err == -ECONNREFUSED) 3097 return err; --- 91 unchanged lines hidden (view full) --- 3189 return; 3190 } 3191 3192 /* Reserve space for headers and prepare control bits. */ 3193 skb_reserve(buff, MAX_TCP_HEADER); 3194 tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK); 3195 3196 /* Send it off, this clears delayed acks for us. */ | 3084 tcp_connect_queue_skb(sk, buff); 3085 TCP_ECN_send_syn(sk, buff); 3086 3087 /* Send off SYN; include data in Fast Open. */ 3088 err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) : 3089 tcp_transmit_skb(sk, buff, 1, sk->sk_allocation); 3090 if (err == -ECONNREFUSED) 3091 return err; --- 91 unchanged lines hidden (view full) --- 3183 return; 3184 } 3185 3186 /* Reserve space for headers and prepare control bits. */ 3187 skb_reserve(buff, MAX_TCP_HEADER); 3188 tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK); 3189 3190 /* Send it off, this clears delayed acks for us. */ |
3197 TCP_SKB_CB(buff)->when = tcp_time_stamp; | 3191 skb_mstamp_get(&buff->skb_mstamp); |
3198 tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC)); 3199} 3200 3201/* This routine sends a packet with an out of date sequence 3202 * number. It assumes the other end will try to ack it. 3203 * 3204 * Question: what should we make while urgent mode? 3205 * 4.4BSD forces sending single byte of data. We cannot send --- 15 unchanged lines hidden (view full) --- 3221 3222 /* Reserve space for headers and set control bits. */ 3223 skb_reserve(skb, MAX_TCP_HEADER); 3224 /* Use a previous sequence. This should cause the other 3225 * end to send an ack. Don't queue or clone SKB, just 3226 * send it. 3227 */ 3228 tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK); | 3192 tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC)); 3193} 3194 3195/* This routine sends a packet with an out of date sequence 3196 * number. It assumes the other end will try to ack it. 3197 * 3198 * Question: what should we make while urgent mode? 3199 * 4.4BSD forces sending single byte of data. We cannot send --- 15 unchanged lines hidden (view full) --- 3215 3216 /* Reserve space for headers and set control bits. */ 3217 skb_reserve(skb, MAX_TCP_HEADER); 3218 /* Use a previous sequence. This should cause the other 3219 * end to send an ack. Don't queue or clone SKB, just 3220 * send it. 3221 */ 3222 tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK); |
3229 TCP_SKB_CB(skb)->when = tcp_time_stamp; | 3223 skb_mstamp_get(&skb->skb_mstamp); |
3230 return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC); 3231} 3232 3233void tcp_send_window_probe(struct sock *sk) 3234{ 3235 if (sk->sk_state == TCP_ESTABLISHED) { 3236 tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1; 3237 tcp_xmit_probe_skb(sk, 0); --- 27 unchanged lines hidden (view full) --- 3265 seg_size = min(seg_size, mss); 3266 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; 3267 if (tcp_fragment(sk, skb, seg_size, mss, GFP_ATOMIC)) 3268 return -1; 3269 } else if (!tcp_skb_pcount(skb)) 3270 tcp_set_skb_tso_segs(sk, skb, mss); 3271 3272 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; | 3224 return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC); 3225} 3226 3227void tcp_send_window_probe(struct sock *sk) 3228{ 3229 if (sk->sk_state == TCP_ESTABLISHED) { 3230 tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1; 3231 tcp_xmit_probe_skb(sk, 0); --- 27 unchanged lines hidden (view full) --- 3259 seg_size = min(seg_size, mss); 3260 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; 3261 if (tcp_fragment(sk, skb, seg_size, mss, GFP_ATOMIC)) 3262 return -1; 3263 } else if (!tcp_skb_pcount(skb)) 3264 tcp_set_skb_tso_segs(sk, skb, mss); 3265 3266 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; |
3273 TCP_SKB_CB(skb)->when = tcp_time_stamp; | |
3274 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 3275 if (!err) 3276 tcp_event_new_data_sent(sk, skb); 3277 return err; 3278 } else { 3279 if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF)) 3280 tcp_xmit_probe_skb(sk, 1); 3281 return tcp_xmit_probe_skb(sk, 0); --- 58 unchanged lines hidden --- | 3267 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 3268 if (!err) 3269 tcp_event_new_data_sent(sk, skb); 3270 return err; 3271 } else { 3272 if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF)) 3273 tcp_xmit_probe_skb(sk, 1); 3274 return tcp_xmit_probe_skb(sk, 0); --- 58 unchanged lines hidden --- |
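The pattern running through this diff: every writer of the old jiffies-resolution `TCP_SKB_CB(skb)->when` field (the right-hand column drops it) is replaced by stamping the skb itself with `skb_mstamp_get(&skb->skb_mstamp)`, and every reader is replaced by `tcp_skb_timestamp(skb)`, so the per-packet send time is kept once, at microsecond resolution, in `skb->skb_mstamp`. The sketch below illustrates that idea outside the kernel; all types and helpers in it (`struct skb_mstamp`, `skb_mstamp_get()`, `tcp_skb_timestamp()`, `skb_mstamp_us_delta()`) are simplified user-space stand-ins modeled on the names seen in the diff, not the real kernel definitions.

```c
/*
 * Stand-alone sketch of the skb_mstamp idea: store one microsecond-resolution
 * timestamp per packet and derive the coarse jiffies-granularity "when" value
 * from it on demand.  Illustrative only; not the kernel implementation.
 */
#define _POSIX_C_SOURCE 199309L
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define HZ           1000u            /* assume 1000 jiffies per second */
#define USEC_PER_SEC 1000000u

struct skb_mstamp {
	uint32_t stamp_us;            /* microsecond clock (wraps in 32 bits) */
	uint32_t stamp_jiffies;       /* jiffies-granularity clock */
};

struct sk_buff {
	struct skb_mstamp skb_mstamp; /* per-packet send timestamp */
};

/* Analogue of skb_mstamp_get(): sample both clocks from a monotonic source. */
static void skb_mstamp_get(struct skb_mstamp *cl)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	cl->stamp_us = (uint32_t)(ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / 1000);
	cl->stamp_jiffies = (uint32_t)(ts.tv_sec * HZ +
				       ts.tv_nsec / (1000000000u / HZ));
}

/* Analogue of tcp_skb_timestamp(): what used to be TCP_SKB_CB(skb)->when. */
static uint32_t tcp_skb_timestamp(const struct sk_buff *skb)
{
	return skb->skb_mstamp.stamp_jiffies;
}

/* Microsecond delta between two stamps, e.g. for an RTT sample. */
static uint32_t skb_mstamp_us_delta(const struct skb_mstamp *t1,
				    const struct skb_mstamp *t0)
{
	return t1->stamp_us - t0->stamp_us;
}

int main(void)
{
	struct sk_buff skb;
	struct skb_mstamp now;

	skb_mstamp_get(&skb.skb_mstamp);  /* "transmit" the packet */
	skb_mstamp_get(&now);             /* later: an ACK for it arrives */

	printf("when (jiffies)   : %u\n", tcp_skb_timestamp(&skb));
	printf("rtt sample (usec): %u\n",
	       skb_mstamp_us_delta(&now, &skb.skb_mstamp));
	return 0;
}
```

With a single microsecond stamp carried by the skb, the coarse jiffies value the old code maintained separately in the control block can be derived whenever it is needed (retransmit timer start, retrans_stamp, timestamp options), which is what lets the right-hand column delete the `->when` assignments entirely.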