tcp_output.c (9ff17e6bdaa50892dd9bdb1b116cb71b73dd711a) | tcp_output.c (736013292e3ca5ec2aabb32daf72a73b1256ac57) |
---|---|
1// SPDX-License-Identifier: GPL-2.0-only 2/* 3 * INET An implementation of the TCP/IP protocol suite for the LINUX 4 * operating system. INET is implemented using the BSD Socket 5 * interface as the means of communication with the user level. 6 * 7 * Implementation of the Transmission Control Protocol(TCP). 8 * --- 2305 unchanged lines hidden (view full) --- 2314 return false; 2315 2316 len -= skb->len; 2317 } 2318 2319 return true; 2320} 2321 | 1// SPDX-License-Identifier: GPL-2.0-only 2/* 3 * INET An implementation of the TCP/IP protocol suite for the LINUX 4 * operating system. INET is implemented using the BSD Socket 5 * interface as the means of communication with the user level. 6 * 7 * Implementation of the Transmission Control Protocol(TCP). 8 * --- 2305 unchanged lines hidden (view full) --- 2314 return false; 2315 2316 len -= skb->len; 2317 } 2318 2319 return true; 2320} 2321 |
/* Build the payload of MTU probe @to by sharing page fragments from the
 * skbs sitting at the head of the socket write queue, without copying any
 * payload bytes: up to @probe_size bytes of fragments are referenced into
 * @to->frags.  Adjacent source fragments that are contiguous in the same
 * page are coalesced into a single destination fragment.
 *
 * Requirements visible here: every walked skb must be headless (no linear
 * data), otherwise -EINVAL; the coalesced layout must fit in MAX_SKB_FRAGS,
 * otherwise -E2BIG; memory accounting must accept the extra charge,
 * otherwise -ENOMEM.  The caller is presumably expected to have verified
 * (e.g. via tcp_can_coalesce_send_queue_head()) that at least @probe_size
 * bytes are queued — TODO confirm; running out of queued data before
 * reaching @probe_size only triggers the WARN_ON_ONCE below.
 *
 * Returns 0 on success, a negative errno on failure; on failure @to is
 * left untouched (no page references have been taken yet).
 */
static int tcp_clone_payload(struct sock *sk, struct sk_buff *to,
			     int probe_size)
{
	skb_frag_t *lastfrag = NULL, *fragto = skb_shinfo(to)->frags;
	int i, todo, len = 0, nr_frags = 0;
	const struct sk_buff *skb;

	/* Charge the probe up-front; bail before touching any frag state. */
	if (!sk_wmem_schedule(sk, to->truesize + probe_size))
		return -ENOMEM;

	skb_queue_walk(&sk->sk_write_queue, skb) {
		const skb_frag_t *fragfrom = skb_shinfo(skb)->frags;

		/* Only headless skbs can be frag-shared; linear data would
		 * require a copy, which this helper deliberately avoids.
		 */
		if (skb_headlen(skb))
			return -EINVAL;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, fragfrom++) {
			if (len >= probe_size)
				goto commit;
			/* Take at most what is still missing from the probe. */
			todo = min_t(int, skb_frag_size(fragfrom),
				     probe_size - len);
			len += todo;
			/* Coalesce with the previous destination fragment if
			 * the source continues it byte-contiguously in the
			 * same page.
			 */
			if (lastfrag &&
			    skb_frag_page(fragfrom) == skb_frag_page(lastfrag) &&
			    skb_frag_off(fragfrom) == skb_frag_off(lastfrag) +
						      skb_frag_size(lastfrag)) {
				skb_frag_size_add(lastfrag, todo);
				continue;
			}
			if (unlikely(nr_frags == MAX_SKB_FRAGS))
				return -E2BIG;
			skb_frag_page_copy(fragto, fragfrom);
			skb_frag_off_copy(fragto, fragfrom);
			skb_frag_size_set(fragto, todo);
			nr_frags++;
			lastfrag = fragto++;
		}
	}
commit:
	/* Reaching here with len < probe_size means the write queue held less
	 * data than promised — warn once but proceed.
	 */
	WARN_ON_ONCE(len != probe_size);
	/* Only now, with the frag array fully built and no error path left,
	 * grab the page references for the shared fragments.
	 */
	for (i = 0; i < nr_frags; i++)
		skb_frag_ref(to, i);

	skb_shinfo(to)->nr_frags = nr_frags;
	to->truesize += probe_size;
	to->len += probe_size;
	to->data_len += probe_size;
	__skb_header_release(to);
	return 0;
}
|
2322/* Create a new MTU probe if we are ready. 2323 * MTU probe is regularly attempting to increase the path MTU by 2324 * deliberately sending larger packets. This discovers routing 2325 * changes resulting in larger path MTUs. 2326 * 2327 * Returns 0 if we should wait to probe (no cwnd available), 2328 * 1 if a probe was sent, 2329 * -1 otherwise --- 60 unchanged lines hidden (view full) --- 2390 else 2391 return 0; 2392 } 2393 2394 if (!tcp_can_coalesce_send_queue_head(sk, probe_size)) 2395 return -1; 2396 2397 /* We're allowed to probe. Build it now. */ | 2373/* Create a new MTU probe if we are ready. 2374 * MTU probe is regularly attempting to increase the path MTU by 2375 * deliberately sending larger packets. This discovers routing 2376 * changes resulting in larger path MTUs. 2377 * 2378 * Returns 0 if we should wait to probe (no cwnd available), 2379 * 1 if a probe was sent, 2380 * -1 otherwise --- 60 unchanged lines hidden (view full) --- 2441 else 2442 return 0; 2443 } 2444 2445 if (!tcp_can_coalesce_send_queue_head(sk, probe_size)) 2446 return -1; 2447 2448 /* We're allowed to probe. Build it now. */ |
2398 nskb = tcp_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false); | 2449 nskb = tcp_stream_alloc_skb(sk, 0, GFP_ATOMIC, false); |
2399 if (!nskb) 2400 return -1; | 2450 if (!nskb) 2451 return -1; |
2452 2453 /* build the payload, and be prepared to abort if this fails. */ 2454 if (tcp_clone_payload(sk, nskb, probe_size)) { 2455 consume_skb(nskb); 2456 return -1; 2457 } |
|
2401 sk_wmem_queued_add(sk, nskb->truesize); 2402 sk_mem_charge(sk, nskb->truesize); 2403 2404 skb = tcp_send_head(sk); 2405 skb_copy_decrypted(nskb, skb); 2406 mptcp_skb_ext_copy(nskb, skb); 2407 2408 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; 2409 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; 2410 TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK; 2411 2412 tcp_insert_write_queue_before(nskb, skb, sk); 2413 tcp_highest_sack_replace(sk, skb, nskb); 2414 2415 len = 0; 2416 tcp_for_write_queue_from_safe(skb, next, sk) { 2417 copy = min_t(int, skb->len, probe_size - len); | 2458 sk_wmem_queued_add(sk, nskb->truesize); 2459 sk_mem_charge(sk, nskb->truesize); 2460 2461 skb = tcp_send_head(sk); 2462 skb_copy_decrypted(nskb, skb); 2463 mptcp_skb_ext_copy(nskb, skb); 2464 2465 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; 2466 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; 2467 TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK; 2468 2469 tcp_insert_write_queue_before(nskb, skb, sk); 2470 tcp_highest_sack_replace(sk, skb, nskb); 2471 2472 len = 0; 2473 tcp_for_write_queue_from_safe(skb, next, sk) { 2474 copy = min_t(int, skb->len, probe_size - len); |
2418 skb_copy_bits(skb, 0, skb_put(nskb, copy), copy); | |
2419 2420 if (skb->len <= copy) { 2421 /* We've eaten all the data from this skb. 2422 * Throw it away. */ 2423 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; 2424 /* If this is the last SKB we copy and eor is set 2425 * we need to propagate it to the new skb. 2426 */ --- 1715 unchanged lines hidden --- | 2475 2476 if (skb->len <= copy) { 2477 /* We've eaten all the data from this skb. 2478 * Throw it away. */ 2479 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; 2480 /* If this is the last SKB we copy and eor is set 2481 * we need to propagate it to the new skb. 2482 */ --- 1715 unchanged lines hidden --- |