Lines matching the identifier "skb" (full-word search) in net/ipv4/tcp_offload.c of the Linux kernel; each match is prefixed with its line number in that file.

16 static void tcp_gso_tstamp(struct sk_buff *skb, struct sk_buff *gso_skb,
22 while (skb) {
24 skb_shinfo(skb)->tx_flags |= flags;
25 skb_shinfo(skb)->tskey = ts_seq;
29 skb = skb->next;
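
tcp_gso_tstamp() transfers a pending TX-timestamp request from the original GSO skb onto the one output segment whose sequence range covers the requested key, walking the freshly built list segment by segment. A minimal userspace model of that walk (struct seg and its fields are simplified stand-ins for sk_buff and skb_shared_info, and seq_before() mirrors the kernel's wraparound-safe before()):

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

struct seg {                      /* simplified stand-in for sk_buff */
	struct seg *next;
	uint32_t tx_flags;
	uint32_t tskey;
};

/* Wraparound-safe "a < b" on 32-bit sequence numbers, like before(). */
static bool seq_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

/* Tag the first segment whose [seq, seq + mss) range covers ts_seq. */
static void gso_tstamp(struct seg *s, uint32_t flags, uint32_t ts_seq,
		       uint32_t seq, uint32_t mss)
{
	while (s) {
		if (seq_before(ts_seq, seq + mss)) {
			s->tx_flags |= flags;
			s->tskey = ts_seq;
			return;
		}
		s = s->next;
		seq += mss;
	}
}
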
88 static struct sk_buff *__tcp4_gso_segment_list(struct sk_buff *skb,
91 skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
92 if (IS_ERR(skb))
93 return skb;
95 return __tcpv4_gso_segment_list_csum(skb);
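
skb_segment_list() reports failure by encoding an errno in the returned pointer itself, which is why line 92 tests the result with IS_ERR() and line 93 passes the error pointer straight back to the caller. A minimal userspace model of that kernel idiom (the real ERR_PTR()/IS_ERR() macros live in include/linux/err.h):

#include <stdio.h>
#include <stdint.h>

#define MAX_ERRNO 4095

/* Errors fit in the top MAX_ERRNO values of the address space, which
 * are never valid kernel pointers, so one return slot carries both. */
static void *err_ptr(long err)            /* like ERR_PTR() */
{
	return (void *)err;
}

static int is_err(const void *p)          /* like IS_ERR() */
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

int main(void)
{
	int x = 0;
	void *ok = &x;
	void *bad = err_ptr(-22);         /* -EINVAL */

	printf("ok is_err=%d, bad is_err=%d\n", is_err(ok), is_err(bad));
	return 0;
}
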
98 static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
101 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
104 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
107 if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) {
108 struct tcphdr *th = tcp_hdr(skb);
110 if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size)
111 return __tcp4_gso_segment_list(skb, features);
113 skb->ip_summed = CHECKSUM_NONE;
116 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
117 const struct iphdr *iph = ip_hdr(skb);
118 struct tcphdr *th = tcp_hdr(skb);
125 skb->ip_summed = CHECKSUM_PARTIAL;
126 __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
129 return tcp_gso_segment(skb, features);
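
The unlikely branch at lines 116-126 copes with packets that reach segmentation without checksum-offload state: it flips them to CHECKSUM_PARTIAL and seeds th->check via __tcp_v4_send_check(), so the per-segment code only has to fold in the payload later. A self-contained sketch of the IPv4 pseudo-header sum involved (plain RFC 1071 arithmetic, with addresses and length taken in host order for the illustration; the kernel's tcp_v4_check() helpers do the equivalent):

#include <stdint.h>
#include <netinet/in.h>              /* IPPROTO_TCP */

/* Fold a 32-bit one's-complement accumulator down to 16 bits. */
static uint16_t csum_fold16(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* IPv4/TCP pseudo-header sum.  The kernel stores it un-negated in
 * th->check so hardware (or gso_make_checksum()) can keep folding in
 * payload words and negate exactly once at the end. */
static uint16_t tcp_pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
				   uint16_t tcp_len)
{
	uint32_t sum = 0;

	sum += saddr >> 16;
	sum += saddr & 0xffff;
	sum += daddr >> 16;
	sum += daddr & 0xffff;
	sum += IPPROTO_TCP;
	sum += tcp_len;

	return csum_fold16(sum);
}
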
132 struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
142 struct sk_buff *gso_skb = skb;
148 th = tcp_hdr(skb);
153 if (unlikely(skb_checksum_start(skb) != skb_transport_header(skb)))
156 if (!pskb_may_pull(skb, thlen))
159 oldlen = ~skb->len;
160 __skb_pull(skb, thlen);
162 mss = skb_shinfo(skb)->gso_size;
163 if (unlikely(skb->len <= mss))
166 if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
169 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
178 skb->ooo_okay = 0;
180 segs = skb_segment(skb, features);
189 * cases return a GSO skb. So update the mss now.
196 skb = segs;
197 th = tcp_hdr(skb);
207 while (skb->next) {
211 if (skb->ip_summed == CHECKSUM_PARTIAL)
212 gso_reset_checksum(skb, ~th->check);
214 th->check = gso_make_checksum(skb, ~th->check);
218 skb->destructor = gso_skb->destructor;
219 skb->sk = gso_skb->sk;
220 sum_truesize += skb->truesize;
222 skb = skb->next;
223 th = tcp_hdr(skb);
238 swap(gso_skb->sk, skb->sk);
239 swap(gso_skb->destructor, skb->destructor);
240 sum_truesize += skb->truesize;
246 refcount_add(delta, &skb->sk->sk_wmem_alloc);
248 WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
252 (skb_tail_pointer(skb) -
253 skb_transport_header(skb)) +
254 skb->data_len);
256 if (skb->ip_summed == CHECKSUM_PARTIAL)
257 gso_reset_checksum(skb, ~th->check);
259 th->check = gso_make_checksum(skb, ~th->check);
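
Much of tcp_gso_segment() is bookkeeping: the segments inherit the owning socket and destructor from gso_skb (lines 218-219, 238-239) so sk_wmem_alloc can be corrected by the truesize delta (lines 246-248), and th->check is patched incrementally rather than recomputed, which is why oldlen at line 159 holds the one's-complement negation of the old total length. A worked userspace example of that incremental update, HC' = ~(~HC + ~m + m') from RFC 1624 (the input values are arbitrary for the demonstration):

#include <stdio.h>
#include <stdint.h>

static uint32_t csum_fold16(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

/* Incrementally patch a checksum when one 16-bit quantity it covers
 * changes from old to new, without re-summing the payload. */
static uint16_t csum_update(uint16_t check, uint16_t old, uint16_t new)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old;
	sum += new;
	return (uint16_t)~csum_fold16(sum);
}

int main(void)
{
	/* pseudo-header length shrinking from 64000 to 1460 bytes */
	uint16_t patched = csum_update(0x1c46 /* arbitrary */, 64000, 1460);

	printf("0x%04x\n", patched);
	return 0;
}
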
285 struct tcphdr *tcp_gro_pull_header(struct sk_buff *skb)
290 off = skb_gro_offset(skb);
292 th = skb_gro_header(skb, hlen, off);
301 if (!skb_gro_may_pull(skb, hlen)) {
302 th = skb_gro_header_slow(skb, hlen, off);
307 skb_gro_pull(skb, thlen);
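
tcp_gro_pull_header() uses GRO's usual two-step header access: the cheap lookup at line 292 succeeds when the header already sits in directly addressable data, and only the miss path at line 302 falls back to the slow, pull-based skb_gro_header_slow(). A simplified standalone model of that pattern (struct buf and the helper names are invented for the illustration):

#include <stdint.h>
#include <stddef.h>

struct buf {                   /* invented stand-in for GRO's skb view */
	uint8_t *data;         /* packet bytes */
	size_t linear;         /* bytes that are directly addressable */
	size_t offset;         /* parse position, like skb_gro_offset() */
};

/* Fast path: the header already lies inside the linear area. */
static void *hdr_fast(struct buf *b, size_t hlen)
{
	return b->offset + hlen <= b->linear ? b->data + b->offset : NULL;
}

/* Slow-path stub; skb_gro_header_slow() really linearizes more bytes. */
static void *hdr_slow(struct buf *b, size_t hlen)
{
	(void)hlen;
	return b->data + b->offset;    /* pretend the pull succeeded */
}

static void *pull_header(struct buf *b, size_t hlen)
{
	void *h = hdr_fast(b, hlen);

	if (!h)
		h = hdr_slow(b, hlen);
	if (h)
		b->offset += hlen;     /* like skb_gro_pull(skb, thlen) */
	return h;
}
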
312 struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
325 len = skb_gro_len(skb);
345 /* If skb is a GRO packet, make sure its gso_size matches prior packet mss.
349 if (unlikely(skb_is_gso(skb)))
350 flush |= (mss != skb_shinfo(skb)->gso_size);
355 flush |= skb_cmp_decrypted(p, skb);
359 flush |= skb->ip_summed != p->ip_summed;
360 flush |= skb->csum_level != p->csum_level;
362 skb_set_network_header(skb, skb_gro_receive_network_offset(skb));
364 if (flush || skb_gro_receive_list(p, skb))
370 if (flush || skb_gro_receive(p, skb)) {
379 if (unlikely(skb_is_gso(skb)))
380 flush = len != NAPI_GRO_CB(skb)->count * skb_shinfo(skb)->gso_size;
388 if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
391 NAPI_GRO_CB(skb)->flush |= (flush != 0);
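
Strip away the flush plumbing and tcp_gro_receive() enforces one invariant: every sub-packet merged into a flow carries the same MSS (lines 349-350), and after a merge the aggregate must still be a whole number of gso_size units, so only a final short segment may close it out (lines 379-380). A compact standalone model of those two checks (field names are illustrative, not the kernel's NAPI_GRO_CB layout):

#include <stdbool.h>
#include <stdint.h>

struct flow_state {            /* invented summary of NAPI_GRO_CB state */
	uint32_t total_len;    /* payload aggregated so far */
	uint32_t count;        /* sub-packets merged so far */
	uint16_t mss;          /* gso_size every sub-packet must share */
};

/* Entry check: a packet that is itself GSO must carry the flow's mss. */
static bool mss_matches(const struct flow_state *f, bool pkt_is_gso,
			uint16_t pkt_mss)
{
	return !pkt_is_gso || pkt_mss == f->mss;
}

/* Post-merge check: the aggregate must remain a whole number of mss
 * units; only a final, shorter segment may close the flow out. */
static bool still_aligned(const struct flow_state *f)
{
	return f->total_len == (uint32_t)f->count * f->mss;
}
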
396 void tcp_gro_complete(struct sk_buff *skb)
398 struct tcphdr *th = tcp_hdr(skb);
401 if (skb->encapsulation)
402 skb->inner_transport_header = skb->transport_header;
404 skb->csum_start = (unsigned char *)th - skb->head;
405 skb->csum_offset = offsetof(struct tcphdr, check);
406 skb->ip_summed = CHECKSUM_PARTIAL;
408 shinfo = skb_shinfo(skb);
409 shinfo->gso_segs = NAPI_GRO_CB(skb)->count;
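
tcp_gro_complete() hands the merged super-packet on in CHECKSUM_PARTIAL state: csum_start marks where a later checksum pass begins summing, and csum_offset says where, relative to that start, the result lands (lines 404-406). A userspace model of how such a consumer would finish the job, assuming th->check was already seeded with the pseudo-header sum (RFC 1071 arithmetic; the layout arguments are invented for the sketch):

#include <stdint.h>
#include <stddef.h>

/* One's-complement sum from csum_start to the end of the packet, the
 * way a checksum engine completes CHECKSUM_PARTIAL; the folded result
 * belongs at pkt[csum_start + csum_offset]. */
static uint16_t csum_finish(const uint8_t *pkt, size_t pkt_len,
			    size_t csum_start, size_t csum_offset)
{
	uint32_t sum = 0;
	size_t i;

	for (i = csum_start; i + 1 < pkt_len; i += 2)
		sum += (uint32_t)pkt[i] << 8 | pkt[i + 1];
	if (i < pkt_len)
		sum += (uint32_t)pkt[i] << 8;      /* odd trailing byte */

	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);

	(void)csum_offset;                 /* store location, not summed */
	return (uint16_t)~sum;
}
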
416 static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
425 if (likely(!(skb->dev->features & NETIF_F_GRO_FRAGLIST)))
430 NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
434 inet_get_iif_sdif(skb, &iif, &sdif);
435 iph = skb_gro_network_header(skb);
436 net = dev_net_rcu(skb->dev);
440 NAPI_GRO_CB(skb)->is_flist = !sk;
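
tcp4_check_fraglist_gro() makes a policy call rather than a parsing one: fraglist GRO keeps the original frames chained instead of merging payloads, which only pays off when the flow is forwarded rather than delivered locally, so is_flist is set only when the socket lookup around lines 434-440 comes up empty. A condensed model of that decision (the lookup is stubbed out; the kernel performs a real established-socket lookup on the packet's 4-tuple):

#include <stdbool.h>

struct flow4 {                     /* 4-tuple stand-in */
	unsigned int saddr, daddr;
	unsigned short sport, dport;
};

/* Stub for the sketch; the kernel looks the flow up in the TCP
 * established hash here. */
static bool local_socket_exists(const struct flow4 *f)
{
	(void)f;
	return false;
}

/* Fraglist GRO preserves the original frames, which is cheap to undo
 * on the forwarding path but wasteful for locally terminated flows. */
static bool use_fraglist_gro(const struct flow4 *f, bool dev_opted_in)
{
	return dev_opted_in && !local_socket_exists(f);
}
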
446 struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
451 if (!NAPI_GRO_CB(skb)->flush &&
452 skb_gro_checksum_validate(skb, IPPROTO_TCP,
456 th = tcp_gro_pull_header(skb);
460 tcp4_check_fraglist_gro(head, skb, th);
462 return tcp_gro_receive(head, skb, th);
465 NAPI_GRO_CB(skb)->flush = 1;
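
tcp4_gro_receive() is deliberately thin: validate the checksum against the pseudo-header first (lines 451-452), pull the TCP header next (line 456), and only then involve the address-family-independent tcp_gro_receive(); any failure on the way sets NAPI_GRO_CB(skb)->flush instead of holding the packet. The same control flow, condensed (opaque types and stubs stand in for the kernel helpers):

#include <stddef.h>

struct pkt;                        /* opaque stand-in for sk_buff */

/* Stubs for the sketch; the kernel calls skb_gro_checksum_validate()
 * and tcp_gro_pull_header() at these two points. */
static int checksum_ok(struct pkt *p) { (void)p; return 1; }
static void *pull_tcp(struct pkt *p) { (void)p; return NULL; }
static struct pkt *merge(struct pkt *p, void *th) { (void)th; return p; }

/* Validate first, parse second; any failure marks the packet
 * flush-only rather than holding it for aggregation. */
static struct pkt *tcp4_receive_model(struct pkt *p, int *flush)
{
	void *th;

	if (!checksum_ok(p))
		goto out_flush;

	th = pull_tcp(p);
	if (!th)
		goto out_flush;

	return merge(p, th);

out_flush:
	*flush = 1;                /* like NAPI_GRO_CB(skb)->flush = 1 */
	return NULL;
}
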
469 INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
471 const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
472 const struct iphdr *iph = (struct iphdr *)(skb->data + offset);
473 struct tcphdr *th = tcp_hdr(skb);
475 if (unlikely(NAPI_GRO_CB(skb)->is_flist)) {
476 skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV4;
477 skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
479 __skb_incr_checksum_unnecessary(skb);
484 th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
488 skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4 |
489 (NAPI_GRO_CB(skb)->ip_fixedid * SKB_GSO_TCP_FIXEDID);
491 tcp_gro_complete(skb);
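
Completion mirrors receive: tcp4_gro_complete() re-labels the aggregate as a GSO packet so it can be re-segmented later, re-seeding th->check from the pseudo-header (line 484) and composing gso_type from booleans, including the branch-free ip_fixedid * SKB_GSO_TCP_FIXEDID trick at line 489. A small runnable illustration of that flag composition (the flag values are invented for the sketch, not the kernel's):

#include <stdio.h>

#define GSO_TCPV4       (1u << 0)  /* illustrative values only */
#define GSO_FRAGLIST    (1u << 1)
#define GSO_TCP_FIXEDID (1u << 2)

static unsigned int gso_flags(int is_flist, int ip_fixedid)
{
	if (is_flist)
		return GSO_FRAGLIST | GSO_TCPV4;
	/* multiplying a 0/1 flag by a bit mask avoids a branch */
	return GSO_TCPV4 | (ip_fixedid * GSO_TCP_FIXEDID);
}

int main(void)
{
	printf("flist: 0x%x, fixedid: 0x%x\n",
	       gso_flags(1, 0), gso_flags(0, 1));
	return 0;
}
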