// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Checksum updating actions
 *
 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
 */
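
/*
 * Illustrative usage sketch (assuming iproute2's tc(8) syntax for this
 * action): recompute the IPv4 header and TCP checksums on matching
 * packets, e.g.:
 *
 *   tc filter add dev eth0 parent 1: protocol ip prio 10 \
 *      u32 match ip protocol 6 0xff \
 *      action csum ip4h and tcp
 */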

#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>

#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/igmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>
#include <net/sctp/checksum.h>

#include <net/act_api.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_csum.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_wrapper.h>

static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
	[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};

static struct tc_action_ops act_csum_ops;

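/* Parse the netlink attributes and create or update a csum action
 * instance. New parameters are swapped in under tcf_lock and the old
 * ones are freed only after an RCU grace period, so the datapath never
 * sees a half-updated state.
 */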
static int tcf_csum_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a,
			 struct tcf_proto *tp,
			 u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_csum_ops.net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct tcf_csum_params *params_new;
	struct nlattr *tb[TCA_CSUM_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_csum *parm;
	struct tcf_csum *p;
	int ret = 0, err;
	u32 index;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_CSUM_MAX, nla, csum_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_CSUM_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_CSUM_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (!err) {
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_csum_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (err > 0) {
		if (bind) /* don't override defaults */
			return ACT_P_BOUND;
		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	} else {
		return err;
	}

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	p = to_tcf_csum(*a);

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		err = -ENOMEM;
		goto put_chain;
	}
	params_new->update_flags = parm->update_flags;

	spin_lock_bh(&p->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params_new = rcu_replace_pointer(p->params, params_new,
					 lockdep_is_held(&p->tcf_lock));
	spin_unlock_bh(&p->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params_new)
		kfree_rcu(params_new, rcu);

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

/**
 * tcf_csum_skb_nextlayer - Get next layer pointer
 * @skb: sk_buff to use
 * @ihl: previous summed headers length
 * @ipl: complete packet length
 * @jhl: next header length
 *
 * Check that the expected next layer is available in the specified sk_buff.
 * Return a pointer to the next layer if it is, NULL otherwise.
 */
static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
				    unsigned int ihl, unsigned int ipl,
				    unsigned int jhl)
{
	int ntkoff = skb_network_offset(skb);
	int hl = ihl + jhl;

	if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
	    skb_try_make_writable(skb, hl + ntkoff))
		return NULL;
	else
		return (void *)(skb_network_header(skb) + ihl);
}

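/* Recompute the ICMPv4 checksum: zero the checksum field, sum the
 * message with csum_partial() and fold the result. ICMPv4 (like IGMP
 * below) does not cover a pseudo-header.
 */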
static int tcf_csum_ipv4_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmphdr *icmph;

	icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
	if (icmph == NULL)
		return 0;

	icmph->checksum = 0;
	skb->csum = csum_partial(icmph, ipl - ihl, 0);
	icmph->checksum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct igmphdr *igmph;

	igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
	if (igmph == NULL)
		return 0;

	igmph->csum = 0;
	skb->csum = csum_partial(igmph, ipl - ihl, 0);
	igmph->csum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

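/* Recompute the ICMPv6 checksum. Unlike ICMPv4, ICMPv6 covers an IPv6
 * pseudo-header, so the sum is finalized with csum_ipv6_magic().
 */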
static int tcf_csum_ipv6_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmp6hdr *icmp6h;
	const struct ipv6hdr *ip6h;

	icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
	if (icmp6h == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	icmp6h->icmp6_cksum = 0;
	skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					      ipl - ihl, IPPROTO_ICMPV6,
					      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

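/* For GSO packets the TCP checksum is filled in at segmentation time,
 * so the TCP handlers below leave such skbs untouched and report
 * success.
 */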
static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct iphdr *iph;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	iph = ip_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = tcp_v4_check(ipl - ihl,
				   iph->saddr, iph->daddr, skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct ipv6hdr *ip6h;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				      ipl - ihl, IPPROTO_TCP,
				      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

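/* For UDP over IPv4 a zero checksum field means "no checksum", so a
 * computed checksum of zero must be sent as CSUM_MANGLED_0 (0xffff),
 * and an skb that already carries a zero checksum is left alone unless
 * it is UDP-Lite, where the checksum is mandatory.
 */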
static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct iphdr *iph;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDP-Lite checksum algorithms. Don't use
	 * udph->len to get the real payload length without checking the
	 * protocol first: UDP-Lite reuses udph->len as the checksum
	 * coverage. Use iph->tot_len, or simply ipl, instead.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	iph = ip_hdr(skb);
	ul = ntohs(udph->len);

	if (udplite || udph->check) {

		udph->check = 0;

		if (udplite) {
			if (ul == 0)
				skb->csum = csum_partial(udph, ipl - ihl, 0);
			else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
				skb->csum = csum_partial(udph, ul, 0);
			else
				goto ignore_obscure_skb;
		} else {
			if (ul != ipl - ihl)
				goto ignore_obscure_skb;

			skb->csum = csum_partial(udph, ul, 0);
		}

		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						ul, iph->protocol,
						skb->csum);

		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct ipv6hdr *ip6h;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDP-Lite checksum algorithms. Don't use
	 * udph->len to get the real payload length without checking the
	 * protocol first: UDP-Lite reuses udph->len as the checksum
	 * coverage. Use ip6h->payload_len + sizeof(*ip6h), or simply ipl,
	 * instead.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	ul = ntohs(udph->len);

	udph->check = 0;

	if (udplite) {
		if (ul == 0)
			skb->csum = csum_partial(udph, ipl - ihl, 0);

		else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
			skb->csum = csum_partial(udph, ul, 0);

		else
			goto ignore_obscure_skb;
	} else {
		if (ul != ipl - ihl)
			goto ignore_obscure_skb;

		skb->csum = csum_partial(udph, ul, 0);
	}

	udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
				      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
				      skb->csum);

	if (!udph->check)
		udph->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

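/* SCTP uses a CRC32c of the whole packet rather than the Internet
 * checksum; sctp_compute_cksum() computes it and
 * skb_reset_csum_not_inet() drops any stale offload checksum state.
 */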
static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
			 unsigned int ipl)
{
	struct sctphdr *sctph;

	if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
		return 1;

	sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph));
	if (!sctph)
		return 0;

	sctph->checksum = sctp_compute_cksum(skb,
					     skb_network_offset(skb) + ihl);
	skb_reset_csum_not_inet(skb);

	return 1;
}

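/* Dispatch on the IPv4 protocol field and recompute the checksums
 * selected in update_flags. Non-first fragments carry no transport
 * header, so the switch below treats them as protocol 0 and only the
 * IPv4 header checksum can be updated for them.
 */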
static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
	const struct iphdr *iph;
	int ntkoff;

	ntkoff = skb_network_offset(skb);

	if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
		goto fail;

	iph = ip_hdr(skb);

	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_ICMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
			if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_IGMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
			if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_TCP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
			if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_UDP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 0))
				goto fail;
		break;
	case IPPROTO_UDPLITE:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 1))
				goto fail;
		break;
	case IPPROTO_SCTP:
		if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
		    !tcf_csum_sctp(skb, iph->ihl * 4, ntohs(iph->tot_len)))
			goto fail;
		break;
	}

	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
		if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
			goto fail;

		ip_send_check(ip_hdr(skb));
	}

	return 1;

fail:
	return 0;
}

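/* Scan a hop-by-hop options header for a jumbogram option: jumbo
 * payloads carry their real length in the option rather than in
 * payload_len, so *pl is rewritten when a well-formed jumbo option is
 * found. Returns 0 only on a malformed jumbo option.
 */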
static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh, unsigned int ixhl,
				 unsigned int *pl)
{
	int off, len, optlen;
	unsigned char *xh = (void *)ip6xh;

	off = sizeof(*ip6xh);
	len = ixhl - off;

	while (len > 1) {
		switch (xh[off]) {
		case IPV6_TLV_PAD1:
			optlen = 1;
			break;
		case IPV6_TLV_JUMBO:
			optlen = xh[off + 1] + 2;
			if (optlen != 6 || len < 6 || (off & 3) != 2)
				/* wrong jumbo option length/alignment */
				return 0;
			*pl = ntohl(*(__be32 *)(xh + off + 2));
			goto done;
		default:
			optlen = xh[off + 1] + 2;
			if (optlen > len)
				/* ignore obscure options */
				goto done;
			break;
		}
		off += optlen;
		len -= optlen;
	}

done:
	return 1;
}

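/* Walk the IPv6 extension header chain until a transport header is
 * reached, then recompute its checksum if requested. Fragments and
 * unknown next headers are passed through untouched.
 */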
static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
	struct ipv6hdr *ip6h;
	struct ipv6_opt_hdr *ip6xh;
	unsigned int hl, ixhl;
	unsigned int pl;
	int ntkoff;
	u8 nexthdr;

	ntkoff = skb_network_offset(skb);

	hl = sizeof(*ip6h);

	if (!pskb_may_pull(skb, hl + ntkoff))
		goto fail;

	ip6h = ipv6_hdr(skb);

	pl = ntohs(ip6h->payload_len);
	nexthdr = ip6h->nexthdr;

	do {
		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			goto ignore_skb;
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			ixhl = ipv6_optlen(ip6xh);
			if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			if ((nexthdr == NEXTHDR_HOP) &&
			    !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
				goto fail;
			nexthdr = ip6xh->nexthdr;
			hl += ixhl;
			break;
		case IPPROTO_ICMPV6:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
				if (!tcf_csum_ipv6_icmp(skb,
							hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_TCP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
				if (!tcf_csum_ipv6_tcp(skb,
						       hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_UDP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 0))
					goto fail;
			goto done;
		case IPPROTO_UDPLITE:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 1))
					goto fail;
			goto done;
		case IPPROTO_SCTP:
			if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
			    !tcf_csum_sctp(skb, hl, pl + sizeof(*ip6h)))
				goto fail;
			goto done;
		default:
			goto ignore_skb;
		}
	} while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
	return 1;

fail:
	return 0;
}

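/* Datapath entry point. VLAN tags (accelerated or in-line) are peeled
 * off so the network header points at the IP header while checksums
 * are recomputed, and any pulled headers are pushed back before the
 * skb continues through the stack.
 */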
TC_INDIRECT_SCOPE int tcf_csum_act(struct sk_buff *skb,
				   const struct tc_action *a,
				   struct tcf_result *res)
{
	struct tcf_csum *p = to_tcf_csum(a);
	bool orig_vlan_tag_present = false;
	unsigned int vlan_hdr_count = 0;
	struct tcf_csum_params *params;
	u32 update_flags;
	__be16 protocol;
	int action;

	params = rcu_dereference_bh(p->params);

	tcf_lastuse_update(&p->tcf_tm);
	tcf_action_update_bstats(&p->common, skb);

	action = READ_ONCE(p->tcf_action);
	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	update_flags = params->update_flags;
	protocol = skb_protocol(skb, false);
again:
	switch (protocol) {
	case cpu_to_be16(ETH_P_IP):
		if (!tcf_csum_ipv4(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		if (!tcf_csum_ipv6(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_8021AD):
		fallthrough;
	case cpu_to_be16(ETH_P_8021Q):
		if (skb_vlan_tag_present(skb) && !orig_vlan_tag_present) {
			protocol = skb->protocol;
			orig_vlan_tag_present = true;
		} else {
			struct vlan_hdr *vlan = (struct vlan_hdr *)skb->data;

			protocol = vlan->h_vlan_encapsulated_proto;
			skb_pull(skb, VLAN_HLEN);
			skb_reset_network_header(skb);
			vlan_hdr_count++;
		}
		goto again;
	}

out:
	/* Restore the skb for the pulled VLAN tags */
	while (vlan_hdr_count--) {
		skb_push(skb, VLAN_HLEN);
		skb_reset_network_header(skb);
	}

	return action;

drop:
	tcf_action_inc_drop_qstats(&p->common);
	action = TC_ACT_SHOT;
	goto out;
}

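/* Dump the action's parameters to user space; tcf_lock keeps
 * opt.action and update_flags consistent with each other.
 */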
static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			 int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;
	struct tc_csum opt = {
		.index   = p->tcf_index,
		.refcnt  = refcount_read(&p->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&p->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&p->tcf_lock);
	params = rcu_dereference_protected(p->params,
					   lockdep_is_held(&p->tcf_lock));
	opt.action = p->tcf_action;
	opt.update_flags = params->update_flags;

	if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &p->tcf_tm);
	if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&p->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&p->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static void tcf_csum_cleanup(struct tc_action *a)
{
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;

	params = rcu_dereference_protected(p->params, 1);
	if (params)
		kfree_rcu(params, rcu);
}

static size_t tcf_csum_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_csum));
}

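/* Translate the action for hardware offload: when binding, fill in a
 * flow_action_entry with the csum update flags; otherwise only report
 * the action id for the flow_offload_action query.
 */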
static int tcf_csum_offload_act_setup(struct tc_action *act, void *entry_data,
				      u32 *index_inc, bool bind,
				      struct netlink_ext_ack *extack)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;

		entry->id = FLOW_ACTION_CSUM;
		entry->csum_flags = tcf_csum_update_flags(act);
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		fl_action->id = FLOW_ACTION_CSUM;
	}

	return 0;
}

static struct tc_action_ops act_csum_ops = {
	.kind		= "csum",
	.id		= TCA_ID_CSUM,
	.owner		= THIS_MODULE,
	.act		= tcf_csum_act,
	.dump		= tcf_csum_dump,
	.init		= tcf_csum_init,
	.cleanup	= tcf_csum_cleanup,
	.get_fill_size  = tcf_csum_get_fill_size,
	.offload_act_setup = tcf_csum_offload_act_setup,
	.size		= sizeof(struct tcf_csum),
};
MODULE_ALIAS_NET_ACT("csum");

static __net_init int csum_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_csum_ops.net_id);

	return tc_action_net_init(net, tn, &act_csum_ops);
}

static void __net_exit csum_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_csum_ops.net_id);
}

static struct pernet_operations csum_net_ops = {
	.init = csum_init_net,
	.exit_batch = csum_exit_net,
	.id   = &act_csum_ops.net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");

static int __init csum_init_module(void)
{
	return tcf_register_action(&act_csum_ops, &csum_net_ops);
}

static void __exit csum_cleanup_module(void)
{
	tcf_unregister_action(&act_csum_ops, &csum_net_ops);
}

module_init(csum_init_module);
module_exit(csum_cleanup_module);