/*
 * net/sched/act_csum.c	Checksum updating actions
 *
 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>

#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/igmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>
#include <net/sctp/checksum.h>

#include <net/act_api.h>

#include <linux/tc_act/tc_csum.h>
#include <net/tc_act/tc_csum.h>

static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
	[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};
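
/*
 * Usage sketch (an assumption about iproute2 syntax, not taken from this
 * file): something like
 *
 *	tc filter add dev eth0 parent 1: protocol ip u32 match u32 0 0 \
 *		action csum udp
 *
 * asks this action to recompute the UDP checksum of matching packets,
 * e.g. after pedit has rewritten addresses or ports.
 */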

static unsigned int csum_net_id;
static struct tc_action_ops act_csum_ops;

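/* Create a new csum action instance, or update an existing one, from the
 * netlink attributes in @nla. New parameters are swapped in under
 * p->tcf_lock and the old ones are freed after an RCU grace period.
 */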
static int tcf_csum_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a, int ovr,
			 int bind, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);
	struct tcf_csum_params *params_new;
	struct nlattr *tb[TCA_CSUM_MAX + 1];
	struct tc_csum *parm;
	struct tcf_csum *p;
	int ret = 0, err;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_CSUM_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_CSUM_PARMS]);

	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
	if (!err) {
		ret = tcf_idr_create(tn, parm->index, est, a,
				     &act_csum_ops, bind, true);
		if (ret) {
			tcf_idr_cleanup(tn, parm->index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (err > 0) {
		if (bind) /* don't override defaults */
			return 0;
		if (!ovr) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	} else {
		return err;
	}

	p = to_tcf_csum(*a);

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		tcf_idr_release(*a, bind);
		return -ENOMEM;
	}
	params_new->update_flags = parm->update_flags;

	spin_lock_bh(&p->tcf_lock);
	p->tcf_action = parm->action;
	rcu_swap_protected(p->params, params_new,
			   lockdep_is_held(&p->tcf_lock));
	spin_unlock_bh(&p->tcf_lock);

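	/* rcu_swap_protected() left the old parameter block, if any, in
	 * params_new; free it only after an RCU grace period.
	 */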
	if (params_new)
		kfree_rcu(params_new, rcu);

	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);

	return ret;
}

/**
 * tcf_csum_skb_nextlayer - Get next layer pointer
 * @skb: sk_buff to use
 * @ihl: previous summed headers length
 * @ipl: complete packet length
 * @jhl: next header length
 *
 * Check that the expected next layer is available and writable in the
 * specified sk_buff. Return a pointer to the next layer on success,
 * NULL otherwise.
 */
static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
				    unsigned int ihl, unsigned int ipl,
				    unsigned int jhl)
{
	int ntkoff = skb_network_offset(skb);
	int hl = ihl + jhl;

	if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
	    skb_try_make_writable(skb, hl + ntkoff))
		return NULL;
	else
		return (void *)(skb_network_header(skb) + ihl);
}

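/* Recompute the ICMPv4 checksum over the whole ICMP message. */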
static int tcf_csum_ipv4_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmphdr *icmph;

	icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
	if (icmph == NULL)
		return 0;

	icmph->checksum = 0;
	skb->csum = csum_partial(icmph, ipl - ihl, 0);
	icmph->checksum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

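/* Recompute the IGMP checksum over the whole IGMP message. */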
static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct igmphdr *igmph;

	igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
	if (igmph == NULL)
		return 0;

	igmph->csum = 0;
	skb->csum = csum_partial(igmph, ipl - ihl, 0);
	igmph->csum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

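/* Recompute the ICMPv6 checksum, including the IPv6 pseudo-header. */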
static int tcf_csum_ipv6_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmp6hdr *icmp6h;
	const struct ipv6hdr *ip6h;

	icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
	if (icmp6h == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	icmp6h->icmp6_cksum = 0;
	skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					      ipl - ihl, IPPROTO_ICMPV6,
					      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

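/* Recompute the TCP checksum over an IPv4 packet; TSO packets are left
 * for the GSO layer to checksum.
 */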
static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct iphdr *iph;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	iph = ip_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = tcp_v4_check(ipl - ihl,
				   iph->saddr, iph->daddr, skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

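/* Recompute the TCP checksum over an IPv6 packet, including the IPv6
 * pseudo-header; TSO packets are left for the GSO layer to checksum.
 */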
static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct ipv6hdr *ip6h;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				      ipl - ihl, IPPROTO_TCP,
				      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

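/* Recompute the UDP or UDP-Lite checksum over an IPv4 packet. A zero
 * checksum is legal for plain UDP, so it is only rewritten when already
 * present (or when @udplite is set, where the checksum is mandatory).
 */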
static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct iphdr *iph;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDP-Lite checksum algorithms. Don't trust
	 * udph->len as the real payload length without checking the
	 * protocol first: UDP-Lite reuses udph->len as the checksum
	 * coverage. Use iph->tot_len, or simply ipl, instead.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	iph = ip_hdr(skb);
	ul = ntohs(udph->len);

	if (udplite || udph->check) {

		udph->check = 0;

		if (udplite) {
			if (ul == 0)
				skb->csum = csum_partial(udph, ipl - ihl, 0);
			else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
				skb->csum = csum_partial(udph, ul, 0);
			else
				goto ignore_obscure_skb;
		} else {
			if (ul != ipl - ihl)
				goto ignore_obscure_skb;

			skb->csum = csum_partial(udph, ul, 0);
		}

		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						ul, iph->protocol,
						skb->csum);

		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

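/* Recompute the UDP or UDP-Lite checksum over an IPv6 packet, including
 * the IPv6 pseudo-header. Unlike IPv4, the UDP checksum is mandatory over
 * IPv6, so it is always rewritten here.
 */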
static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct ipv6hdr *ip6h;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDP-Lite checksum algorithms. Don't trust
	 * udph->len as the real payload length without checking the
	 * protocol first: UDP-Lite reuses udph->len as the checksum
	 * coverage. Use ip6h->payload_len + sizeof(*ip6h), or simply ipl.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	ul = ntohs(udph->len);

	udph->check = 0;

	if (udplite) {
		if (ul == 0)
			skb->csum = csum_partial(udph, ipl - ihl, 0);

		else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
			skb->csum = csum_partial(udph, ul, 0);

		else
			goto ignore_obscure_skb;
	} else {
		if (ul != ipl - ihl)
			goto ignore_obscure_skb;

		skb->csum = csum_partial(udph, ul, 0);
	}

	udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
				      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
				      skb->csum);

	if (!udph->check)
		udph->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

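/* Recompute the SCTP CRC32c checksum; GSO SCTP packets are left alone. */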
static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
			 unsigned int ipl)
{
	struct sctphdr *sctph;

	if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
		return 1;

	sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph));
	if (!sctph)
		return 0;

	sctph->checksum = sctp_compute_cksum(skb,
					     skb_network_offset(skb) + ihl);
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_not_inet = 0;

	return 1;
}

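/* Walk an IPv4 packet and recompute the checksums selected by
 * @update_flags. Non-first fragments carry no transport header, so only
 * the IPv4 header checksum can be updated for them.
 */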
static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
	const struct iphdr *iph;
	int ntkoff;

	ntkoff = skb_network_offset(skb);

	if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
		goto fail;

	iph = ip_hdr(skb);

	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_ICMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
			if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_IGMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
			if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_TCP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
			if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_UDP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 0))
				goto fail;
		break;
	case IPPROTO_UDPLITE:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 1))
				goto fail;
		break;
	case IPPROTO_SCTP:
		if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
		    !tcf_csum_sctp(skb, iph->ihl * 4, ntohs(iph->tot_len)))
			goto fail;
		break;
	}

	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
		if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
			goto fail;

		ip_send_check(ip_hdr(skb));
	}

	return 1;

fail:
	return 0;
}

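/* Scan a hop-by-hop options header for a jumbogram option and, if one is
 * found, replace *pl with the jumbo payload length. Returns 0 on a
 * malformed jumbo option, 1 otherwise.
 */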
static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh, unsigned int ixhl,
				 unsigned int *pl)
{
	int off, len, optlen;
	unsigned char *xh = (void *)ip6xh;

	off = sizeof(*ip6xh);
	len = ixhl - off;

	while (len > 1) {
		switch (xh[off]) {
		case IPV6_TLV_PAD1:
			optlen = 1;
			break;
		case IPV6_TLV_JUMBO:
			optlen = xh[off + 1] + 2;
			if (optlen != 6 || len < 6 || (off & 3) != 2)
				/* wrong jumbo option length/alignment */
				return 0;
			*pl = ntohl(*(__be32 *)(xh + off + 2));
			goto done;
		default:
			optlen = xh[off + 1] + 2;
			if (optlen > len)
				/* ignore obscure options */
				goto done;
			break;
		}
		off += optlen;
		len -= optlen;
	}

done:
	return 1;
}

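/* Walk the IPv6 extension header chain and recompute the checksums
 * selected by @update_flags. Fragments and unknown next headers are
 * passed through untouched.
 */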
static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
	struct ipv6hdr *ip6h;
	struct ipv6_opt_hdr *ip6xh;
	unsigned int hl, ixhl;
	unsigned int pl;
	int ntkoff;
	u8 nexthdr;

	ntkoff = skb_network_offset(skb);

	hl = sizeof(*ip6h);

	if (!pskb_may_pull(skb, hl + ntkoff))
		goto fail;

	ip6h = ipv6_hdr(skb);

	pl = ntohs(ip6h->payload_len);
	nexthdr = ip6h->nexthdr;

	do {
		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			goto ignore_skb;
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			ixhl = ipv6_optlen(ip6xh);
			if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			if ((nexthdr == NEXTHDR_HOP) &&
			    !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
				goto fail;
			nexthdr = ip6xh->nexthdr;
			hl += ixhl;
			break;
		case IPPROTO_ICMPV6:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
				if (!tcf_csum_ipv6_icmp(skb,
							hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_TCP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
				if (!tcf_csum_ipv6_tcp(skb,
						       hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_UDP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 0))
					goto fail;
			goto done;
		case IPPROTO_UDPLITE:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 1))
					goto fail;
			goto done;
		case IPPROTO_SCTP:
			if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
			    !tcf_csum_sctp(skb, hl, pl + sizeof(*ip6h)))
				goto fail;
			goto done;
		default:
			goto ignore_skb;
		}
	} while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
	return 1;

fail:
	return 0;
}

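/* Per-packet entry point: dispatch on the (possibly VLAN-encapsulated)
 * L3 protocol, peeling VLAN headers as needed and restoring them before
 * returning. Runs in the RCU-bh read side; the parameter block is read
 * via rcu_dereference_bh().
 */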
static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a,
			struct tcf_result *res)
{
	struct tcf_csum *p = to_tcf_csum(a);
	bool orig_vlan_tag_present = false;
	unsigned int vlan_hdr_count = 0;
	struct tcf_csum_params *params;
	u32 update_flags;
	__be16 protocol;
	int action;

	params = rcu_dereference_bh(p->params);

	tcf_lastuse_update(&p->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(p->common.cpu_bstats), skb);

	action = READ_ONCE(p->tcf_action);
	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	update_flags = params->update_flags;
	protocol = tc_skb_protocol(skb);
again:
	switch (protocol) {
	case cpu_to_be16(ETH_P_IP):
		if (!tcf_csum_ipv4(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		if (!tcf_csum_ipv6(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_8021AD): /* fall through */
	case cpu_to_be16(ETH_P_8021Q):
		if (skb_vlan_tag_present(skb) && !orig_vlan_tag_present) {
			protocol = skb->protocol;
			orig_vlan_tag_present = true;
		} else {
			struct vlan_hdr *vlan = (struct vlan_hdr *)skb->data;

			protocol = vlan->h_vlan_encapsulated_proto;
			skb_pull(skb, VLAN_HLEN);
			skb_reset_network_header(skb);
			vlan_hdr_count++;
		}
		goto again;
	}

out:
	/* Restore the skb for the pulled VLAN tags */
	while (vlan_hdr_count--) {
		skb_push(skb, VLAN_HLEN);
		skb_reset_network_header(skb);
	}

	return action;

drop:
	qstats_drop_inc(this_cpu_ptr(p->common.cpu_qstats));
	action = TC_ACT_SHOT;
	goto out;
}

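/* Dump the action parameters and timestamps back to userspace via
 * netlink, under p->tcf_lock.
 */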
static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			 int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;
	struct tc_csum opt = {
		.index   = p->tcf_index,
		.refcnt  = refcount_read(&p->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&p->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&p->tcf_lock);
	params = rcu_dereference_protected(p->params,
					   lockdep_is_held(&p->tcf_lock));
	opt.action = p->tcf_action;
	opt.update_flags = params->update_flags;

	if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &p->tcf_tm);
	if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&p->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&p->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static void tcf_csum_cleanup(struct tc_action *a)
{
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;

	params = rcu_dereference_protected(p->params, 1);
	if (params)
		kfree_rcu(params, rcu);
}

static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
			   struct netlink_callback *cb, int type,
			   const struct tc_action_ops *ops,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_csum_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_idr_search(tn, a, index);
}

static size_t tcf_csum_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_csum));
}

static struct tc_action_ops act_csum_ops = {
	.kind		= "csum",
	.id		= TCA_ID_CSUM,
	.owner		= THIS_MODULE,
	.act		= tcf_csum_act,
	.dump		= tcf_csum_dump,
	.init		= tcf_csum_init,
	.cleanup	= tcf_csum_cleanup,
	.walk		= tcf_csum_walker,
	.lookup		= tcf_csum_search,
	.get_fill_size  = tcf_csum_get_fill_size,
	.size		= sizeof(struct tcf_csum),
};

static __net_init int csum_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tc_action_net_init(tn, &act_csum_ops);
}

static void __net_exit csum_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, csum_net_id);
}

static struct pernet_operations csum_net_ops = {
	.init = csum_init_net,
	.exit_batch = csum_exit_net,
	.id   = &csum_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");

static int __init csum_init_module(void)
{
	return tcf_register_action(&act_csum_ops, &csum_net_ops);
}

static void __exit csum_cleanup_module(void)
{
	tcf_unregister_action(&act_csum_ops, &csum_net_ops);
}

module_init(csum_init_module);
module_exit(csum_cleanup_module);