/*
 * Checksum updating actions
 *
 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>

#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/igmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>
#include <net/sctp/checksum.h>

#include <net/act_api.h>

#include <linux/tc_act/tc_csum.h>
#include <net/tc_act/tc_csum.h>

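/* Table size hint handed to tc_action_net_init() in csum_init_net(). */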
#define CSUM_TAB_MASK 15

static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
	[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};

static unsigned int csum_net_id;
static struct tc_action_ops act_csum_ops;

static int tcf_csum_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a, int ovr,
			 int bind)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);
	struct nlattr *tb[TCA_CSUM_MAX + 1];
	struct tc_csum *parm;
	struct tcf_csum *p;
	int ret = 0, err;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_CSUM_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_CSUM_PARMS]);

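	/* Reuse an existing action with this index if there is one,
	 * otherwise create a new one bound to act_csum_ops.
	 */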
	if (!tcf_hash_check(tn, parm->index, a, bind)) {
		ret = tcf_hash_create(tn, parm->index, est, a,
				      &act_csum_ops, bind, false);
		if (ret)
			return ret;
		ret = ACT_P_CREATED;
	} else {
		if (bind) /* don't override defaults */
			return 0;
		tcf_hash_release(*a, bind);
		if (!ovr)
			return -EEXIST;
	}

	p = to_tcf_csum(*a);
	spin_lock_bh(&p->tcf_lock);
	p->tcf_action = parm->action;
	p->update_flags = parm->update_flags;
	spin_unlock_bh(&p->tcf_lock);

	if (ret == ACT_P_CREATED)
		tcf_hash_insert(tn, *a);

	return ret;
}

/**
 * tcf_csum_skb_nextlayer - Get next layer pointer
 * @skb: sk_buff to use
 * @ihl: length of the headers summed so far
 * @ipl: complete packet length
 * @jhl: length of the next header
 *
 * Check that the expected next-layer header is available in the specified
 * sk_buff. Return a pointer to the next layer if it is, NULL otherwise.
 */
static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
				    unsigned int ihl, unsigned int ipl,
				    unsigned int jhl)
{
	int ntkoff = skb_network_offset(skb);
	int hl = ihl + jhl;

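	/* The claimed packet length must cover the next header, the data up
	 * to ipl must be present (pullable), and the headers about to be
	 * rewritten must be writable.
	 */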
	if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
	    skb_try_make_writable(skb, hl + ntkoff))
		return NULL;
	else
		return (void *)(skb_network_header(skb) + ihl);
}

static int tcf_csum_ipv4_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmphdr *icmph;

	icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
	if (icmph == NULL)
		return 0;

	icmph->checksum = 0;
	skb->csum = csum_partial(icmph, ipl - ihl, 0);
	icmph->checksum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct igmphdr *igmph;

	igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
	if (igmph == NULL)
		return 0;

	igmph->csum = 0;
	skb->csum = csum_partial(igmph, ipl - ihl, 0);
	igmph->csum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv6_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmp6hdr *icmp6h;
	const struct ipv6hdr *ip6h;

	icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
	if (icmp6h == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	icmp6h->icmp6_cksum = 0;
	skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					      ipl - ihl, IPPROTO_ICMPV6,
					      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct iphdr *iph;

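	/* For GSO TCP packets the checksum is filled in when the skb is
	 * segmented, so there is nothing to recompute here.
	 */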
	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	iph = ip_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = tcp_v4_check(ipl - ihl,
				   iph->saddr, iph->daddr, skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct ipv6hdr *ip6h;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				      ipl - ihl, IPPROTO_TCP,
				      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct iphdr *iph;
	u16 ul;

	/*
	 * Support both UDP and UDPLITE checksum algorithms. Don't use
	 * udph->len to get the payload length without checking the protocol
	 * first: UDPLITE reuses udph->len as the checksum coverage.
	 * Use iph->tot_len, or just ipl, instead.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	iph = ip_hdr(skb);
	ul = ntohs(udph->len);

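	/* A zero UDP checksum means the sender did not compute one, so leave
	 * it untouched; UDPLITE checksums are mandatory and always updated.
	 */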
	if (udplite || udph->check) {

		udph->check = 0;

		if (udplite) {
			if (ul == 0)
				skb->csum = csum_partial(udph, ipl - ihl, 0);
			else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
				skb->csum = csum_partial(udph, ul, 0);
			else
				goto ignore_obscure_skb;
		} else {
			if (ul != ipl - ihl)
				goto ignore_obscure_skb;

			skb->csum = csum_partial(udph, ul, 0);
		}

		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						ul, iph->protocol,
						skb->csum);

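		/* A computed checksum of zero must be sent as all ones,
		 * since zero in the header means "no checksum".
		 */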
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct ipv6hdr *ip6h;
	u16 ul;

	/*
	 * Support both UDP and UDPLITE checksum algorithms. Don't use
	 * udph->len to get the payload length without checking the protocol
	 * first: UDPLITE reuses udph->len as the checksum coverage.
	 * Use ip6h->payload_len + sizeof(*ip6h) ... , or just ipl.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	ul = ntohs(udph->len);

	udph->check = 0;

	if (udplite) {
		if (ul == 0)
			skb->csum = csum_partial(udph, ipl - ihl, 0);

		else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
			skb->csum = csum_partial(udph, ul, 0);

		else
			goto ignore_obscure_skb;
	} else {
		if (ul != ipl - ihl)
			goto ignore_obscure_skb;

		skb->csum = csum_partial(udph, ul, 0);
	}

	udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
				      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
				      skb->csum);

	if (!udph->check)
		udph->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
			 unsigned int ipl)
{
	struct sctphdr *sctph;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_SCTP)
		return 1;

	sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph));
	if (!sctph)
		return 0;

	sctph->checksum = sctp_compute_cksum(skb,
					     skb_network_offset(skb) + ihl);
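	/* The CRC32c in the packet is now correct; clear the offload hints
	 * so nothing tries to compute it again.
	 */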
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_not_inet = 0;

	return 1;
}

static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
	const struct iphdr *iph;
	int ntkoff;

	ntkoff = skb_network_offset(skb);

	if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
		goto fail;

	iph = ip_hdr(skb);

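	/* Non-first fragments carry no transport header, so treat them as
	 * protocol 0 and skip all transport checksum updates.
	 */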
	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_ICMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
			if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_IGMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
			if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_TCP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
			if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_UDP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 0))
				goto fail;
		break;
	case IPPROTO_UDPLITE:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 1))
				goto fail;
		break;
	case IPPROTO_SCTP:
		if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
		    !tcf_csum_sctp(skb, iph->ihl * 4, ntohs(iph->tot_len)))
			goto fail;
		break;
	}

	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
		if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
			goto fail;

		ip_send_check(ip_hdr(skb));
	}

	return 1;

fail:
	return 0;
}

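/* Walk the hop-by-hop options and, if a jumbo payload option is found,
 * replace *pl with the payload length it carries. Return 0 on a malformed
 * jumbo option, 1 otherwise.
 */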
static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh, unsigned int ixhl,
				 unsigned int *pl)
{
	int off, len, optlen;
	unsigned char *xh = (void *)ip6xh;

	off = sizeof(*ip6xh);
	len = ixhl - off;

	while (len > 1) {
		switch (xh[off]) {
		case IPV6_TLV_PAD1:
			optlen = 1;
			break;
		case IPV6_TLV_JUMBO:
			optlen = xh[off + 1] + 2;
			if (optlen != 6 || len < 6 || (off & 3) != 2)
				/* wrong jumbo option length/alignment */
				return 0;
			*pl = ntohl(*(__be32 *)(xh + off + 2));
			goto done;
		default:
			optlen = xh[off + 1] + 2;
			if (optlen > len)
				/* ignore obscure options */
				goto done;
			break;
		}
		off += optlen;
		len -= optlen;
	}

done:
	return 1;
}

static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
	struct ipv6hdr *ip6h;
	struct ipv6_opt_hdr *ip6xh;
	unsigned int hl, ixhl;
	unsigned int pl;
	int ntkoff;
	u8 nexthdr;

	ntkoff = skb_network_offset(skb);

	hl = sizeof(*ip6h);

	if (!pskb_may_pull(skb, hl + ntkoff))
		goto fail;

	ip6h = ipv6_hdr(skb);

	pl = ntohs(ip6h->payload_len);
	nexthdr = ip6h->nexthdr;

	do {
		switch (nexthdr) {
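		/* Fragmented packets are left alone: the transport header
		 * may not be present in this fragment.
		 */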
		case NEXTHDR_FRAGMENT:
			goto ignore_skb;
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			ixhl = ipv6_optlen(ip6xh);
			if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			if ((nexthdr == NEXTHDR_HOP) &&
			    !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
				goto fail;
			nexthdr = ip6xh->nexthdr;
			hl += ixhl;
			break;
		case IPPROTO_ICMPV6:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
				if (!tcf_csum_ipv6_icmp(skb,
							hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_TCP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
				if (!tcf_csum_ipv6_tcp(skb,
						       hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_UDP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 0))
					goto fail;
			goto done;
		case IPPROTO_UDPLITE:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 1))
					goto fail;
			goto done;
		case IPPROTO_SCTP:
			if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
			    !tcf_csum_sctp(skb, hl, pl + sizeof(*ip6h)))
				goto fail;
			goto done;
		default:
			goto ignore_skb;
		}
	} while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
	return 1;

fail:
	return 0;
}

static int tcf_csum(struct sk_buff *skb, const struct tc_action *a,
		    struct tcf_result *res)
{
	struct tcf_csum *p = to_tcf_csum(a);
	int action;
	u32 update_flags;

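	/* Snapshot the configured verdict and update flags under the action
	 * lock; the checksum updates themselves run without it.
	 */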
	spin_lock(&p->tcf_lock);
	tcf_lastuse_update(&p->tcf_tm);
	bstats_update(&p->tcf_bstats, skb);
	action = p->tcf_action;
	update_flags = p->update_flags;
	spin_unlock(&p->tcf_lock);

	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	switch (tc_skb_protocol(skb)) {
	case cpu_to_be16(ETH_P_IP):
		if (!tcf_csum_ipv4(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		if (!tcf_csum_ipv6(skb, update_flags))
			goto drop;
		break;
	}

	return action;

drop:
	spin_lock(&p->tcf_lock);
	p->tcf_qstats.drops++;
	spin_unlock(&p->tcf_lock);
	return TC_ACT_SHOT;
}

static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			 int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_csum *p = to_tcf_csum(a);
	struct tc_csum opt = {
		.update_flags = p->update_flags,
		.index   = p->tcf_index,
		.action  = p->tcf_action,
		.refcnt  = p->tcf_refcnt - ref,
		.bindcnt = p->tcf_bindcnt - bind,
	};
	struct tcf_t t;

	if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &p->tcf_tm);
	if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
			   struct netlink_callback *cb, int type,
			   const struct tc_action_ops *ops)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops);
}

static int tcf_csum_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_hash_search(tn, a, index);
}

static struct tc_action_ops act_csum_ops = {
	.kind		= "csum",
	.type		= TCA_ACT_CSUM,
	.owner		= THIS_MODULE,
	.act		= tcf_csum,
	.dump		= tcf_csum_dump,
	.init		= tcf_csum_init,
	.walk		= tcf_csum_walker,
	.lookup		= tcf_csum_search,
	.size		= sizeof(struct tcf_csum),
};

static __net_init int csum_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tc_action_net_init(tn, &act_csum_ops, CSUM_TAB_MASK);
}

static void __net_exit csum_exit_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	tc_action_net_exit(tn);
}

static struct pernet_operations csum_net_ops = {
	.init = csum_init_net,
	.exit = csum_exit_net,
	.id   = &csum_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");

static int __init csum_init_module(void)
{
	return tcf_register_action(&act_csum_ops, &csum_net_ops);
}

static void __exit csum_cleanup_module(void)
{
	tcf_unregister_action(&act_csum_ops, &csum_net_ops);
}

module_init(csum_init_module);
module_exit(csum_cleanup_module);