xref: /linux/net/ipv6/seg6_iptunnel.c (revision e5c86679d5e864947a52fb31e45a425dea3e7fa9)
/*
 *  SR-IPv6 implementation
 *
 *  Author:
 *  David Lebrun <david.lebrun@uclouvain.be>
 *
 *
 *  This program is free software; you can redistribute it and/or
 *        modify it under the terms of the GNU General Public License
 *        as published by the Free Software Foundation; either version
 *        2 of the License, or (at your option) any later version.
 */

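/* This file implements the SRv6 lightweight tunnel (lwtunnel) encap type
 * LWTUNNEL_ENCAP_SEG6.  Routes carrying this encap state have their
 * input/output paths redirected to seg6_input()/seg6_output() below, which
 * insert or prepend a Segment Routing Header (SRH) on matching packets.
 *
 * Illustrative userspace configuration (iproute2 syntax, shown only as an
 * example of how such a route is typically installed):
 *
 *   ip -6 route add 2001:db8::/64 encap seg6 mode encap \
 *           segs 2001:db8:1::1,2001:db8:2::2 dev eth0
 */
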
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/net.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/lwtunnel.h>
#include <net/netevent.h>
#include <net/netns/generic.h>
#include <net/ip6_fib.h>
#include <net/route.h>
#include <net/seg6.h>
#include <linux/seg6.h>
#include <linux/seg6_iptunnel.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#ifdef CONFIG_DST_CACHE
#include <net/dst_cache.h>
#endif
#ifdef CONFIG_IPV6_SEG6_HMAC
#include <net/seg6_hmac.h>
#endif

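/* Per-route tunnel state: an optional cached output dst followed by the
 * variable-length encap info (mode + SRH) copied from netlink at build time.
 */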
struct seg6_lwt {
#ifdef CONFIG_DST_CACHE
	struct dst_cache cache;
#endif
	struct seg6_iptunnel_encap tuninfo[0];
};

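/* Accessors mapping a generic lwtunnel_state to the seg6-specific state and
 * to the embedded encap info.
 */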
static inline struct seg6_lwt *seg6_lwt_lwtunnel(struct lwtunnel_state *lwt)
{
	return (struct seg6_lwt *)lwt->data;
}

static inline struct seg6_iptunnel_encap *
seg6_encap_lwtunnel(struct lwtunnel_state *lwt)
{
	return seg6_lwt_lwtunnel(lwt)->tuninfo;
}

static const struct nla_policy seg6_iptunnel_policy[SEG6_IPTUNNEL_MAX + 1] = {
	[SEG6_IPTUNNEL_SRH]	= { .type = NLA_BINARY },
};

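/* Dump the encap info (mode + SRH) as a single netlink attribute. */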
static int nla_put_srh(struct sk_buff *skb, int attrtype,
		       struct seg6_iptunnel_encap *tuninfo)
{
	struct seg6_iptunnel_encap *data;
	struct nlattr *nla;
	int len;

	len = SEG6_IPTUN_ENCAP_SIZE(tuninfo);

	nla = nla_reserve(skb, attrtype, len);
	if (!nla)
		return -EMSGSIZE;

	data = nla_data(nla);
	memcpy(data, tuninfo, len);

	return 0;
}

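/* Select the outer source address: use the per-netns tunnel source
 * (sdata->tun_src) if one is configured, otherwise derive a source address
 * for the outer destination with ipv6_dev_get_saddr().
 */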
static void set_tun_src(struct net *net, struct net_device *dev,
			struct in6_addr *daddr, struct in6_addr *saddr)
{
	struct seg6_pernet_data *sdata = seg6_pernet(net);
	struct in6_addr *tun_src;

	rcu_read_lock();

	tun_src = rcu_dereference(sdata->tun_src);

	if (!ipv6_addr_any(tun_src)) {
		memcpy(saddr, tun_src, sizeof(struct in6_addr));
	} else {
		ipv6_dev_get_saddr(net, dev, daddr, IPV6_PREFER_SRC_PUBLIC,
				   saddr);
	}

	rcu_read_unlock();
}

/* encapsulate an IPv6 packet within an outer IPv6 header with a given SRH */
static int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
{
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct ipv6hdr *hdr, *inner_hdr;
	struct ipv6_sr_hdr *isrh;
	int hdrlen, tot_len, err;

	hdrlen = (osrh->hdrlen + 1) << 3;
	tot_len = hdrlen + sizeof(*hdr);

	err = pskb_expand_head(skb, tot_len, 0, GFP_ATOMIC);
	if (unlikely(err))
		return err;

	inner_hdr = ipv6_hdr(skb);

	skb_push(skb, tot_len);
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);
	hdr = ipv6_hdr(skb);

	/* inherit tc, flowlabel and hlim
	 * hlim will be decremented in ip6_forward() afterwards and
	 * decapsulation will overwrite inner hlim with outer hlim
	 */
	ip6_flow_hdr(hdr, ip6_tclass(ip6_flowinfo(inner_hdr)),
		     ip6_flowlabel(inner_hdr));
	hdr->hop_limit = inner_hdr->hop_limit;
	hdr->nexthdr = NEXTHDR_ROUTING;

	isrh = (void *)hdr + sizeof(*hdr);
	memcpy(isrh, osrh, hdrlen);

	isrh->nexthdr = NEXTHDR_IPV6;

	hdr->daddr = isrh->segments[isrh->first_segment];
	set_tun_src(net, skb->dev, &hdr->daddr, &hdr->saddr);

#ifdef CONFIG_IPV6_SEG6_HMAC
	if (sr_has_hmac(isrh)) {
		err = seg6_push_hmac(net, &hdr->saddr, isrh);
		if (unlikely(err))
			return err;
	}
#endif

	skb_postpush_rcsum(skb, hdr, tot_len);

	return 0;
}

/* insert an SRH within an IPv6 packet, just after the IPv6 header */
#ifdef CONFIG_IPV6_SEG6_INLINE
static int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
{
	struct ipv6hdr *hdr, *oldhdr;
	struct ipv6_sr_hdr *isrh;
	int hdrlen, err;

	hdrlen = (osrh->hdrlen + 1) << 3;

	err = pskb_expand_head(skb, hdrlen, 0, GFP_ATOMIC);
	if (unlikely(err))
		return err;

	oldhdr = ipv6_hdr(skb);

	skb_pull(skb, sizeof(struct ipv6hdr));
	skb_postpull_rcsum(skb, skb_network_header(skb),
			   sizeof(struct ipv6hdr));

	skb_push(skb, sizeof(struct ipv6hdr) + hdrlen);
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);

	hdr = ipv6_hdr(skb);

	memmove(hdr, oldhdr, sizeof(*hdr));

	isrh = (void *)hdr + sizeof(*hdr);
	memcpy(isrh, osrh, hdrlen);

	isrh->nexthdr = hdr->nexthdr;
	hdr->nexthdr = NEXTHDR_ROUTING;

	isrh->segments[0] = hdr->daddr;
	hdr->daddr = isrh->segments[isrh->first_segment];

#ifdef CONFIG_IPV6_SEG6_HMAC
	if (sr_has_hmac(isrh)) {
		struct net *net = dev_net(skb_dst(skb)->dev);

		err = seg6_push_hmac(net, &hdr->saddr, isrh);
		if (unlikely(err))
			return err;
	}
#endif

	skb_postpush_rcsum(skb, hdr, sizeof(struct ipv6hdr) + hdrlen);

	return 0;
}
#endif

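/* Apply the SRH transformation selected by the encap mode attached to the
 * route, then fix up the outer payload length and header offsets.
 */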
static int seg6_do_srh(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct seg6_iptunnel_encap *tinfo;
	int err = 0;

	tinfo = seg6_encap_lwtunnel(dst->lwtstate);

	if (likely(!skb->encapsulation)) {
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;
	}

	switch (tinfo->mode) {
#ifdef CONFIG_IPV6_SEG6_INLINE
	case SEG6_IPTUN_MODE_INLINE:
		err = seg6_do_srh_inline(skb, tinfo->srh);
		skb_reset_inner_headers(skb);
		break;
#endif
	case SEG6_IPTUN_MODE_ENCAP:
		err = seg6_do_srh_encap(skb, tinfo->srh);
		break;
	}

	if (err)
		return err;

	ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
	skb_set_transport_header(skb, sizeof(struct ipv6hdr));

	skb_set_inner_protocol(skb, skb->protocol);

	return 0;
}

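/* lwtunnel input handler: push/insert the SRH on a received packet, then
 * re-run the IPv6 input route lookup on the (possibly new) destination.
 */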
static int seg6_input(struct sk_buff *skb)
{
	int err;

	err = seg6_do_srh(skb);
	if (unlikely(err)) {
		kfree_skb(skb);
		return err;
	}

	skb_dst_drop(skb);
	ip6_route_input(skb);

	return dst_input(skb);
}

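/* lwtunnel output handler: push/insert the SRH, then route the resulting
 * packet towards the first segment.  When CONFIG_DST_CACHE is enabled the
 * output dst is cached per tunnel state to avoid a route lookup per packet.
 */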
static int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *orig_dst = skb_dst(skb);
	struct dst_entry *dst = NULL;
	struct seg6_lwt *slwt;
	int err = -EINVAL;

	err = seg6_do_srh(skb);
	if (unlikely(err))
		goto drop;

	slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);

#ifdef CONFIG_DST_CACHE
	preempt_disable();
	dst = dst_cache_get(&slwt->cache);
	preempt_enable();
#endif

	if (unlikely(!dst)) {
		struct ipv6hdr *hdr = ipv6_hdr(skb);
		struct flowi6 fl6;

		/* zero the flow key so that fields not set explicitly below
		 * (oif, uid, ...) do not carry stack garbage into the route
		 * lookup
		 */
		memset(&fl6, 0, sizeof(fl6));
		fl6.daddr = hdr->daddr;
		fl6.saddr = hdr->saddr;
		fl6.flowlabel = ip6_flowinfo(hdr);
		fl6.flowi6_mark = skb->mark;
		fl6.flowi6_proto = hdr->nexthdr;

		dst = ip6_route_output(net, NULL, &fl6);
		if (dst->error) {
			err = dst->error;
			dst_release(dst);
			goto drop;
		}

#ifdef CONFIG_DST_CACHE
		preempt_disable();
		dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr);
		preempt_enable();
#endif
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	return dst_output(net, sk, skb);
drop:
	kfree_skb(skb);
	return err;
}

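/* Build the lwtunnel state from the SEG6_IPTUNNEL_SRH netlink attribute:
 * validate the encap mode and the SRH, then copy the encap info into a
 * freshly allocated seg6_lwt.
 */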
static int seg6_build_state(struct nlattr *nla,
			    unsigned int family, const void *cfg,
			    struct lwtunnel_state **ts)
{
	struct nlattr *tb[SEG6_IPTUNNEL_MAX + 1];
	struct seg6_iptunnel_encap *tuninfo;
	struct lwtunnel_state *newts;
	int tuninfo_len, min_size;
	struct seg6_lwt *slwt;
	int err;

	err = nla_parse_nested(tb, SEG6_IPTUNNEL_MAX, nla,
			       seg6_iptunnel_policy);

	if (err < 0)
		return err;

	if (!tb[SEG6_IPTUNNEL_SRH])
		return -EINVAL;

	tuninfo = nla_data(tb[SEG6_IPTUNNEL_SRH]);
	tuninfo_len = nla_len(tb[SEG6_IPTUNNEL_SRH]);

	/* tuninfo must contain at least the iptunnel encap structure,
	 * the SRH and one segment
	 */
	min_size = sizeof(*tuninfo) + sizeof(struct ipv6_sr_hdr) +
		   sizeof(struct in6_addr);
	if (tuninfo_len < min_size)
		return -EINVAL;

	switch (tuninfo->mode) {
#ifdef CONFIG_IPV6_SEG6_INLINE
	case SEG6_IPTUN_MODE_INLINE:
		break;
#endif
	case SEG6_IPTUN_MODE_ENCAP:
		break;
	default:
		return -EINVAL;
	}

	/* verify that SRH is consistent */
	if (!seg6_validate_srh(tuninfo->srh, tuninfo_len - sizeof(*tuninfo)))
		return -EINVAL;

	newts = lwtunnel_state_alloc(tuninfo_len + sizeof(*slwt));
	if (!newts)
		return -ENOMEM;

	slwt = seg6_lwt_lwtunnel(newts);

#ifdef CONFIG_DST_CACHE
	err = dst_cache_init(&slwt->cache, GFP_KERNEL);
	if (err) {
		kfree(newts);
		return err;
	}
#endif

	memcpy(&slwt->tuninfo, tuninfo, tuninfo_len);

	newts->type = LWTUNNEL_ENCAP_SEG6;
	newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT |
			LWTUNNEL_STATE_INPUT_REDIRECT;
	newts->headroom = seg6_lwt_headroom(tuninfo);

	*ts = newts;

	return 0;
}

#ifdef CONFIG_DST_CACHE
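/* Release the per-state dst cache when the lwtunnel state is freed. */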
static void seg6_destroy_state(struct lwtunnel_state *lwt)
{
	dst_cache_destroy(&seg6_lwt_lwtunnel(lwt)->cache);
}
#endif

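/* Dump the encap info back to userspace when the route is listed. */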
static int seg6_fill_encap_info(struct sk_buff *skb,
				struct lwtunnel_state *lwtstate)
{
	struct seg6_iptunnel_encap *tuninfo = seg6_encap_lwtunnel(lwtstate);

	if (nla_put_srh(skb, SEG6_IPTUNNEL_SRH, tuninfo))
		return -EMSGSIZE;

	return 0;
}

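/* Netlink size needed by seg6_fill_encap_info() for this state. */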
static int seg6_encap_nlsize(struct lwtunnel_state *lwtstate)
{
	struct seg6_iptunnel_encap *tuninfo = seg6_encap_lwtunnel(lwtstate);

	return nla_total_size(SEG6_IPTUN_ENCAP_SIZE(tuninfo));
}

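/* Compare two encap states; returns 0 when the encap info is identical. */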
static int seg6_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
	struct seg6_iptunnel_encap *a_hdr = seg6_encap_lwtunnel(a);
	struct seg6_iptunnel_encap *b_hdr = seg6_encap_lwtunnel(b);
	int len = SEG6_IPTUN_ENCAP_SIZE(a_hdr);

	if (len != SEG6_IPTUN_ENCAP_SIZE(b_hdr))
		return 1;

	return memcmp(a_hdr, b_hdr, len);
}

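/* lwtunnel operations for the SEG6 encap type. */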
static const struct lwtunnel_encap_ops seg6_iptun_ops = {
	.build_state = seg6_build_state,
#ifdef CONFIG_DST_CACHE
	.destroy_state = seg6_destroy_state,
#endif
	.output = seg6_output,
	.input = seg6_input,
	.fill_encap = seg6_fill_encap_info,
	.get_encap_size = seg6_encap_nlsize,
	.cmp_encap = seg6_encap_cmp,
	.owner = THIS_MODULE,
};

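/* Register/unregister the SEG6 encap type with the lwtunnel core; called
 * from the seg6 subsystem init/exit code in net/ipv6/seg6.c.
 */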
int __init seg6_iptunnel_init(void)
{
	return lwtunnel_encap_add_ops(&seg6_iptun_ops, LWTUNNEL_ENCAP_SEG6);
}

void seg6_iptunnel_exit(void)
{
	lwtunnel_encap_del_ops(&seg6_iptun_ops, LWTUNNEL_ENCAP_SEG6);
}