// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * xfrm_output.c - Common IPsec encapsulation code.
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/gso.h>
#include <net/icmp.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_route.h>
#endif

#include "xfrm_inout.h"

static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb);
static int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);

static int xfrm_skb_check_space(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	int nhead = dst->header_len + LL_RESERVED_SPACE(dst->dev)
		- skb_headroom(skb);
	int ntail = dst->dev->needed_tailroom - skb_tailroom(skb);

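	/* Negative values mean there is already enough head/tail room;
	 * only ask pskb_expand_head() for the space that is missing.
	 */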
	if (nhead <= 0) {
		if (ntail <= 0)
			return 0;
		nhead = 0;
	} else if (ntail < 0)
		ntail = 0;

	return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC);
}

/* Children define the path of the packet through the
 * Linux networking stack. Thus, destinations are stackable.
 */

static struct dst_entry *skb_dst_pop(struct sk_buff *skb)
{
	struct dst_entry *child = dst_clone(xfrm_dst_child(skb_dst(skb)));

	skb_dst_drop(skb);
	return child;
}

/* Add encapsulation header.
 *
 * The IP header will be moved forward to make space for the encapsulation
 * header.
 */
static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	int ihl = iph->ihl * 4;

	if (!skb->inner_protocol)
		skb_set_inner_transport_header(skb,
					       skb_transport_offset(skb));

	skb_set_network_header(skb, -x->props.header_len);
	skb->mac_header = skb->network_header +
			  offsetof(struct iphdr, protocol);
	skb->transport_header = skb->network_header + ihl;
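	/* Open a gap of x->props.header_len bytes for the encapsulation
	 * header by moving the original IPv4 header to the new, earlier
	 * network header position.
	 */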
	__skb_pull(skb, ihl);
	memmove(skb_network_header(skb), iph, ihl);
	return 0;
}

#if IS_ENABLED(CONFIG_IPV6_MIP6)
static int mip6_rthdr_offset(struct sk_buff *skb, u8 **nexthdr, int type)
{
	const unsigned char *nh = skb_network_header(skb);
	unsigned int offset = sizeof(struct ipv6hdr);
	unsigned int packet_len;
	int found_rhdr = 0;

	packet_len = skb_tail_pointer(skb) - nh;
	*nexthdr = &ipv6_hdr(skb)->nexthdr;

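	/* Walk the extension header chain to find where the Mobile IPv6
	 * routing header or HAO destination option should be inserted.
	 */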
	while (offset <= packet_len) {
		struct ipv6_opt_hdr *exthdr;

		switch (**nexthdr) {
		case NEXTHDR_HOP:
			break;
		case NEXTHDR_ROUTING:
			if (type == IPPROTO_ROUTING && offset + 3 <= packet_len) {
				struct ipv6_rt_hdr *rt;

				rt = (struct ipv6_rt_hdr *)(nh + offset);
				if (rt->type != 0)
					return offset;
			}
			found_rhdr = 1;
			break;
		case NEXTHDR_DEST:
			/* HAO MUST NOT appear more than once.
			 * XXX: It is better to try to find by the end of
			 * XXX: packet if HAO exists.
			 */
			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0) {
				net_dbg_ratelimited("mip6: hao exists already, override\n");
				return offset;
			}

			if (found_rhdr)
				return offset;

			break;
		default:
			return offset;
		}

		if (offset + sizeof(struct ipv6_opt_hdr) > packet_len)
			return -EINVAL;

		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
						 offset);
		offset += ipv6_optlen(exthdr);
		if (offset > IPV6_MAXPLEN)
			return -EINVAL;
		*nexthdr = &exthdr->nexthdr;
	}

	return -EINVAL;
}
#endif

#if IS_ENABLED(CONFIG_IPV6)
static int xfrm6_hdr_offset(struct xfrm_state *x, struct sk_buff *skb, u8 **prevhdr)
{
	switch (x->type->proto) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	case IPPROTO_DSTOPTS:
	case IPPROTO_ROUTING:
		return mip6_rthdr_offset(skb, prevhdr, x->type->proto);
#endif
	default:
		break;
	}

	return ip6_find_1stfragopt(skb, prevhdr);
}
#endif

/* Add encapsulation header.
 *
 * The IP header and mutable extension headers will be moved forward to make
 * space for the encapsulation header.
 */
static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct ipv6hdr *iph;
	u8 *prevhdr;
	int hdr_len;

	iph = ipv6_hdr(skb);
	if (!skb->inner_protocol)
		skb_set_inner_transport_header(skb,
					       skb_transport_offset(skb));

	hdr_len = xfrm6_hdr_offset(x, skb, &prevhdr);
	if (hdr_len < 0)
		return hdr_len;
	skb_set_mac_header(skb,
			   (prevhdr - x->props.header_len) - skb->data);
	skb_set_network_header(skb, -x->props.header_len);
	skb->transport_header = skb->network_header + hdr_len;
	__skb_pull(skb, hdr_len);
	memmove(ipv6_hdr(skb), iph, hdr_len);
	return 0;
#else
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
#endif
}

/* Add route optimization header space.
 *
 * The IP header and mutable extension headers will be moved forward to make
 * space for the route optimization header.
 */
static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct ipv6hdr *iph;
	u8 *prevhdr;
	int hdr_len;

	iph = ipv6_hdr(skb);

	hdr_len = xfrm6_hdr_offset(x, skb, &prevhdr);
	if (hdr_len < 0)
		return hdr_len;
	skb_set_mac_header(skb,
			   (prevhdr - x->props.header_len) - skb->data);
	skb_set_network_header(skb, -x->props.header_len);
	skb->transport_header = skb->network_header + hdr_len;
	__skb_pull(skb, hdr_len);
	memmove(ipv6_hdr(skb), iph, hdr_len);

	return 0;
#else
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
#endif
}

/* Add encapsulation header.
 *
 * The top IP header will be constructed per draft-nikander-esp-beet-mode-06.txt.
 */
static int xfrm4_beet_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_beet_phdr *ph;
	struct iphdr *top_iph;
	int hdrlen, optlen;

	hdrlen = 0;
	optlen = XFRM_MODE_SKB_CB(skb)->optlen;
	if (unlikely(optlen))
		hdrlen += IPV4_BEET_PHMAXLEN - (optlen & 4);

	skb_set_network_header(skb, -x->props.header_len - hdrlen +
			       (XFRM_MODE_SKB_CB(skb)->ihl - sizeof(*top_iph)));
	if (x->sel.family != AF_INET6)
		skb->network_header += IPV4_BEET_PHMAXLEN;
	skb->mac_header = skb->network_header +
			  offsetof(struct iphdr, protocol);
	skb->transport_header = skb->network_header + sizeof(*top_iph);

	xfrm4_beet_make_header(skb);

	ph = __skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl - hdrlen);

	top_iph = ip_hdr(skb);

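	/* Inner IPv4 options cannot be expressed in the minimal BEET
	 * header, so insert an IPPROTO_BEETPH pseudo header describing
	 * their length, padded with IPOPT_NOP bytes when needed.
	 */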
	if (unlikely(optlen)) {
		if (WARN_ON(optlen < 0))
			return -EINVAL;

		ph->padlen = 4 - (optlen & 4);
		ph->hdrlen = optlen / 8;
		ph->nexthdr = top_iph->protocol;
		if (ph->padlen)
			memset(ph + 1, IPOPT_NOP, ph->padlen);

		top_iph->protocol = IPPROTO_BEETPH;
		top_iph->ihl = sizeof(struct iphdr) / 4;
	}

	top_iph->saddr = x->props.saddr.a4;
	top_iph->daddr = x->id.daddr.a4;

	return 0;
}

/* Add encapsulation header.
 *
 * The top IP header will be constructed per RFC 2401.
 */
static int xfrm4_tunnel_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
	bool small_ipv6 = (skb->protocol == htons(ETH_P_IPV6)) && (skb->len <= IPV6_MIN_MTU);
	struct dst_entry *dst = skb_dst(skb);
	struct iphdr *top_iph;
	int flags;

	if (!skb->inner_protocol) {
		skb_set_inner_network_header(skb, skb_network_offset(skb));
		skb_set_inner_transport_header(skb, skb_transport_offset(skb));
	}

	skb_set_network_header(skb, -x->props.header_len);
	skb->mac_header = skb->network_header +
			  offsetof(struct iphdr, protocol);
	skb->transport_header = skb->network_header + sizeof(*top_iph);
	top_iph = ip_hdr(skb);

	top_iph->ihl = 5;
	top_iph->version = 4;

	top_iph->protocol = xfrm_af2proto(skb_dst(skb)->ops->family);

	/* Disclosing the inner DSCP depends on XFRM_SA_XFLAG_DONT_ENCAP_DSCP */
	if (x->props.extra_flags & XFRM_SA_XFLAG_DONT_ENCAP_DSCP)
		top_iph->tos = 0;
	else
		top_iph->tos = XFRM_MODE_SKB_CB(skb)->tos;
	top_iph->tos = INET_ECN_encapsulate(top_iph->tos,
					    XFRM_MODE_SKB_CB(skb)->tos);

	flags = x->props.flags;
	if (flags & XFRM_STATE_NOECN)
		IP_ECN_clear(top_iph);

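	/* Propagate the inner DF bit to the outer header unless PMTU
	 * discovery is disabled for this SA or the payload is an IPv6
	 * packet no larger than the minimum IPv6 MTU.
	 */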
	top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) || small_ipv6 ?
			    0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));

	top_iph->ttl = ip4_dst_hoplimit(xfrm_dst_child(dst));

	top_iph->saddr = x->props.saddr.a4;
	top_iph->daddr = x->id.daddr.a4;
	ip_select_ident(dev_net(dst->dev), skb, NULL);

	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
static int xfrm6_tunnel_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *top_iph;
	int dsfield;

	if (!skb->inner_protocol) {
		skb_set_inner_network_header(skb, skb_network_offset(skb));
		skb_set_inner_transport_header(skb, skb_transport_offset(skb));
	}

	skb_set_network_header(skb, -x->props.header_len);
	skb->mac_header = skb->network_header +
			  offsetof(struct ipv6hdr, nexthdr);
	skb->transport_header = skb->network_header + sizeof(*top_iph);
	top_iph = ipv6_hdr(skb);

	top_iph->version = 6;

	memcpy(top_iph->flow_lbl, XFRM_MODE_SKB_CB(skb)->flow_lbl,
	       sizeof(top_iph->flow_lbl));
	top_iph->nexthdr = xfrm_af2proto(skb_dst(skb)->ops->family);

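	/* Build the outer traffic class: copy the inner DSCP unless the
	 * SA asks not to, fold in ECN, and clear the ECN bits when
	 * XFRM_STATE_NOECN is set.
	 */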
	if (x->props.extra_flags & XFRM_SA_XFLAG_DONT_ENCAP_DSCP)
		dsfield = 0;
	else
		dsfield = XFRM_MODE_SKB_CB(skb)->tos;
	dsfield = INET_ECN_encapsulate(dsfield, XFRM_MODE_SKB_CB(skb)->tos);
	if (x->props.flags & XFRM_STATE_NOECN)
		dsfield &= ~INET_ECN_MASK;
	ipv6_change_dsfield(top_iph, 0, dsfield);
	top_iph->hop_limit = ip6_dst_hoplimit(xfrm_dst_child(dst));
	top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
	top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
	return 0;
}

static int xfrm6_beet_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipv6hdr *top_iph;
	struct ip_beet_phdr *ph;
	int optlen, hdr_len;

	hdr_len = 0;
	optlen = XFRM_MODE_SKB_CB(skb)->optlen;
	if (unlikely(optlen))
		hdr_len += IPV4_BEET_PHMAXLEN - (optlen & 4);

	skb_set_network_header(skb, -x->props.header_len - hdr_len);
	if (x->sel.family != AF_INET6)
		skb->network_header += IPV4_BEET_PHMAXLEN;
	skb->mac_header = skb->network_header +
			  offsetof(struct ipv6hdr, nexthdr);
	skb->transport_header = skb->network_header + sizeof(*top_iph);
	ph = __skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl - hdr_len);

	xfrm6_beet_make_header(skb);

	top_iph = ipv6_hdr(skb);
	if (unlikely(optlen)) {
		if (WARN_ON(optlen < 0))
			return -EINVAL;

		ph->padlen = 4 - (optlen & 4);
		ph->hdrlen = optlen / 8;
		ph->nexthdr = top_iph->nexthdr;
		if (ph->padlen)
			memset(ph + 1, IPOPT_NOP, ph->padlen);

		top_iph->nexthdr = IPPROTO_BEETPH;
	}

	top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
	top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
	return 0;
}
#endif

/* Add encapsulation header.
 *
 * On exit, the transport header will be set to the start of the
 * encapsulation header to be filled in by x->type->output and the mac
 * header will be set to the nextheader (protocol for IPv4) field of the
 * extension header directly preceding the encapsulation header, or in
 * its absence, that of the top IP header.
 * The value of the network header will always point to the top IP header
 * while skb->data will point to the payload.
 */
static int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	err = xfrm_inner_extract_output(x, skb);
	if (err)
		return err;

	IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
	skb->protocol = htons(ETH_P_IP);

	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		return xfrm4_beet_encap_add(x, skb);
	case XFRM_MODE_TUNNEL:
		return xfrm4_tunnel_encap_add(x, skb);
	}

	WARN_ON_ONCE(1);
	return -EOPNOTSUPP;
}

static int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	int err;

	err = xfrm_inner_extract_output(x, skb);
	if (err)
		return err;

	skb->ignore_df = 1;
	skb->protocol = htons(ETH_P_IPV6);

	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		return xfrm6_beet_encap_add(x, skb);
	case XFRM_MODE_TUNNEL:
		return xfrm6_tunnel_encap_add(x, skb);
	default:
		WARN_ON_ONCE(1);
		return -EOPNOTSUPP;
	}
#endif
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
}

static int xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb)
{
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
	case XFRM_MODE_TUNNEL:
		if (x->props.family == AF_INET)
			return xfrm4_prepare_output(x, skb);
		if (x->props.family == AF_INET6)
			return xfrm6_prepare_output(x, skb);
		break;
	case XFRM_MODE_TRANSPORT:
		if (x->props.family == AF_INET)
			return xfrm4_transport_output(x, skb);
		if (x->props.family == AF_INET6)
			return xfrm6_transport_output(x, skb);
		break;
	case XFRM_MODE_ROUTEOPTIMIZATION:
		if (x->props.family == AF_INET6)
			return xfrm6_ro_output(x, skb);
		WARN_ON_ONCE(1);
		break;
	default:
		if (x->mode_cbs && x->mode_cbs->prepare_output)
			return x->mode_cbs->prepare_output(x, skb);
		WARN_ON_ONCE(1);
		break;
	}

	return -EOPNOTSUPP;
}

#if IS_ENABLED(CONFIG_NET_PKTGEN)
int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb)
{
	return xfrm_outer_mode_output(x, skb);
}
EXPORT_SYMBOL_GPL(pktgen_xfrm_outer_mode_output);
#endif

static int xfrm_output_one(struct sk_buff *skb, int err)
{
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;
	struct net *net = xs_net(x);

	if (err <= 0 || x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
		goto resume;

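	/* Apply each transform of the stacked xfrm dst entries in turn,
	 * stopping when a tunnel-mode outer header has been emitted or
	 * no further xfrm state is attached to the route.
	 */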
	do {
		err = xfrm_skb_check_space(skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			goto error_nolock;
		}

		skb->mark = xfrm_smark_get(skb->mark, x);

		err = xfrm_outer_mode_output(x, skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
			goto error_nolock;
		}

		spin_lock_bh(&x->lock);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEINVALID);
			err = -EINVAL;
			goto error;
		}

		err = xfrm_state_check_expire(x);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEEXPIRED);
			goto error;
		}

		err = xfrm_replay_overflow(x, skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATESEQERROR);
			goto error;
		}

		x->curlft.bytes += skb->len;
		x->curlft.packets++;
		x->lastused = ktime_get_real_seconds();

		spin_unlock_bh(&x->lock);

		skb_dst_force(skb);
		if (!skb_dst(skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			err = -EHOSTUNREACH;
			goto error_nolock;
		}

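		/* For hardware-offloaded packets only the encapsulation
		 * callback runs here; otherwise the type handler performs
		 * the full transform and may complete asynchronously,
		 * signalled by -EINPROGRESS.
		 */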
		if (xfrm_offload(skb)) {
			x->type_offload->encap(x, skb);
		} else {
			/* Inner headers are invalid now. */
			skb->encapsulation = 0;

			err = x->type->output(x, skb);
			if (err == -EINPROGRESS)
				goto out;
		}

resume:
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			goto error_nolock;
		}

		dst = skb_dst_pop(skb);
		if (!dst) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			err = -EHOSTUNREACH;
			goto error_nolock;
		}
		skb_dst_set(skb, dst);
		x = dst->xfrm;
	} while (x && !(x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL));

	return 0;

error:
	spin_unlock_bh(&x->lock);
error_nolock:
	kfree_skb(skb);
out:
	return err;
}

int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err)
{
	struct net *net = xs_net(skb_dst(skb)->xfrm);

	while (likely((err = xfrm_output_one(skb, err)) == 0)) {
		nf_reset_ct(skb);

		err = skb_dst(skb)->ops->local_out(net, sk, skb);
		if (unlikely(err != 1))
			goto out;

		if (!skb_dst(skb)->xfrm)
			return dst_output(net, sk, skb);

		err = nf_hook(skb_dst(skb)->ops->family,
			      NF_INET_POST_ROUTING, net, sk, skb,
			      NULL, skb_dst(skb)->dev, xfrm_output2);
		if (unlikely(err != 1))
			goto out;
	}

	if (err == -EINPROGRESS)
		err = 0;

out:
	return err;
}
EXPORT_SYMBOL_GPL(xfrm_output_resume);

static int xfrm_dev_direct_output(struct sock *sk, struct xfrm_state *x,
				  struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net *net = xs_net(x);
	int err;

	dst = skb_dst_pop(skb);
	if (!dst) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
		kfree_skb(skb);
		return -EHOSTUNREACH;
	}
	skb_dst_set(skb, dst);
	nf_reset_ct(skb);

	err = skb_dst(skb)->ops->local_out(net, sk, skb);
	if (unlikely(err != 1)) {
		kfree_skb(skb);
		return err;
	}

	/* In transport mode the network destination is directly
	 * reachable, while in tunnel mode the inner packet's network
	 * may not be. With packet offload the HW is responsible for
	 * the hard header mangling, so transmit the skb directly to
	 * the netdevice.
	 */
	skb->dev = x->xso.dev;
	__skb_push(skb, skb->dev->hard_header_len);
	return dev_queue_xmit(skb);
}

static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return xfrm_output_resume(sk, skb, 1);
}

static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;

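	/* Segment the GSO skb in software and push every resulting
	 * segment through the transform path individually.
	 */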
	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_GSO_CB_OFFSET);
	BUILD_BUG_ON(sizeof(*IP6CB(skb)) > SKB_GSO_CB_OFFSET);
	segs = skb_gso_segment(skb, 0);
	kfree_skb(skb);
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (segs == NULL)
		return -EINVAL;

	skb_list_walk_safe(segs, segs, nskb) {
		int err;

		skb_mark_not_on_list(segs);
		err = xfrm_output2(net, sk, segs);

		if (unlikely(err)) {
			kfree_skb_list(nskb);
			return err;
		}
	}

	return 0;
}

/* For partial checksum offload, the outer header checksum is calculated
 * by software and the inner header checksum is calculated by hardware.
 * This requires hardware to know the inner packet type to calculate
 * the inner header checksum. Save the inner ip protocol here to avoid
 * traversing the packet in the vendor's xmit code.
 * For IPsec tunnel mode, save the ip protocol from the IP header of the
 * plain text packet. Otherwise, if the encap type is IPIP, just save
 * skb->inner_ipproto; in any other case, get the ip protocol from the
 * inner IP header.
 */
static void xfrm_get_inner_ipproto(struct sk_buff *skb, struct xfrm_state *x)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	const struct ethhdr *eth;

	if (!xo)
		return;

	if (x->outer_mode.encap == XFRM_MODE_TUNNEL) {
		switch (skb_dst(skb)->ops->family) {
		case AF_INET:
			xo->inner_ipproto = ip_hdr(skb)->protocol;
			break;
		case AF_INET6:
			xo->inner_ipproto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			break;
		}

		return;
	}
	if (x->outer_mode.encap == XFRM_MODE_IPTFS) {
		xo->inner_ipproto = IPPROTO_AGGFRAG;
		return;
	}

	/* non-Tunnel Mode */
	if (!skb->encapsulation)
		return;

	if (skb->inner_protocol_type == ENCAP_TYPE_IPPROTO) {
		xo->inner_ipproto = skb->inner_ipproto;
		return;
	}

	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER)
		return;

	eth = (struct ethhdr *)skb_inner_mac_header(skb);

	switch (ntohs(eth->h_proto)) {
	case ETH_P_IPV6:
		xo->inner_ipproto = inner_ipv6_hdr(skb)->nexthdr;
		break;
	case ETH_P_IP:
		xo->inner_ipproto = inner_ip_hdr(skb)->protocol;
		break;
	}
}

int xfrm_output(struct sock *sk, struct sk_buff *skb)
{
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct xfrm_state *x = skb_dst(skb)->xfrm;
	int family;
	int err;

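	/* With full packet offload the device performs the transformation
	 * itself, so initialise the control block for the route's address
	 * family; otherwise use the SA's outer mode family.
	 */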
	family = (x->xso.type != XFRM_DEV_OFFLOAD_PACKET) ? x->outer_mode.family
							  : skb_dst(skb)->ops->family;

	switch (family) {
	case AF_INET:
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
		IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
		break;
	case AF_INET6:
		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));

		IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
		break;
	}

	if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET) {
		if (!xfrm_dev_offload_ok(skb, x)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			kfree_skb(skb);
			return -EHOSTUNREACH;
		}

		/* Exclusive direct xmit for tunnel mode, as
		 * some filtering or matching rules may apply
		 * in transport mode.
		 * Locally generated packets also require
		 * the normal XFRM path for L2 header setup,
		 * as the hardware needs the L2 header to match
		 * for encryption, so skip direct output as well.
		 */
		if (x->props.mode == XFRM_MODE_TUNNEL && !skb->sk)
			return xfrm_dev_direct_output(sk, x, skb);

		return xfrm_output_resume(sk, skb, 0);
	}

	secpath_reset(skb);

	if (xfrm_dev_offload_ok(skb, x)) {
		struct sec_path *sp;

		sp = secpath_set(skb);
		if (!sp) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			kfree_skb(skb);
			return -ENOMEM;
		}

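		/* Record the state in the secpath so the driver can find
		 * the offload context when the packet is transmitted.
		 */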
		sp->olen++;
		sp->xvec[sp->len++] = x;
		xfrm_state_hold(x);

		xfrm_get_inner_ipproto(skb, x);
		skb->encapsulation = 1;

		if (skb_is_gso(skb)) {
			if (skb->inner_protocol && x->props.mode == XFRM_MODE_TUNNEL)
				return xfrm_output_gso(net, sk, skb);

			skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;
			goto out;
		}

		if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)
			goto out;
	} else {
		if (skb_is_gso(skb))
			return xfrm_output_gso(net, sk, skb);
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		err = skb_checksum_help(skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			kfree_skb(skb);
			return err;
		}
	}

out:
	return xfrm_output2(net, sk, skb);
}
EXPORT_SYMBOL_GPL(xfrm_output);

int xfrm4_tunnel_check_size(struct sk_buff *skb)
{
	int mtu, ret = 0;

	if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
		goto out;

	if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->ignore_df)
		goto out;

	mtu = dst_mtu(skb_dst(skb));
	if ((!skb_is_gso(skb) && skb->len > mtu) ||
	    (skb_is_gso(skb) &&
	     !skb_gso_validate_network_len(skb, ip_skb_dst_mtu(skb->sk, skb)))) {
		skb->protocol = htons(ETH_P_IP);

		if (skb->sk && sk_fullsock(skb->sk))
			xfrm_local_error(skb, mtu);
		else
			icmp_send(skb, ICMP_DEST_UNREACH,
				  ICMP_FRAG_NEEDED, htonl(mtu));
		ret = -EMSGSIZE;
	}
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xfrm4_tunnel_check_size);

static int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	if (x->outer_mode.encap == XFRM_MODE_BEET &&
	    ip_is_fragment(ip_hdr(skb))) {
		net_warn_ratelimited("BEET mode doesn't support inner IPv4 fragments\n");
		return -EAFNOSUPPORT;
	}

	err = xfrm4_tunnel_check_size(skb);
	if (err)
		return err;

	XFRM_MODE_SKB_CB(skb)->protocol = ip_hdr(skb)->protocol;

	xfrm4_extract_header(skb);
	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
int xfrm6_tunnel_check_size(struct sk_buff *skb)
{
	int mtu, ret = 0;
	struct dst_entry *dst = skb_dst(skb);
	struct sock *sk = skb_to_full_sk(skb);

	if (skb->ignore_df)
		goto out;

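	/* Every IPv6 link must support at least IPV6_MIN_MTU (1280
	 * bytes), so never report a smaller tunnel MTU than that.
	 */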
	mtu = dst_mtu(dst);
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;

	if ((!skb_is_gso(skb) && skb->len > mtu) ||
	    (skb_is_gso(skb) &&
	     !skb_gso_validate_network_len(skb, ip6_skb_dst_mtu(skb)))) {
		skb->dev = dst->dev;
		skb->protocol = htons(ETH_P_IPV6);

		if (xfrm6_local_dontfrag(sk))
			xfrm6_local_rxpmtu(skb, mtu);
		else if (sk)
			xfrm_local_error(skb, mtu);
		else
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		ret = -EMSGSIZE;
	}
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xfrm6_tunnel_check_size);
#endif

static int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	int err;

	err = xfrm6_tunnel_check_size(skb);
	if (err)
		return err;

	XFRM_MODE_SKB_CB(skb)->protocol = ipv6_hdr(skb)->nexthdr;

	xfrm6_extract_header(skb);
	return 0;
#else
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
#endif
}

static int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return xfrm4_extract_output(x, skb);
	case htons(ETH_P_IPV6):
		return xfrm6_extract_output(x, skb);
	}

	return -EAFNOSUPPORT;
}

void xfrm_local_error(struct sk_buff *skb, int mtu)
{
	unsigned int proto;
	struct xfrm_state_afinfo *afinfo;

	if (skb->protocol == htons(ETH_P_IP))
		proto = AF_INET;
	else if (skb->protocol == htons(ETH_P_IPV6) &&
		 skb->sk->sk_family == AF_INET6)
		proto = AF_INET6;
	else
		return;

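	/* xfrm_state_get_afinfo() returns with the RCU read lock held
	 * when it finds an entry; drop it after calling the per-family
	 * local_error() handler.
	 */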
	afinfo = xfrm_state_get_afinfo(proto);
	if (afinfo) {
		afinfo->local_error(skb, mtu);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(xfrm_local_error);
