1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Extension Header handling for IPv6
4 * Linux INET6 implementation
5 *
6 * Authors:
7 * Pedro Roque <roque@di.fc.ul.pt>
8 * Andi Kleen <ak@muc.de>
9 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
10 */
11
12 /* Changes:
13 * yoshfuji : ensure not to overrun while parsing
14 * tlv options.
15 * Mitsuru KANDA @USAGI and: Remove ipv6_parse_exthdrs().
16 * YOSHIFUJI Hideaki @USAGI Register inbound extension header
17 * handlers as inet6_protocol{}.
18 */
19
20 #include <linux/errno.h>
21 #include <linux/types.h>
22 #include <linux/socket.h>
23 #include <linux/sockios.h>
24 #include <linux/net.h>
25 #include <linux/netdevice.h>
26 #include <linux/in6.h>
27 #include <linux/icmpv6.h>
28 #include <linux/slab.h>
29 #include <linux/export.h>
30
31 #include <net/dst.h>
32 #include <net/sock.h>
33 #include <net/snmp.h>
34
35 #include <net/ipv6.h>
36 #include <net/protocol.h>
37 #include <net/transp_v6.h>
38 #include <net/rawv6.h>
39 #include <net/ndisc.h>
40 #include <net/ip6_route.h>
41 #include <net/addrconf.h>
42 #include <net/calipso.h>
43 #if IS_ENABLED(CONFIG_IPV6_MIP6)
44 #include <net/xfrm.h>
45 #endif
46 #include <linux/seg6.h>
47 #include <net/seg6.h>
48 #ifdef CONFIG_IPV6_SEG6_HMAC
49 #include <net/seg6_hmac.h>
50 #endif
51 #include <net/rpl.h>
52 #include <linux/ioam6.h>
53 #include <linux/ioam6_genl.h>
54 #include <net/ioam6.h>
55 #include <net/dst_metadata.h>
56
57 #include <linux/uaccess.h>
58
59 /*********************
60 Generic functions
61 *********************/
62
63 /* An unknown option is detected, decide what to do */
64
static bool ip6_tlvopt_unknown(struct sk_buff *skb, int optoff,
			       bool disallow_unknowns)
{
	u8 action;

	if (disallow_unknowns) {
		/* If unknown TLVs are disallowed by configuration
		 * then always silently drop packet. Note this also
		 * means no ICMP parameter problem is sent which
		 * could be a good property to mitigate a reflection DOS
		 * attack.
		 */
		goto drop;
	}

	/* The two high-order bits of the option type select the action
	 * to take for an unrecognized option.
	 */
	action = skb_network_header(skb)[optoff] >> 6;

	if (action == 0)	/* ignore the option, keep processing */
		return true;

	/* action 2: send ICMP parameter problem unconditionally;
	 * action 3: send it only for non-multicast destinations
	 * (redundant check - icmp_send will recheck in any case).
	 * Both drop the packet afterwards.
	 */
	if (action == 2 ||
	    (action == 3 && !ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr))) {
		icmpv6_param_prob_reason(skb, ICMPV6_UNK_OPTION, optoff,
					 SKB_DROP_REASON_UNHANDLED_PROTO);
		return false;
	}

	/* action 1 (and multicast action 3): drop without ICMP */
drop:
	kfree_skb_reason(skb, SKB_DROP_REASON_UNHANDLED_PROTO);
	return false;
}
103
/* Per-option handlers, defined below; forward declared here so that
 * ip6_parse_tlv() can dispatch to them.
 */
static bool ipv6_hop_ra(struct sk_buff *skb, int optoff);
static bool ipv6_hop_ioam(struct sk_buff *skb, int optoff);
static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff);
static bool ipv6_hop_calipso(struct sk_buff *skb, int optoff);
#if IS_ENABLED(CONFIG_IPV6_MIP6)
static bool ipv6_dest_hao(struct sk_buff *skb, int optoff);
#endif
111
112 /* Parse tlv encoded option header (hop-by-hop or destination) */
113
/* Parse tlv encoded option header (hop-by-hop or destination).
 *
 * @hopbyhop:  true for a hop-by-hop header, false for destination options;
 *             selects which set of known option types is dispatched.
 * @skb:       packet; transport header points at the extension header.
 * @max_count: maximum number of non-padding TLVs allowed; a negative value
 *             means -max_count TLVs with unknown options silently dropped.
 *
 * Returns true if parsing succeeded; on failure the skb is freed and
 * false is returned.
 */
static bool ip6_parse_tlv(bool hopbyhop,
			  struct sk_buff *skb,
			  int max_count)
{
	/* Extension header length field counts 8-octet units beyond the
	 * first 8 octets.
	 */
	int len = (skb_transport_header(skb)[1] + 1) << 3;
	const unsigned char *nh = skb_network_header(skb);
	int off = skb_network_header_len(skb);
	bool disallow_unknowns = false;
	int tlv_count = 0;
	int padlen = 0;

	if (unlikely(max_count < 0)) {
		disallow_unknowns = true;
		max_count = -max_count;
	}

	/* skip the next-header and length octets */
	off += 2;
	len -= 2;

	while (len > 0) {
		int optlen, i;

		if (nh[off] == IPV6_TLV_PAD1) {
			/* More than 7 consecutive padding bytes can never
			 * be needed for 8-octet alignment.
			 */
			padlen++;
			if (padlen > 7)
				goto bad;
			off++;
			len--;
			continue;
		}
		if (len < 2)
			goto bad;
		optlen = nh[off + 1] + 2;
		if (optlen > len)
			goto bad;

		if (nh[off] == IPV6_TLV_PADN) {
			/* RFC 2460 states that the purpose of PadN is
			 * to align the containing header to multiples
			 * of 8. 7 is therefore the highest valid value.
			 * See also RFC 4942, Section 2.1.9.5.
			 */
			padlen += optlen;
			if (padlen > 7)
				goto bad;
			/* RFC 4942 recommends receiving hosts to
			 * actively check PadN payload to contain
			 * only zeroes.
			 */
			for (i = 2; i < optlen; i++) {
				if (nh[off + i] != 0)
					goto bad;
			}
		} else {
			tlv_count++;
			if (tlv_count > max_count)
				goto bad;

			if (hopbyhop) {
				switch (nh[off]) {
				case IPV6_TLV_ROUTERALERT:
					if (!ipv6_hop_ra(skb, off))
						return false;
					break;
				case IPV6_TLV_IOAM:
					if (!ipv6_hop_ioam(skb, off))
						return false;

					/* ipv6_hop_ioam() may reallocate
					 * skb data; refetch the header
					 * pointer.
					 */
					nh = skb_network_header(skb);
					break;
				case IPV6_TLV_JUMBO:
					if (!ipv6_hop_jumbo(skb, off))
						return false;
					break;
				case IPV6_TLV_CALIPSO:
					if (!ipv6_hop_calipso(skb, off))
						return false;
					break;
				default:
					if (!ip6_tlvopt_unknown(skb, off,
								disallow_unknowns))
						return false;
					break;
				}
			} else {
				switch (nh[off]) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
				case IPV6_TLV_HAO:
					if (!ipv6_dest_hao(skb, off))
						return false;
					break;
#endif
				default:
					if (!ip6_tlvopt_unknown(skb, off,
								disallow_unknowns))
						return false;
					break;
				}
			}
			/* a real option resets the padding run counter */
			padlen = 0;
		}
		off += optlen;
		len -= optlen;
	}

	if (len == 0)
		return true;
bad:
	kfree_skb_reason(skb, SKB_DROP_REASON_IP_INHDR);
	return false;
}
225
226 /*****************************
227 Destination options header.
228 *****************************/
229
#if IS_ENABLED(CONFIG_IPV6_MIP6)
/* Home Address destination option (Mobile IPv6).  Validates the option,
 * runs the xfrm policy check, then swaps the home address carried in the
 * option with the packet's source address.
 */
static bool ipv6_dest_hao(struct sk_buff *skb, int optoff)
{
	struct ipv6_destopt_hao *hao;
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	SKB_DR(reason);
	int ret;

	/* only one HAO per packet is accepted */
	if (opt->dsthao) {
		net_dbg_ratelimited("hao duplicated\n");
		goto discard;
	}
	opt->dsthao = opt->dst1;
	opt->dst1 = 0;

	hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) + optoff);

	/* the option payload is exactly one IPv6 address (16 bytes) */
	if (hao->length != 16) {
		net_dbg_ratelimited("hao invalid option length = %d\n",
				    hao->length);
		SKB_DR_SET(reason, IP_INHDR);
		goto discard;
	}

	if (!(ipv6_addr_type(&hao->addr) & IPV6_ADDR_UNICAST)) {
		net_dbg_ratelimited("hao is not an unicast addr: %pI6\n",
				    &hao->addr);
		SKB_DR_SET(reason, INVALID_PROTO);
		goto discard;
	}

	ret = xfrm6_input_addr(skb, (xfrm_address_t *)&ipv6h->daddr,
			       (xfrm_address_t *)&hao->addr, IPPROTO_DSTOPTS);
	if (unlikely(ret < 0)) {
		SKB_DR_SET(reason, XFRM_POLICY);
		goto discard;
	}

	/* about to mangle the header: unshare a cloned skb first */
	if (skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto discard;

		/* update all variable using below by copied skbuff */
		hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) +
						  optoff);
		ipv6h = ipv6_hdr(skb);
	}

	/* swapping addresses invalidates a CHECKSUM_COMPLETE value */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;

	swap(ipv6h->saddr, hao->addr);

	if (skb->tstamp == 0)
		__net_timestamp(skb);

	return true;

discard:
	kfree_skb_reason(skb, reason);
	return false;
}
#endif
294
/* Receive handler for the destination options header.  Returns 1 when the
 * options were parsed and the transport header was advanced past them,
 * -1 when the packet was dropped.
 */
static int ipv6_destopt_rcv(struct sk_buff *skb)
{
	struct inet6_dev *idev = __in6_dev_get(skb->dev);
	struct inet6_skb_parm *opt = IP6CB(skb);
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	__u16 dstbuf;
#endif
	struct dst_entry *dst = skb_dst(skb);
	struct net *net = dev_net(skb->dev);
	int extlen;

	/* Make sure the fixed 8 bytes and then the whole header (length
	 * taken from byte 1, in 8-octet units) are in the linear area.
	 */
	if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
	    !pskb_may_pull(skb, (skb_transport_offset(skb) +
				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
		__IP6_INC_STATS(dev_net(dst_dev(dst)), idev,
				IPSTATS_MIB_INHDRERRORS);
fail_and_free:
		kfree_skb(skb);
		return -1;
	}

	extlen = (skb_transport_header(skb)[1] + 1) << 3;
	if (extlen > READ_ONCE(net->ipv6.sysctl.max_dst_opts_len))
		goto fail_and_free;

	opt->lastopt = opt->dst1 = skb_network_header_len(skb);
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	/* ipv6_dest_hao() clears opt->dst1; remember it for nhoff below */
	dstbuf = opt->dst1;
#endif

	if (ip6_parse_tlv(false, skb,
			  READ_ONCE(net->ipv6.sysctl.max_dst_opts_cnt))) {
		skb->transport_header += extlen;
		/* ip6_parse_tlv() may have reallocated skb data */
		opt = IP6CB(skb);
#if IS_ENABLED(CONFIG_IPV6_MIP6)
		opt->nhoff = dstbuf;
#else
		opt->nhoff = opt->dst1;
#endif
		return 1;
	}

	__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
	return -1;
}
340
/* Incrementally fix up skb->csum after an SRH segment transition:
 * accounts for both the segments_left decrement and the upcoming
 * daddr change to the next segment.
 */
static void seg6_update_csum(struct sk_buff *skb)
{
	struct ipv6_sr_hdr *hdr;
	struct in6_addr *addr;
	__be32 from, to;

	/* srh is at transport offset and seg_left is already decremented
	 * but daddr is not yet updated with next segment
	 */

	hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);
	addr = hdr->segments + hdr->segments_left;

	/* capture the 32-bit word containing segments_left before and
	 * after the decrement to compute the checksum delta
	 */
	hdr->segments_left++;
	from = *(__be32 *)hdr;

	hdr->segments_left--;
	to = *(__be32 *)hdr;

	/* update skb csum with diff resulting from seg_left decrement */

	update_csum_diff4(skb, from, to);

	/* compute csum diff between current and next segment and update */

	update_csum_diff16(skb, (__be32 *)(&ipv6_hdr(skb)->daddr),
			   (__be32 *)addr);
}
369
/* Receive handler for the IPv6 Segment Routing Header (SRH, type 4).
 * Either decapsulates the inner packet (segments_left == 0 with an
 * encapsulated IPv4/IPv6 payload), hands the packet to upper layers
 * (segments_left == 0 otherwise, returns 1), or advances to the next
 * segment and re-routes the packet (returns -1 after dst_input/netif_rx
 * consumed the skb).
 */
static int ipv6_srh_rcv(struct sk_buff *skb)
{
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct net *net = dev_net(skb->dev);
	struct ipv6_sr_hdr *hdr;
	struct inet6_dev *idev;
	struct in6_addr *addr;
	int accept_seg6;

	hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);

	idev = __in6_dev_get(skb->dev);

	/* SR processing must be enabled both globally and per-device */
	accept_seg6 = min(READ_ONCE(net->ipv6.devconf_all->seg6_enabled),
			  READ_ONCE(idev->cnf.seg6_enabled));

	if (!accept_seg6) {
		kfree_skb(skb);
		return -1;
	}

#ifdef CONFIG_IPV6_SEG6_HMAC
	if (!seg6_hmac_validate_skb(skb)) {
		kfree_skb(skb);
		return -1;
	}
#endif

looped_back:
	if (hdr->segments_left == 0) {
		if (hdr->nexthdr == NEXTHDR_IPV6 || hdr->nexthdr == NEXTHDR_IPV4) {
			/* last segment: decapsulate the inner packet and
			 * re-inject it into the stack
			 */
			int offset = (hdr->hdrlen + 1) << 3;

			skb_postpull_rcsum(skb, skb_network_header(skb),
					   skb_network_header_len(skb));
			skb_pull(skb, offset);
			skb_postpull_rcsum(skb, skb_transport_header(skb),
					   offset);

			skb_reset_network_header(skb);
			skb_reset_transport_header(skb);
			skb->encapsulation = 0;
			if (hdr->nexthdr == NEXTHDR_IPV4)
				skb->protocol = htons(ETH_P_IP);
			__skb_tunnel_rx(skb, skb->dev, net);

			netif_rx(skb);
			return -1;
		}

		/* no encapsulation: deliver to the next protocol handler */
		opt->srcrt = skb_network_header_len(skb);
		opt->lastopt = opt->srcrt;
		skb->transport_header += (hdr->hdrlen + 1) << 3;
		opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb);

		return 1;
	}

	/* segments_left must index into the segment list */
	if (hdr->segments_left >= (hdr->hdrlen >> 1)) {
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((&hdr->segments_left) -
				   skb_network_header(skb)));
		return -1;
	}

	/* about to mangle the header: unshare a cloned skb first */
	if (skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					IPSTATS_MIB_OUTDISCARDS);
			kfree_skb(skb);
			return -1;
		}

		hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);
	}

	hdr->segments_left--;
	addr = hdr->segments + hdr->segments_left;

	skb_push(skb, sizeof(struct ipv6hdr));

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		seg6_update_csum(skb);

	ipv6_hdr(skb)->daddr = *addr;

	ip6_route_input(skb);

	if (skb_dst(skb)->error) {
		dst_input(skb);
		return -1;
	}

	if (skb_dst_dev(skb)->flags & IFF_LOOPBACK) {
		/* next segment is local: process the SRH again in place,
		 * honouring the hop limit
		 */
		if (ipv6_hdr(skb)->hop_limit <= 1) {
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
			icmpv6_send(skb, ICMPV6_TIME_EXCEED,
				    ICMPV6_EXC_HOPLIMIT, 0);
			kfree_skb(skb);
			return -1;
		}
		ipv6_hdr(skb)->hop_limit--;

		skb_pull(skb, sizeof(struct ipv6hdr));
		goto looped_back;
	}

	dst_input(skb);

	return -1;
}
482
/* Receive handler for the RPL Source Routing Header (type 3, RFC 6554).
 * The RPL SRH stores compressed addresses; processing a segment requires
 * decompressing into a temporary buffer, swapping in the next hop,
 * recompressing, and rebuilding the packet headers.
 */
static int ipv6_rpl_srh_rcv(struct sk_buff *skb)
{
	struct ipv6_rpl_sr_hdr *hdr, *ohdr, *chdr;
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct net *net = dev_net(skb->dev);
	struct inet6_dev *idev;
	struct ipv6hdr *oldhdr;
	unsigned char *buf;
	int accept_rpl_seg;
	int i, err;
	u64 n = 0;
	u32 r;

	idev = __in6_dev_get(skb->dev);

	/* RPL SRH processing must be enabled both globally and per-device */
	accept_rpl_seg = min(READ_ONCE(net->ipv6.devconf_all->rpl_seg_enabled),
			     READ_ONCE(idev->cnf.rpl_seg_enabled));
	if (!accept_rpl_seg) {
		kfree_skb(skb);
		return -1;
	}

looped_back:
	hdr = (struct ipv6_rpl_sr_hdr *)skb_transport_header(skb);

	if (hdr->segments_left == 0) {
		if (hdr->nexthdr == NEXTHDR_IPV6) {
			/* last segment: decapsulate the inner IPv6 packet
			 * and re-inject it into the stack
			 */
			int offset = (hdr->hdrlen + 1) << 3;

			skb_postpull_rcsum(skb, skb_network_header(skb),
					   skb_network_header_len(skb));
			skb_pull(skb, offset);
			skb_postpull_rcsum(skb, skb_transport_header(skb),
					   offset);

			skb_reset_network_header(skb);
			skb_reset_transport_header(skb);
			skb->encapsulation = 0;

			__skb_tunnel_rx(skb, skb->dev, net);

			netif_rx(skb);
			return -1;
		}

		/* no encapsulation: deliver to the next protocol handler */
		opt->srcrt = skb_network_header_len(skb);
		opt->lastopt = opt->srcrt;
		skb->transport_header += (hdr->hdrlen + 1) << 3;
		opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb);

		return 1;
	}

	/* derive the number of compressed segments from the header size,
	 * padding and the two compression factors (cmpri/cmpre)
	 */
	n = (hdr->hdrlen << 3) - hdr->pad - (16 - hdr->cmpre);
	r = do_div(n, (16 - hdr->cmpri));
	/* checks if calculation was without remainder and n fits into
	 * unsigned char which is segments_left field. Should not be
	 * higher than that.
	 */
	if (r || (n + 1) > 255) {
		kfree_skb(skb);
		return -1;
	}

	if (hdr->segments_left > n + 1) {
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((&hdr->segments_left) -
				   skb_network_header(skb)));
		return -1;
	}

	hdr->segments_left--;
	i = n - hdr->segments_left;

	/* scratch buffer holding the decompressed SRH followed by the
	 * recompressed SRH
	 */
	buf = kcalloc(struct_size(hdr, segments.addr, n + 2), 2, GFP_ATOMIC);
	if (unlikely(!buf)) {
		kfree_skb(skb);
		return -1;
	}

	ohdr = (struct ipv6_rpl_sr_hdr *)buf;
	ipv6_rpl_srh_decompress(ohdr, hdr, &ipv6_hdr(skb)->daddr, n);
	chdr = (struct ipv6_rpl_sr_hdr *)(buf + ((ohdr->hdrlen + 1) << 3));

	if (ipv6_addr_is_multicast(&ohdr->rpl_segaddr[i])) {
		kfree_skb(skb);
		kfree(buf);
		return -1;
	}

	/* reject segment lists that would loop through this node */
	err = ipv6_chk_rpl_srh_loop(net, ohdr->rpl_segaddr, n + 1);
	if (err) {
		icmpv6_send(skb, ICMPV6_PARAMPROB, 0, 0);
		kfree_skb(skb);
		kfree(buf);
		return -1;
	}

	swap(ipv6_hdr(skb)->daddr, ohdr->rpl_segaddr[i]);

	ipv6_rpl_srh_compress(chdr, ohdr, &ipv6_hdr(skb)->daddr, n);

	oldhdr = ipv6_hdr(skb);

	skb_pull(skb, ((hdr->hdrlen + 1) << 3));
	skb_postpull_rcsum(skb, oldhdr,
			   sizeof(struct ipv6hdr) + ((hdr->hdrlen + 1) << 3));
	if (unlikely(!hdr->segments_left)) {
		/* the recompressed SRH can be larger than the original;
		 * make room for it in the headroom
		 */
		if (pskb_expand_head(skb, sizeof(struct ipv6hdr) + ((chdr->hdrlen + 1) << 3), 0,
				     GFP_ATOMIC)) {
			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUTDISCARDS);
			kfree_skb(skb);
			kfree(buf);
			return -1;
		}

		oldhdr = ipv6_hdr(skb);
	}
	skb_push(skb, ((chdr->hdrlen + 1) << 3) + sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);
	skb_set_transport_header(skb, sizeof(struct ipv6hdr));

	memmove(ipv6_hdr(skb), oldhdr, sizeof(struct ipv6hdr));
	memcpy(skb_transport_header(skb), chdr, (chdr->hdrlen + 1) << 3);

	ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
	skb_postpush_rcsum(skb, ipv6_hdr(skb),
			   sizeof(struct ipv6hdr) + ((chdr->hdrlen + 1) << 3));

	kfree(buf);

	ip6_route_input(skb);

	if (skb_dst(skb)->error) {
		dst_input(skb);
		return -1;
	}

	if (skb_dst_dev(skb)->flags & IFF_LOOPBACK) {
		/* next segment is local: process the SRH again in place,
		 * honouring the hop limit
		 */
		if (ipv6_hdr(skb)->hop_limit <= 1) {
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
			icmpv6_send(skb, ICMPV6_TIME_EXCEED,
				    ICMPV6_EXC_HOPLIMIT, 0);
			kfree_skb(skb);
			return -1;
		}
		ipv6_hdr(skb)->hop_limit--;

		skb_pull(skb, sizeof(struct ipv6hdr));
		goto looped_back;
	}

	dst_input(skb);

	return -1;
}
641
642 /********************************
643 Routing header.
644 ********************************/
645
646 /* called with rcu_read_lock() */
/* Receive handler for the routing header.  Dispatches SRH (type 4) and
 * RPL SRH (type 3) to their dedicated handlers; otherwise implements the
 * RFC 2460 routing header forwarding algorithm for type 2 (Mobile IPv6).
 * Returns 1 to continue upper-layer processing, -1 when the packet was
 * dropped or forwarded.
 *
 * called with rcu_read_lock()
 */
static int ipv6_rthdr_rcv(struct sk_buff *skb)
{
	struct inet6_dev *idev = __in6_dev_get(skb->dev);
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct in6_addr *addr = NULL;
	int n, i;
	struct ipv6_rt_hdr *hdr;
	struct rt0_hdr *rthdr;
	struct net *net = dev_net(skb->dev);
	int accept_source_route;

	accept_source_route = READ_ONCE(net->ipv6.devconf_all->accept_source_route);

	if (idev)
		accept_source_route = min(accept_source_route,
					  READ_ONCE(idev->cnf.accept_source_route));

	/* make sure the whole header is in the linear area */
	if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
	    !pskb_may_pull(skb, (skb_transport_offset(skb) +
				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
		kfree_skb(skb);
		return -1;
	}

	hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb);

	/* source-routed packets must be unicast and addressed to us */
	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ||
	    skb->pkt_type != PACKET_HOST) {
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
		kfree_skb(skb);
		return -1;
	}

	switch (hdr->type) {
	case IPV6_SRCRT_TYPE_4:
		/* segment routing */
		return ipv6_srh_rcv(skb);
	case IPV6_SRCRT_TYPE_3:
		/* rpl segment routing */
		return ipv6_rpl_srh_rcv(skb);
	default:
		break;
	}

looped_back:
	if (hdr->segments_left == 0) {
		switch (hdr->type) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
		case IPV6_SRCRT_TYPE_2:
			/* Silently discard type 2 header unless it was
			 * processed by own
			 */
			if (!addr) {
				__IP6_INC_STATS(net, idev,
						IPSTATS_MIB_INADDRERRORS);
				kfree_skb(skb);
				return -1;
			}
			break;
#endif
		default:
			break;
		}

		opt->lastopt = opt->srcrt = skb_network_header_len(skb);
		skb->transport_header += (hdr->hdrlen + 1) << 3;
		opt->dst0 = opt->dst1;
		opt->dst1 = 0;
		opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb);
		return 1;
	}

	switch (hdr->type) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	case IPV6_SRCRT_TYPE_2:
		if (accept_source_route < 0)
			goto unknown_rh;
		/* Silently discard invalid RTH type 2 */
		if (hdr->hdrlen != 2 || hdr->segments_left != 1) {
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
			kfree_skb(skb);
			return -1;
		}
		break;
#endif
	default:
		goto unknown_rh;
	}

	/*
	 * This is the routing header forwarding algorithm from
	 * RFC 2460, page 16.
	 */

	n = hdr->hdrlen >> 1;

	if (hdr->segments_left > n) {
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((&hdr->segments_left) -
				   skb_network_header(skb)));
		return -1;
	}

	/* We are about to mangle packet header. Be careful!
	   Do not damage packets queued somewhere.
	 */
	if (skb_cloned(skb)) {
		/* the copy is a forwarded packet */
		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					IPSTATS_MIB_OUTDISCARDS);
			kfree_skb(skb);
			return -1;
		}
		hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb);
	}

	/* swapping addresses invalidates a CHECKSUM_COMPLETE value */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;

	i = n - --hdr->segments_left;

	rthdr = (struct rt0_hdr *) hdr;
	addr = rthdr->addr;
	addr += i - 1;

	switch (hdr->type) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	case IPV6_SRCRT_TYPE_2:
		if (xfrm6_input_addr(skb, (xfrm_address_t *)addr,
				     (xfrm_address_t *)&ipv6_hdr(skb)->saddr,
				     IPPROTO_ROUTING) < 0) {
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
			kfree_skb(skb);
			return -1;
		}
		if (!ipv6_chk_home_addr(skb_dst_dev_net(skb), addr)) {
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
			kfree_skb(skb);
			return -1;
		}
		break;
#endif
	default:
		break;
	}

	if (ipv6_addr_is_multicast(addr)) {
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
		kfree_skb(skb);
		return -1;
	}

	swap(*addr, ipv6_hdr(skb)->daddr);

	ip6_route_input(skb);
	if (skb_dst(skb)->error) {
		skb_push(skb, -skb_network_offset(skb));
		dst_input(skb);
		return -1;
	}

	if (skb_dst_dev(skb)->flags & IFF_LOOPBACK) {
		/* next hop is local: process the routing header again,
		 * honouring the hop limit
		 */
		if (ipv6_hdr(skb)->hop_limit <= 1) {
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
			icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
				    0);
			kfree_skb(skb);
			return -1;
		}
		ipv6_hdr(skb)->hop_limit--;
		goto looped_back;
	}

	skb_push(skb, -skb_network_offset(skb));
	dst_input(skb);
	return -1;

unknown_rh:
	__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
			  (&hdr->type) - skb_network_header(skb));
	return -1;
}
833
/* Protocol descriptors registered for the routing header, destination
 * options header and the no-next-header value (IPPROTO_NONE discards).
 */
static const struct inet6_protocol rthdr_protocol = {
	.handler	=	ipv6_rthdr_rcv,
	.flags		=	INET6_PROTO_NOPOLICY,
};

static const struct inet6_protocol destopt_protocol = {
	.handler	=	ipv6_destopt_rcv,
	.flags		=	INET6_PROTO_NOPOLICY,
};

static const struct inet6_protocol nodata_protocol = {
	.handler	=	dst_discard,
	.flags		=	INET6_PROTO_NOPOLICY,
};
848
ipv6_exthdrs_init(void)849 int __init ipv6_exthdrs_init(void)
850 {
851 int ret;
852
853 ret = inet6_add_protocol(&rthdr_protocol, IPPROTO_ROUTING);
854 if (ret)
855 goto out;
856
857 ret = inet6_add_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
858 if (ret)
859 goto out_rthdr;
860
861 ret = inet6_add_protocol(&nodata_protocol, IPPROTO_NONE);
862 if (ret)
863 goto out_destopt;
864
865 out:
866 return ret;
867 out_destopt:
868 inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
869 out_rthdr:
870 inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING);
871 goto out;
872 };
873
/* Unregister the extension header handlers, in reverse registration
 * order relative to ipv6_exthdrs_init().
 */
void ipv6_exthdrs_exit(void)
{
	inet6_del_protocol(&nodata_protocol, IPPROTO_NONE);
	inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
	inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING);
}
880
881 /**********************************
882 Hop-by-hop options.
883 **********************************/
884
885 /* Router Alert as of RFC 2711 */
886
ipv6_hop_ra(struct sk_buff * skb,int optoff)887 static bool ipv6_hop_ra(struct sk_buff *skb, int optoff)
888 {
889 const unsigned char *nh = skb_network_header(skb);
890
891 if (nh[optoff + 1] == 2) {
892 IP6CB(skb)->flags |= IP6SKB_ROUTERALERT;
893 memcpy(&IP6CB(skb)->ra, nh + optoff + 2, sizeof(IP6CB(skb)->ra));
894 return true;
895 }
896 net_dbg_ratelimited("ipv6_hop_ra: wrong RA length %d\n",
897 nh[optoff + 1]);
898 kfree_skb_reason(skb, SKB_DROP_REASON_IP_INHDR);
899 return false;
900 }
901
902 /* IOAM */
903
/* IOAM hop-by-hop option: validate a pre-allocated trace option and, if
 * the namespace is known, append this node's trace data in place.
 * Returns true to continue parsing (including the "ignore" cases),
 * false after dropping a malformed packet.
 */
static bool ipv6_hop_ioam(struct sk_buff *skb, int optoff)
{
	struct ioam6_trace_hdr *trace;
	struct ioam6_namespace *ns;
	struct ioam6_hdr *hdr;

	/* Bad alignment (must be 4n-aligned) */
	if (optoff & 3)
		goto drop;

	/* Ignore if IOAM is not enabled on ingress */
	if (!READ_ONCE(__in6_dev_get(skb->dev)->cnf.ioam6_enabled))
		goto ignore;

	/* Truncated Option header */
	hdr = (struct ioam6_hdr *)(skb_network_header(skb) + optoff);
	if (hdr->opt_len < 2)
		goto drop;

	switch (hdr->type) {
	case IOAM6_TYPE_PREALLOC:
		/* Truncated Pre-allocated Trace header */
		if (hdr->opt_len < 2 + sizeof(*trace))
			goto drop;

		/* Malformed Pre-allocated Trace header */
		trace = (struct ioam6_trace_hdr *)((u8 *)hdr + sizeof(*hdr));
		if (hdr->opt_len < 2 + sizeof(*trace) + trace->remlen * 4)
			goto drop;

		/* Inconsistent Pre-allocated Trace header */
		if (trace->nodelen !=
		    ioam6_trace_compute_nodelen(be32_to_cpu(trace->type_be32)))
			goto drop;

		/* Ignore if the IOAM namespace is unknown */
		ns = ioam6_namespace(dev_net(skb->dev), trace->namespace_id);
		if (!ns)
			goto ignore;

		if (!skb_valid_dst(skb))
			ip6_route_input(skb);

		/* About to mangle packet header */
		if (skb_ensure_writable(skb, optoff + 2 + hdr->opt_len))
			goto drop;

		/* Trace pointer may have changed */
		trace = (struct ioam6_trace_hdr *)(skb_network_header(skb)
						   + optoff + sizeof(*hdr));

		ioam6_fill_trace_data(skb, ns, trace, true);

		ioam6_event(IOAM6_EVENT_TRACE, dev_net(skb->dev),
			    GFP_ATOMIC, (void *)trace, hdr->opt_len - 2);
		break;
	default:
		break;
	}

ignore:
	return true;

drop:
	kfree_skb_reason(skb, SKB_DROP_REASON_IP_INHDR);
	return false;
}
971
972 /* Jumbo payload */
973
/* Jumbo Payload hop-by-hop option (RFC 2675): validate the 32-bit jumbo
 * length, require a zero payload_len in the IPv6 header, and trim the
 * skb to the advertised length.
 */
static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
{
	const unsigned char *nh = skb_network_header(skb);
	SKB_DR(reason);
	u32 pkt_len;

	/* option data must be 4 bytes and 4n+2 aligned */
	if (nh[optoff + 1] != 4 || (optoff & 3) != 2) {
		net_dbg_ratelimited("ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
				    nh[optoff+1]);
		SKB_DR_SET(reason, IP_INHDR);
		goto drop;
	}

	pkt_len = ntohl(*(__be32 *)(nh + optoff + 2));
	/* jumbograms are only for payloads that don't fit in 16 bits */
	if (pkt_len <= IPV6_MAXPLEN) {
		icmpv6_param_prob_reason(skb, ICMPV6_HDR_FIELD, optoff + 2,
					 SKB_DROP_REASON_IP_INHDR);
		return false;
	}
	/* the IPv6 header's payload_len must be zero for a jumbogram */
	if (ipv6_hdr(skb)->payload_len) {
		icmpv6_param_prob_reason(skb, ICMPV6_HDR_FIELD, optoff,
					 SKB_DROP_REASON_IP_INHDR);
		return false;
	}

	if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
		SKB_DR_SET(reason, PKT_TOO_SMALL);
		goto drop;
	}

	if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
		goto drop;

	IP6CB(skb)->flags |= IP6SKB_JUMBOGRAM;
	return true;

drop:
	kfree_skb_reason(skb, reason);
	return false;
}
1014
1015 /* CALIPSO RFC 5570 */
1016
ipv6_hop_calipso(struct sk_buff * skb,int optoff)1017 static bool ipv6_hop_calipso(struct sk_buff *skb, int optoff)
1018 {
1019 const unsigned char *nh = skb_network_header(skb);
1020
1021 if (nh[optoff + 1] < 8)
1022 goto drop;
1023
1024 if (nh[optoff + 6] * 4 + 8 > nh[optoff + 1])
1025 goto drop;
1026
1027 if (!calipso_validate(skb, nh + optoff))
1028 goto drop;
1029
1030 return true;
1031
1032 drop:
1033 kfree_skb_reason(skb, SKB_DROP_REASON_IP_INHDR);
1034 return false;
1035 }
1036
/* Parse the hop-by-hop options header at the start of the IPv6 payload.
 * Returns 1 when parsing succeeded and the transport header was advanced
 * past the options, -1 when the packet was dropped.
 */
int ipv6_parse_hopopts(struct sk_buff *skb)
{
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct net *net = dev_net(skb->dev);
	int extlen;

	/*
	 * skb_network_header(skb) is equal to skb->data, and
	 * skb_network_header_len(skb) is always equal to
	 * sizeof(struct ipv6hdr) by definition of
	 * hop-by-hop options.
	 */
	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + 8) ||
	    !pskb_may_pull(skb, (sizeof(struct ipv6hdr) +
				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
fail_and_free:
		kfree_skb(skb);
		return -1;
	}

	extlen = (skb_transport_header(skb)[1] + 1) << 3;
	if (extlen > READ_ONCE(net->ipv6.sysctl.max_hbh_opts_len))
		goto fail_and_free;

	opt->flags |= IP6SKB_HOPBYHOP;
	if (ip6_parse_tlv(true, skb,
			  READ_ONCE(net->ipv6.sysctl.max_hbh_opts_cnt))) {
		skb->transport_header += extlen;
		/* ip6_parse_tlv() may have reallocated skb data */
		opt = IP6CB(skb);
		opt->nhoff = sizeof(struct ipv6hdr);
		return 1;
	}
	return -1;
}
1071
1072 /*
1073 * Creating outbound headers.
1074 *
1075 * "build" functions work when skb is filled from head to tail (datagram)
1076 * "push" functions work when headers are added from tail to head (tcp)
1077 *
1078 * In both cases we assume, that caller reserved enough room
1079 * for headers.
1080 */
1081
/* Push a type 0/2 routing header onto the skb.  The final destination
 * (*addr_p) becomes the last hop in the pushed header, and *addr_p is
 * redirected to the first hop of the source route so the caller puts it
 * in the IPv6 header's daddr.  Returns NEXTHDR_ROUTING for the caller
 * to store as the preceding header's next-header value.
 */
static u8 ipv6_push_rthdr0(struct sk_buff *skb, u8 proto,
			   struct ipv6_rt_hdr *opt,
			   struct in6_addr **addr_p, struct in6_addr *saddr)
{
	struct rt0_hdr *phdr, *ihdr;
	int hops;

	ihdr = (struct rt0_hdr *) opt;

	phdr = skb_push(skb, (ihdr->rt_hdr.hdrlen + 1) << 3);
	memcpy(phdr, ihdr, sizeof(struct rt0_hdr));

	/* hdrlen is in 8-octet units; each hop is 16 bytes, so the hop
	 * count is hdrlen / 2
	 */
	hops = ihdr->rt_hdr.hdrlen >> 1;

	/* hops 2..n of the source route shift down one slot; slot 0
	 * (the route's first hop) goes into the IPv6 daddr instead
	 */
	if (hops > 1)
		memcpy(phdr->addr, ihdr->addr + 1,
		       (hops - 1) * sizeof(struct in6_addr));

	phdr->addr[hops - 1] = **addr_p;
	*addr_p = ihdr->addr;

	phdr->rt_hdr.nexthdr = proto;
	return NEXTHDR_ROUTING;
}
1106
/* Push a type 4 (segment routing) header onto the skb.  The final
 * destination (*addr_p) becomes segment 0, *addr_p is redirected to the
 * active segment so the caller puts it in the IPv6 daddr, trailing TLVs
 * are copied and, when configured, an HMAC TLV is filled in.  Returns
 * NEXTHDR_ROUTING.
 */
static u8 ipv6_push_rthdr4(struct sk_buff *skb, u8 proto,
			   struct ipv6_rt_hdr *opt,
			   struct in6_addr **addr_p, struct in6_addr *saddr)
{
	struct ipv6_sr_hdr *sr_phdr, *sr_ihdr;
	int plen, hops;

	sr_ihdr = (struct ipv6_sr_hdr *)opt;
	plen = (sr_ihdr->hdrlen + 1) << 3;

	sr_phdr = skb_push(skb, plen);
	memcpy(sr_phdr, sr_ihdr, sizeof(struct ipv6_sr_hdr));

	hops = sr_ihdr->first_segment + 1;
	memcpy(sr_phdr->segments + 1, sr_ihdr->segments + 1,
	       (hops - 1) * sizeof(struct in6_addr));

	sr_phdr->segments[0] = **addr_p;
	*addr_p = &sr_ihdr->segments[sr_ihdr->segments_left];

	/* copy any TLVs that follow the segment list */
	if (sr_ihdr->hdrlen > hops * 2) {
		int tlvs_offset, tlvs_length;

		tlvs_offset = (1 + hops * 2) << 3;
		tlvs_length = (sr_ihdr->hdrlen - hops * 2) << 3;
		memcpy((char *)sr_phdr + tlvs_offset,
		       (char *)sr_ihdr + tlvs_offset, tlvs_length);
	}

#ifdef CONFIG_IPV6_SEG6_HMAC
	if (sr_has_hmac(sr_phdr)) {
		struct net *net = NULL;

		/* the netns may come from either the device (forwarding)
		 * or the socket (locally generated)
		 */
		if (skb->dev)
			net = dev_net(skb->dev);
		else if (skb->sk)
			net = sock_net(skb->sk);

		WARN_ON(!net);

		if (net)
			seg6_push_hmac(net, saddr, sr_phdr);
	}
#endif

	sr_phdr->nexthdr = proto;
	return NEXTHDR_ROUTING;
}
1155
/* Dispatch routing-header push by type.  Unknown types are not pushed;
 * the incoming proto is returned unchanged.
 */
static u8 ipv6_push_rthdr(struct sk_buff *skb, u8 proto,
			  struct ipv6_rt_hdr *opt,
			  struct in6_addr **addr_p, struct in6_addr *saddr)
{
	if (opt->type == IPV6_SRCRT_TYPE_0 ||
	    opt->type == IPV6_SRCRT_STRICT ||
	    opt->type == IPV6_SRCRT_TYPE_2)
		return ipv6_push_rthdr0(skb, proto, opt, addr_p, saddr);

	if (opt->type == IPV6_SRCRT_TYPE_4)
		return ipv6_push_rthdr4(skb, proto, opt, addr_p, saddr);

	return proto;
}
1174
/* Push a generic option header (hop-by-hop or destination) onto the skb,
 * chaining the previous proto into its next-header field.  Returns the
 * next-header value (type) the caller should use for the header that
 * will precede this one.
 */
static u8 ipv6_push_exthdr(struct sk_buff *skb, u8 proto, u8 type, struct ipv6_opt_hdr *opt)
{
	int len = ipv6_optlen(opt);
	struct ipv6_opt_hdr *h;

	h = skb_push(skb, len);
	memcpy(h, opt, len);
	h->nexthdr = proto;
	return type;
}
1183
/* Push the non-fragmentable extension headers (routing, dst0, hop-by-hop)
 * in front of the payload.  Headers are pushed tail-to-head, so the
 * hop-by-hop header ends up first on the wire.  Returns the next-header
 * value for the IPv6 header.
 */
u8 ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
			u8 proto,
			struct in6_addr **daddr, struct in6_addr *saddr)
{
	struct ipv6_rt_hdr *srcrt = opt->srcrt;

	if (srcrt) {
		proto = ipv6_push_rthdr(skb, proto, srcrt, daddr, saddr);
		/*
		 * IPV6_RTHDRDSTOPTS is ignored
		 * unless IPV6_RTHDR is set (RFC3542).
		 */
		if (opt->dst0opt)
			proto = ipv6_push_exthdr(skb, proto, NEXTHDR_DEST,
						 opt->dst0opt);
	}

	if (opt->hopopt)
		proto = ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt);

	return proto;
}
1201
/* Push the fragmentable part of the extension headers: only the final
 * destination options header (dst1opt) travels with every fragment.
 * Returns the protocol number callers should chain in front of.
 */
u8 ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 proto)
{
	struct ipv6_opt_hdr *dopt = opt->dst1opt;

	return dopt ? ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, dopt) : proto;
}
1208 EXPORT_SYMBOL(ipv6_push_frag_opts);
1209
1210 struct ipv6_txoptions *
ipv6_dup_options(struct sock * sk,struct ipv6_txoptions * opt)1211 ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
1212 {
1213 struct ipv6_txoptions *opt2;
1214
1215 opt2 = sock_kmemdup(sk, opt, opt->tot_len, GFP_ATOMIC);
1216 if (opt2) {
1217 long dif = (char *)opt2 - (char *)opt;
1218 if (opt2->hopopt)
1219 *((char **)&opt2->hopopt) += dif;
1220 if (opt2->dst0opt)
1221 *((char **)&opt2->dst0opt) += dif;
1222 if (opt2->dst1opt)
1223 *((char **)&opt2->dst1opt) += dif;
1224 if (opt2->srcrt)
1225 *((char **)&opt2->srcrt) += dif;
1226 refcount_set(&opt2->refcnt, 1);
1227 }
1228 return opt2;
1229 }
1230 EXPORT_SYMBOL_GPL(ipv6_dup_options);
1231
/* Copy one option slot into the buffer at *@p and advance *@p past it
 * (CMSG-aligned).  When @renewtype matches @newtype the replacement
 * @new is used, otherwise the existing @old; either may be NULL, in
 * which case the slot is skipped and *@dest is left untouched.
 */
static void ipv6_renew_option(int renewtype,
			      struct ipv6_opt_hdr **dest,
			      struct ipv6_opt_hdr *old,
			      struct ipv6_opt_hdr *new,
			      int newtype, char **p)
{
	struct ipv6_opt_hdr *src = (renewtype == newtype) ? new : old;
	int len;

	if (!src)
		return;

	len = ipv6_optlen(src);
	memcpy(*p, src, len);
	*dest = (struct ipv6_opt_hdr *)*p;
	*p += CMSG_ALIGN(len);
}
1248
1249 /**
1250 * ipv6_renew_options - replace a specific ext hdr with a new one.
1251 *
1252 * @sk: sock from which to allocate memory
1253 * @opt: original options
1254 * @newtype: option type to replace in @opt
1255 * @newopt: new option of type @newtype to replace (user-mem)
1256 *
1257 * Returns a new set of options which is a copy of @opt with the
1258 * option type @newtype replaced with @newopt.
1259 *
1260 * @opt may be NULL, in which case a new set of options is returned
1261 * containing just @newopt.
1262 *
1263 * @newopt may be NULL, in which case the specified option type is
1264 * not copied into the new set of options.
1265 *
1266 * The new set of options is allocated from the socket option memory
1267 * buffer of @sk.
1268 */
struct ipv6_txoptions *
ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
		   int newtype, struct ipv6_opt_hdr *newopt)
{
	int tot_len = 0;
	char *p;
	struct ipv6_txoptions *opt2;

	/* Pass 1: size the new option set.  Every header kept from @opt
	 * (i.e. not of the type being replaced) contributes its
	 * CMSG-aligned length.
	 */
	if (opt) {
		if (newtype != IPV6_HOPOPTS && opt->hopopt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->hopopt));
		if (newtype != IPV6_RTHDRDSTOPTS && opt->dst0opt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst0opt));
		if (newtype != IPV6_RTHDR && opt->srcrt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->srcrt));
		if (newtype != IPV6_DSTOPTS && opt->dst1opt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt));
	}

	/* The replacement itself; NULL @newopt simply removes the type. */
	if (newopt)
		tot_len += CMSG_ALIGN(ipv6_optlen(newopt));

	/* Nothing left at all: NULL (not an ERR_PTR) means "no options". */
	if (!tot_len)
		return NULL;

	tot_len += sizeof(*opt2);
	opt2 = sock_kmalloc(sk, tot_len, GFP_ATOMIC);
	if (!opt2)
		return ERR_PTR(-ENOBUFS);

	memset(opt2, 0, tot_len);
	refcount_set(&opt2->refcnt, 1);
	opt2->tot_len = tot_len;
	/* Pass 2: fill the storage trailing opt2.  @p walks the buffer;
	 * the calls below must stay in this (canonical extension header)
	 * order and mirror the sizing pass above exactly.
	 */
	p = (char *)(opt2 + 1);

	ipv6_renew_option(IPV6_HOPOPTS, &opt2->hopopt,
			  (opt ? opt->hopopt : NULL),
			  newopt, newtype, &p);
	ipv6_renew_option(IPV6_RTHDRDSTOPTS, &opt2->dst0opt,
			  (opt ? opt->dst0opt : NULL),
			  newopt, newtype, &p);
	ipv6_renew_option(IPV6_RTHDR,
			  (struct ipv6_opt_hdr **)&opt2->srcrt,
			  (opt ? (struct ipv6_opt_hdr *)opt->srcrt : NULL),
			  newopt, newtype, &p);
	ipv6_renew_option(IPV6_DSTOPTS, &opt2->dst1opt,
			  (opt ? opt->dst1opt : NULL),
			  newopt, newtype, &p);

	/* Cached length split used when building packets: non-fragmentable
	 * headers (hop-by-hop, dst0, routing) vs the fragmentable dst1.
	 */
	opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) +
			  (opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) +
			  (opt2->srcrt ? ipv6_optlen(opt2->srcrt) : 0);
	opt2->opt_flen = (opt2->dst1opt ? ipv6_optlen(opt2->dst1opt) : 0);

	return opt2;
}
1325
__ipv6_fixup_options(struct ipv6_txoptions * opt_space,struct ipv6_txoptions * opt)1326 struct ipv6_txoptions *__ipv6_fixup_options(struct ipv6_txoptions *opt_space,
1327 struct ipv6_txoptions *opt)
1328 {
1329 /*
1330 * ignore the dest before srcrt unless srcrt is being included.
1331 * --yoshfuji
1332 */
1333 if (opt->dst0opt && !opt->srcrt) {
1334 if (opt_space != opt) {
1335 memcpy(opt_space, opt, sizeof(*opt_space));
1336 opt = opt_space;
1337 }
1338 opt->opt_nflen -= ipv6_optlen(opt->dst0opt);
1339 opt->dst0opt = NULL;
1340 }
1341
1342 return opt;
1343 }
1344 EXPORT_SYMBOL_GPL(__ipv6_fixup_options);
1345
1346 /**
1347 * __fl6_update_dst - update flowi destination address with info given
1348 * by srcrt option, if any.
1349 *
1350 * @fl6: flowi6 for which daddr is to be updated
1351 * @opt: struct ipv6_txoptions in which to look for srcrt opt
1352 * @orig: copy of original daddr address if modified
1353 *
1354 * Return: NULL if no srcrt or invalid srcrt type, otherwise returns orig
1355 * and initial value of fl6->daddr set in orig
1356 */
__fl6_update_dst(struct flowi6 * fl6,const struct ipv6_txoptions * opt,struct in6_addr * orig)1357 struct in6_addr *__fl6_update_dst(struct flowi6 *fl6,
1358 const struct ipv6_txoptions *opt,
1359 struct in6_addr *orig)
1360 {
1361 if (!opt->srcrt)
1362 return NULL;
1363
1364 *orig = fl6->daddr;
1365
1366 switch (opt->srcrt->type) {
1367 case IPV6_SRCRT_TYPE_0:
1368 case IPV6_SRCRT_STRICT:
1369 case IPV6_SRCRT_TYPE_2:
1370 fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr;
1371 break;
1372 case IPV6_SRCRT_TYPE_4:
1373 {
1374 struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)opt->srcrt;
1375
1376 fl6->daddr = srh->segments[srh->segments_left];
1377 break;
1378 }
1379 default:
1380 return NULL;
1381 }
1382
1383 return orig;
1384 }
1385 EXPORT_SYMBOL_GPL(__fl6_update_dst);
1386