/*
 *	Extension Header handling for IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Andi Kleen		<ak@muc.de>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	$Id: exthdrs.c,v 1.13 2001/06/19 15:58:56 davem Exp $
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/* Changes:
 *	yoshfuji		: ensure not to overrun while parsing
 *				  tlv options.
 *	Mitsuru KANDA @USAGI and: Remove ipv6_parse_exthdrs().
 *	YOSHIFUJI Hideaki @USAGI  Register inbound extension header
 *				  handlers as inet6_protocol{}.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/sched.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/icmpv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <asm/uaccess.h>

/*
 *	Parsing tlv encoded headers.
 *
 *	The parsing function "func" returns 1 if parsing succeeded
 *	and 0 if it failed.
 *	It MUST NOT touch skb->h.
 */

struct tlvtype_proc {
	int	type;
	int	(*func)(struct sk_buff *skb, int offset);
};

/*********************
  Generic functions
 *********************/

/* An unknown option is detected, decide what to do */

static int ip6_tlvopt_unknown(struct sk_buff *skb, int optoff)
{
	switch ((skb->nh.raw[optoff] & 0xC0) >> 6) {
	case 0: /* ignore */
		return 1;

	case 1: /* drop packet */
		break;

	case 3: /* Send ICMP if not a multicast address and drop packet */
		/* Actually, this check is redundant: icmpv6_send()
		   will recheck in any case.
		 */
		if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr))
			break;
	case 2: /* send ICMP PARM PROB regardless and drop packet */
		icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff);
		return 0;
	}

	kfree_skb(skb);
	return 0;
}

/* Parse tlv encoded option header (hop-by-hop or destination) */

static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff *skb)
{
	struct tlvtype_proc *curr;
	int off = skb->h.raw - skb->nh.raw;
	int len = ((skb->h.raw[1]+1)<<3);

	if ((skb->h.raw + len) - skb->data > skb_headlen(skb))
		goto bad;

	off += 2;
	len -= 2;

	while (len > 0) {
		int optlen = skb->nh.raw[off+1]+2;

		switch (skb->nh.raw[off]) {
		case IPV6_TLV_PAD0:
			optlen = 1;
			break;

		case IPV6_TLV_PADN:
			break;

		default: /* Other TLV code so scan list */
			if (optlen > len)
				goto bad;
			for (curr=procs; curr->type >= 0; curr++) {
				if (curr->type == skb->nh.raw[off]) {
					/* type specific length/alignment
					   checks will be performed in the
					   func(). */
					if (curr->func(skb, off) == 0)
						return 0;
					break;
				}
			}
			if (curr->type < 0) {
				if (ip6_tlvopt_unknown(skb, off) == 0)
					return 0;
			}
			break;
		}
		off += optlen;
		len -= optlen;
	}
	if (len == 0)
		return 1;
bad:
	kfree_skb(skb);
	return 0;
}
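/*
 * Illustrative sketch only (kept out of the build with #if 0): how a new
 * TLV option handler would plug into ip6_parse_tlv() through a
 * tlvtype_proc table, in the same way tlvprocdestopt_lst and
 * tlvprochopopt_lst do below.  The option type 0x42 and the 4-byte data
 * length are made up for the example.  Note the contract: return 1 to
 * continue parsing, or free the skb and return 0 to stop.
 */
#if 0
static int ipv6_example_opt(struct sk_buff *skb, int optoff)
{
	/* skb->nh.raw[optoff] is the option type, [optoff+1] its data length */
	if (skb->nh.raw[optoff + 1] != 4) {
		kfree_skb(skb);
		return 0;	/* parsing stops; skb already freed */
	}
	return 1;		/* option accepted, keep parsing */
}

static struct tlvtype_proc example_tlv_lst[] = {
	{
		.type	= 0x42,	/* hypothetical option type */
		.func	= ipv6_example_opt,
	},
	{ -1, }
};
#endif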
/*****************************
  Destination options header.
 *****************************/

static struct tlvtype_proc tlvprocdestopt_lst[] = {
	/* No destination options are defined now */
	{-1,			NULL}
};

static int ipv6_destopt_rcv(struct sk_buff **skbp, unsigned int *nhoffp)
{
	struct sk_buff *skb = *skbp;
	struct inet6_skb_parm *opt = IP6CB(skb);

	if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+8) ||
	    !pskb_may_pull(skb, (skb->h.raw-skb->data)+((skb->h.raw[1]+1)<<3))) {
		IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
		kfree_skb(skb);
		return -1;
	}

	opt->lastopt = skb->h.raw - skb->nh.raw;
	opt->dst1 = skb->h.raw - skb->nh.raw;

	if (ip6_parse_tlv(tlvprocdestopt_lst, skb)) {
		skb->h.raw += ((skb->h.raw[1]+1)<<3);
		*nhoffp = opt->dst1;
		return 1;
	}

	IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
	return -1;
}

static struct inet6_protocol destopt_protocol = {
	.handler	=	ipv6_destopt_rcv,
	.flags		=	INET6_PROTO_NOPOLICY,
};

void __init ipv6_destopt_init(void)
{
	if (inet6_add_protocol(&destopt_protocol, IPPROTO_DSTOPTS) < 0)
		printk(KERN_ERR "ipv6_destopt_init: Could not register protocol\n");
}
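/*
 * A note on the length arithmetic used above (and in ipv6_rthdr_rcv()
 * below): the hdrlen byte of an extension header counts 8-octet units
 * beyond the first 8 octets, so the full header occupies
 * (hdrlen + 1) << 3 bytes -- e.g. hdrlen == 2 means 24 bytes.  That is
 * why the second pskb_may_pull() re-pulls with the real length once the
 * first 8 bytes (which contain hdrlen itself) are known to be present.
 * The same computation is what ipv6_optlen() expands to.
 */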
/********************************
  NONE header. No data in packet.
 ********************************/

static int ipv6_nodata_rcv(struct sk_buff **skbp, unsigned int *nhoffp)
{
	struct sk_buff *skb = *skbp;

	kfree_skb(skb);
	return 0;
}

static struct inet6_protocol nodata_protocol = {
	.handler	=	ipv6_nodata_rcv,
	.flags		=	INET6_PROTO_NOPOLICY,
};

void __init ipv6_nodata_init(void)
{
	if (inet6_add_protocol(&nodata_protocol, IPPROTO_NONE) < 0)
		printk(KERN_ERR "ipv6_nodata_init: Could not register protocol\n");
}

/********************************
  Routing header.
 ********************************/

static int ipv6_rthdr_rcv(struct sk_buff **skbp, unsigned int *nhoffp)
{
	struct sk_buff *skb = *skbp;
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct in6_addr *addr;
	struct in6_addr daddr;
	int n, i;

	struct ipv6_rt_hdr *hdr;
	struct rt0_hdr *rthdr;

	if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+8) ||
	    !pskb_may_pull(skb, (skb->h.raw-skb->data)+((skb->h.raw[1]+1)<<3))) {
		IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
		kfree_skb(skb);
		return -1;
	}

	hdr = (struct ipv6_rt_hdr *) skb->h.raw;

	if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr) ||
	    skb->pkt_type != PACKET_HOST) {
		IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
		kfree_skb(skb);
		return -1;
	}

looped_back:
	if (hdr->segments_left == 0) {
		opt->lastopt = skb->h.raw - skb->nh.raw;
		opt->srcrt = skb->h.raw - skb->nh.raw;
		skb->h.raw += (hdr->hdrlen + 1) << 3;
		opt->dst0 = opt->dst1;
		opt->dst1 = 0;
		*nhoffp = (&hdr->nexthdr) - skb->nh.raw;
		return 1;
	}

	if (hdr->type != IPV6_SRCRT_TYPE_0) {
		IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->type) - skb->nh.raw);
		return -1;
	}

	if (hdr->hdrlen & 0x01) {
		IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->hdrlen) - skb->nh.raw);
		return -1;
	}

	/*
	 *	This is the routing header forwarding algorithm from
	 *	RFC 2460, page 16.
	 */

	n = hdr->hdrlen >> 1;

	if (hdr->segments_left > n) {
		IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->segments_left) - skb->nh.raw);
		return -1;
	}

	/* We are about to mangle packet header. Be careful!
	   Do not damage packets queued somewhere.
	 */
	if (skb_cloned(skb)) {
		struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC);
		kfree_skb(skb);
		/* the copy is a forwarded packet */
		if (skb2 == NULL) {
			IP6_INC_STATS_BH(IPSTATS_MIB_OUTDISCARDS);
			return -1;
		}
		*skbp = skb = skb2;
		opt = IP6CB(skb2);
		hdr = (struct ipv6_rt_hdr *) skb2->h.raw;
	}

	if (skb->ip_summed == CHECKSUM_HW)
		skb->ip_summed = CHECKSUM_NONE;

	i = n - --hdr->segments_left;

	rthdr = (struct rt0_hdr *) hdr;
	addr = rthdr->addr;
	addr += i - 1;

	if (ipv6_addr_is_multicast(addr)) {
		IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
		kfree_skb(skb);
		return -1;
	}

	ipv6_addr_copy(&daddr, addr);
	ipv6_addr_copy(addr, &skb->nh.ipv6h->daddr);
	ipv6_addr_copy(&skb->nh.ipv6h->daddr, &daddr);

	dst_release(xchg(&skb->dst, NULL));
	ip6_route_input(skb);
	if (skb->dst->error) {
		skb_push(skb, skb->data - skb->nh.raw);
		dst_input(skb);
		return -1;
	}

	if (skb->dst->dev->flags&IFF_LOOPBACK) {
		if (skb->nh.ipv6h->hop_limit <= 1) {
			IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
			icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
				    0, skb->dev);
			kfree_skb(skb);
			return -1;
		}
		skb->nh.ipv6h->hop_limit--;
		goto looped_back;
	}

	skb_push(skb, skb->data - skb->nh.raw);
	dst_input(skb);
	return -1;
}

static struct inet6_protocol rthdr_protocol = {
	.handler	=	ipv6_rthdr_rcv,
	.flags		=	INET6_PROTO_NOPOLICY,
};

void __init ipv6_rthdr_init(void)
{
	if (inet6_add_protocol(&rthdr_protocol, IPPROTO_ROUTING) < 0)
		printk(KERN_ERR "ipv6_rthdr_init: Could not register protocol\n");
}
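/*
 * Worked example of the RFC 2460 index arithmetic in ipv6_rthdr_rcv()
 * above: a type 0 routing header with hdrlen == 4 carries
 * n = 4 >> 1 = 2 addresses.  A packet arriving with segments_left == 2
 * gets segments_left decremented to 1 and i = n - segments_left = 1, so
 * rthdr->addr[i - 1] == addr[0] is swapped with the destination address.
 * At the next listed hop, segments_left == 1 yields i = 2 and addr[1] is
 * used, until segments_left reaches 0 and the header is simply skipped.
 */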
/*
   This function inverts the received rthdr.
   NOTE: the specs allow this to be done automatically only if the
   packet is authenticated.

   I will not discuss it here (though, I am really pissed off at
   this stupid requirement, which makes the rthdr idea useless).

   Actually, it creates severe problems for us.
   Embryonic requests have no associated sockets, so the user has
   no control over them and cannot set reply options, or even know
   that someone tried to connect. :-(

   For now we need to test the engine, so I created a temporary
   (or permanent) backdoor: if the listening socket sets IPV6_RTHDR
   to 2, we invert the header.
   --ANK (980729)
 */

struct ipv6_txoptions *
ipv6_invert_rthdr(struct sock *sk, struct ipv6_rt_hdr *hdr)
{
	/* Received rthdr:

	   [ H1 -> H2 -> ... H_prev ]  daddr=ME

	   Inverted result:
	   [ H_prev -> ... -> H1 ]  daddr=sender

	   Note that the IP output engine will rewrite this rthdr
	   by rotating it left by one addr.
	 */

	int n, i;
	struct rt0_hdr *rthdr = (struct rt0_hdr*)hdr;
	struct rt0_hdr *irthdr;
	struct ipv6_txoptions *opt;
	int hdrlen = ipv6_optlen(hdr);

	if (hdr->segments_left ||
	    hdr->type != IPV6_SRCRT_TYPE_0 ||
	    hdr->hdrlen & 0x01)
		return NULL;

	n = hdr->hdrlen >> 1;
	opt = sock_kmalloc(sk, sizeof(*opt) + hdrlen, GFP_ATOMIC);
	if (opt == NULL)
		return NULL;
	memset(opt, 0, sizeof(*opt));
	opt->tot_len = sizeof(*opt) + hdrlen;
	opt->srcrt = (void*)(opt+1);
	opt->opt_nflen = hdrlen;

	memcpy(opt->srcrt, hdr, sizeof(*hdr));
	irthdr = (struct rt0_hdr*)opt->srcrt;
	/* Obsolete field, MBZ, when originated by us */
	irthdr->bitmap = 0;
	opt->srcrt->segments_left = n;
	for (i=0; i<n; i++)
		memcpy(irthdr->addr+i, rthdr->addr+(n-1-i), 16);
	return opt;
}

/**********************************
  Hop-by-hop options.
 **********************************/

/* Router Alert as of RFC 2711 */

static int ipv6_hop_ra(struct sk_buff *skb, int optoff)
{
	if (skb->nh.raw[optoff+1] == 2) {
		IP6CB(skb)->ra = optoff;
		return 1;
	}
	LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_ra: wrong RA length %d\n",
		       skb->nh.raw[optoff+1]);
	kfree_skb(skb);
	return 0;
}

/* Jumbo payload */

static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
{
	u32 pkt_len;

	if (skb->nh.raw[optoff+1] != 4 || (optoff&3) != 2) {
		LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
			       skb->nh.raw[optoff+1]);
		IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
		goto drop;
	}

	pkt_len = ntohl(*(u32*)(skb->nh.raw+optoff+2));
	if (pkt_len <= IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2);
		return 0;
	}
	if (skb->nh.ipv6h->payload_len) {
		IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
		return 0;
	}

	if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
		IP6_INC_STATS_BH(IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	}

	if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
		goto drop;

	return 1;

drop:
	kfree_skb(skb);
	return 0;
}

static struct tlvtype_proc tlvprochopopt_lst[] = {
	{
		.type	= IPV6_TLV_ROUTERALERT,
		.func	= ipv6_hop_ra,
	},
	{
		.type	= IPV6_TLV_JUMBO,
		.func	= ipv6_hop_jumbo,
	},
	{ -1, }
};

int ipv6_parse_hopopts(struct sk_buff *skb, int nhoff)
{
	IP6CB(skb)->hop = sizeof(struct ipv6hdr);
	if (ip6_parse_tlv(tlvprochopopt_lst, skb))
		return sizeof(struct ipv6hdr);
	return -1;
}
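/*
 * For reference, the Hop-by-Hop layout that ipv6_hop_jumbo() above
 * expects (RFC 2675): a minimal jumbogram carries the option at offset 2
 * of the extension header, which satisfies the (optoff & 3) == 2
 * alignment check:
 *
 *	byte 0: Next Header        byte 1: Hdr Ext Len (0)
 *	byte 2: Option Type 0xC2   byte 3: Opt Data Len (4)
 *	bytes 4-7: Jumbo Payload Length (32 bits, must exceed 65535)
 *
 * and the IPv6 header's own payload_len field must be zero.
 */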
/*
 *	Creating outbound headers.
 *
 *	"build" functions work when skb is filled from head to tail (datagram)
 *	"push" functions work when headers are added from tail to head (tcp)
 *
 *	In both cases we assume that the caller reserved enough room
 *	for the headers.
 */

static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto,
			    struct ipv6_rt_hdr *opt,
			    struct in6_addr **addr_p)
{
	struct rt0_hdr *phdr, *ihdr;
	int hops;

	ihdr = (struct rt0_hdr *) opt;

	phdr = (struct rt0_hdr *) skb_push(skb, (ihdr->rt_hdr.hdrlen + 1) << 3);
	memcpy(phdr, ihdr, sizeof(struct rt0_hdr));

	hops = ihdr->rt_hdr.hdrlen >> 1;

	if (hops > 1)
		memcpy(phdr->addr, ihdr->addr + 1,
		       (hops - 1) * sizeof(struct in6_addr));

	ipv6_addr_copy(phdr->addr + (hops - 1), *addr_p);
	*addr_p = ihdr->addr;

	phdr->rt_hdr.nexthdr = *proto;
	*proto = NEXTHDR_ROUTING;
}

static void ipv6_push_exthdr(struct sk_buff *skb, u8 *proto, u8 type, struct ipv6_opt_hdr *opt)
{
	struct ipv6_opt_hdr *h = (struct ipv6_opt_hdr *)skb_push(skb, ipv6_optlen(opt));

	memcpy(h, opt, ipv6_optlen(opt));
	h->nexthdr = *proto;
	*proto = type;
}

void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
			  u8 *proto,
			  struct in6_addr **daddr)
{
	if (opt->srcrt) {
		ipv6_push_rthdr(skb, proto, opt->srcrt, daddr);
		/*
		 * IPV6_RTHDRDSTOPTS is ignored
		 * unless IPV6_RTHDR is set (RFC3542).
		 */
		if (opt->dst0opt)
			ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst0opt);
	}
	if (opt->hopopt)
		ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt);
}

void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto)
{
	if (opt->dst1opt)
		ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst1opt);
}
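/*
 * Net effect of ipv6_push_nfrag_opts()/ipv6_push_frag_opts() above:
 * because headers are pushed from the tail towards the head, the routing
 * header is pushed first and the hop-by-hop header last, so the final
 * wire order is
 *
 *	IPv6 | Hop-by-Hop | Dest opts (dst0) | Routing | (Fragment)
 *	     | Dest opts (dst1) | upper-layer header,
 *
 * with dst1opt added separately by ipv6_push_frag_opts() since it
 * belongs to the fragmentable part of the packet.
 */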
struct ipv6_txoptions *
ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
{
	struct ipv6_txoptions *opt2;

	opt2 = sock_kmalloc(sk, opt->tot_len, GFP_ATOMIC);
	if (opt2) {
		long dif = (char*)opt2 - (char*)opt;
		memcpy(opt2, opt, opt->tot_len);
		if (opt2->hopopt)
			*((char**)&opt2->hopopt) += dif;
		if (opt2->dst0opt)
			*((char**)&opt2->dst0opt) += dif;
		if (opt2->dst1opt)
			*((char**)&opt2->dst1opt) += dif;
		if (opt2->srcrt)
			*((char**)&opt2->srcrt) += dif;
	}
	return opt2;
}

static int ipv6_renew_option(void *ohdr,
			     struct ipv6_opt_hdr __user *newopt, int newoptlen,
			     int inherit,
			     struct ipv6_opt_hdr **hdr,
			     char **p)
{
	if (inherit) {
		if (ohdr) {
			memcpy(*p, ohdr, ipv6_optlen((struct ipv6_opt_hdr *)ohdr));
			*hdr = (struct ipv6_opt_hdr *)*p;
			*p += CMSG_ALIGN(ipv6_optlen(*(struct ipv6_opt_hdr **)hdr));
		}
	} else {
		if (newopt) {
			if (copy_from_user(*p, newopt, newoptlen))
				return -EFAULT;
			*hdr = (struct ipv6_opt_hdr *)*p;
			if (ipv6_optlen(*(struct ipv6_opt_hdr **)hdr) > newoptlen)
				return -EINVAL;
			*p += CMSG_ALIGN(newoptlen);
		}
	}
	return 0;
}

struct ipv6_txoptions *
ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
		   int newtype,
		   struct ipv6_opt_hdr __user *newopt, int newoptlen)
{
	int tot_len = 0;
	char *p;
	struct ipv6_txoptions *opt2;
	int err;

	if (newtype != IPV6_HOPOPTS && opt->hopopt)
		tot_len += CMSG_ALIGN(ipv6_optlen(opt->hopopt));
	if (newtype != IPV6_RTHDRDSTOPTS && opt->dst0opt)
		tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst0opt));
	if (newtype != IPV6_RTHDR && opt->srcrt)
		tot_len += CMSG_ALIGN(ipv6_optlen(opt->srcrt));
	if (newtype != IPV6_DSTOPTS && opt->dst1opt)
		tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt));
	if (newopt && newoptlen)
		tot_len += CMSG_ALIGN(newoptlen);

	if (!tot_len)
		return NULL;

	opt2 = sock_kmalloc(sk, tot_len, GFP_ATOMIC);
	if (!opt2)
		return ERR_PTR(-ENOBUFS);

	memset(opt2, 0, tot_len);

	opt2->tot_len = tot_len;
	p = (char *)(opt2 + 1);

	err = ipv6_renew_option(opt->hopopt, newopt, newoptlen,
				newtype != IPV6_HOPOPTS,
				&opt2->hopopt, &p);
	if (err)
		goto out;

	err = ipv6_renew_option(opt->dst0opt, newopt, newoptlen,
				newtype != IPV6_RTHDRDSTOPTS,
				&opt2->dst0opt, &p);
	if (err)
		goto out;

	err = ipv6_renew_option(opt->srcrt, newopt, newoptlen,
				newtype != IPV6_RTHDR,
				(struct ipv6_opt_hdr **)&opt2->srcrt, &p);
	if (err)
		goto out;

	err = ipv6_renew_option(opt->dst1opt, newopt, newoptlen,
				newtype != IPV6_DSTOPTS,
				&opt2->dst1opt, &p);
	if (err)
		goto out;

	opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) +
			  (opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) +
			  (opt2->srcrt ? ipv6_optlen(opt2->srcrt) : 0);
	opt2->opt_flen = (opt2->dst1opt ? ipv6_optlen(opt2->dst1opt) : 0);

	return opt2;
out:
	sock_kfree_s(sk, opt2, tot_len);
	return ERR_PTR(err);
}
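/*
 * Hedged usage sketch (not built, and not part of this file's API): how a
 * setsockopt-style caller might use ipv6_renew_options() to replace just
 * the Hop-by-Hop header while inheriting the other extension headers from
 * the socket's current ipv6_txoptions.  The function name and the
 * surrounding locking/bookkeeping are assumptions; note that a NULL
 * return simply means "no options left".
 */
#if 0
static int example_set_hopopts(struct sock *sk, struct ipv6_txoptions *old,
			       struct ipv6_opt_hdr __user *newopt,
			       int newoptlen)
{
	struct ipv6_txoptions *opt2;

	opt2 = ipv6_renew_options(sk, old, IPV6_HOPOPTS, newopt, newoptlen);
	if (IS_ERR(opt2))
		return PTR_ERR(opt2);

	/* ... publish opt2 on the socket under the proper lock and
	 * sock_kfree_s() the old block ... */
	return 0;
}
#endif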