// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/udp_tunnel.h>
#include <net/sch_generic.h>
#include <linux/netfilter.h>
#include <rdma/ib_addr.h>

#include "rxe.h"
#include "rxe_net.h"
#include "rxe_loc.h"

static struct rxe_recv_sockets recv_sockets;

static struct dst_entry *rxe_find_route4(struct rxe_qp *qp,
					 struct net_device *ndev,
					 struct in_addr *saddr,
					 struct in_addr *daddr)
{
	struct rtable *rt;
	struct flowi4 fl = { { 0 } };

	fl.flowi4_oif = ndev->ifindex;
	memcpy(&fl.saddr, saddr, sizeof(*saddr));
	memcpy(&fl.daddr, daddr, sizeof(*daddr));
	fl.flowi4_proto = IPPROTO_UDP;

	rt = ip_route_output_key(&init_net, &fl);
	if (IS_ERR(rt)) {
		rxe_dbg_qp(qp, "no route to %pI4\n", &daddr->s_addr);
		return NULL;
	}

	return &rt->dst;
}

#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *rxe_find_route6(struct rxe_qp *qp,
					 struct net_device *ndev,
					 struct in6_addr *saddr,
					 struct in6_addr *daddr)
{
	struct dst_entry *ndst;
	struct flowi6 fl6 = { { 0 } };

	fl6.flowi6_oif = ndev->ifindex;
	memcpy(&fl6.saddr, saddr, sizeof(*saddr));
	memcpy(&fl6.daddr, daddr, sizeof(*daddr));
	fl6.flowi6_proto = IPPROTO_UDP;

	ndst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(recv_sockets.sk6->sk),
					       recv_sockets.sk6->sk, &fl6,
					       NULL);
	if (IS_ERR(ndst)) {
		rxe_dbg_qp(qp, "no route to %pI6\n", daddr);
		return NULL;
	}

	if (unlikely(ndst->error)) {
		rxe_dbg_qp(qp, "no route to %pI6\n", daddr);
		goto put;
	}

	return ndst;
put:
	dst_release(ndst);
	return NULL;
}

#else

static struct dst_entry *rxe_find_route6(struct rxe_qp *qp,
					 struct net_device *ndev,
					 struct in6_addr *saddr,
					 struct in6_addr *daddr)
{
	return NULL;
}

#endif

static struct dst_entry *rxe_find_route(struct net_device *ndev,
					struct rxe_qp *qp,
					struct rxe_av *av)
{
	struct dst_entry *dst = NULL;

	if (qp_type(qp) == IB_QPT_RC)
		dst = sk_dst_get(qp->sk->sk);

	if (!dst || !dst_check(dst, qp->dst_cookie)) {
		if (dst)
			dst_release(dst);

		if (av->network_type == RXE_NETWORK_TYPE_IPV4) {
			struct in_addr *saddr;
			struct in_addr *daddr;

			saddr = &av->sgid_addr._sockaddr_in.sin_addr;
			daddr = &av->dgid_addr._sockaddr_in.sin_addr;
			dst = rxe_find_route4(qp, ndev, saddr, daddr);
		} else if (av->network_type == RXE_NETWORK_TYPE_IPV6) {
			struct in6_addr *saddr6;
			struct in6_addr *daddr6;

			saddr6 = &av->sgid_addr._sockaddr_in6.sin6_addr;
			daddr6 = &av->dgid_addr._sockaddr_in6.sin6_addr;
			dst = rxe_find_route6(qp, ndev, saddr6, daddr6);
#if IS_ENABLED(CONFIG_IPV6)
			if (dst)
				qp->dst_cookie =
					rt6_get_cookie((struct rt6_info *)dst);
#endif
		}

		if (dst && (qp_type(qp) == IB_QPT_RC)) {
			dst_hold(dst);
			sk_dst_set(qp->sk->sk, dst);
		}
	}
	return dst;
}
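/* per-packet receive handler installed on the tunnel socket by
 * rxe_setup_udp_tunnel(); fills in the rxe packet metadata for the skb
 * and hands it to rxe_rcv()
 */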
static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct udphdr *udph;
	struct rxe_dev *rxe;
	struct net_device *ndev = skb->dev;
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	/* takes a reference on rxe->ib_dev
	 * drop when skb is freed
	 */
	rxe = rxe_get_dev_from_net(ndev);
	if (!rxe && is_vlan_dev(ndev))
		rxe = rxe_get_dev_from_net(vlan_dev_real_dev(ndev));
	if (!rxe)
		goto drop;

	if (skb_linearize(skb)) {
		ib_device_put(&rxe->ib_dev);
		goto drop;
	}

	udph = udp_hdr(skb);
	pkt->rxe = rxe;
	pkt->port_num = 1;
	pkt->hdr = (u8 *)(udph + 1);
	pkt->mask = RXE_GRH_MASK;
	pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph);

	/* remove udp header */
	skb_pull(skb, sizeof(struct udphdr));

	rxe_rcv(skb);

	return 0;
drop:
	kfree_skb(skb);

	return 0;
}

static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
					   bool ipv6)
{
	int err;
	struct socket *sock;
	struct udp_port_cfg udp_cfg = { };
	struct udp_tunnel_sock_cfg tnl_cfg = { };

	if (ipv6) {
		udp_cfg.family = AF_INET6;
		udp_cfg.ipv6_v6only = 1;
	} else {
		udp_cfg.family = AF_INET;
	}

	udp_cfg.local_udp_port = port;

	/* Create UDP socket */
	err = udp_sock_create(net, &udp_cfg, &sock);
	if (err < 0)
		return ERR_PTR(err);

	tnl_cfg.encap_type = 1;
	tnl_cfg.encap_rcv = rxe_udp_encap_recv;

	/* Setup UDP tunnel */
	setup_udp_tunnel_sock(net, sock, &tnl_cfg);

	return sock;
}

static void rxe_release_udp_tunnel(struct socket *sk)
{
	if (sk)
		udp_tunnel_sock_release(sk);
}

static void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port,
			    __be16 dst_port)
{
	struct udphdr *udph;

	__skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);

	udph->dest = dst_port;
	udph->source = src_port;
	udph->len = htons(skb->len);
	udph->check = 0;
}

static void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb,
			     __be32 saddr, __be32 daddr, __u8 proto,
			     __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
	struct iphdr *iph;

	skb_scrub_packet(skb, xnet);

	skb_clear_hash(skb);
	skb_dst_set(skb, dst_clone(dst));
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);

	iph->version = IPVERSION;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->tot_len = htons(skb->len);
	iph->frag_off = df;
	iph->protocol = proto;
	iph->tos = tos;
	iph->daddr = daddr;
	iph->saddr = saddr;
	iph->ttl = ttl;
	__ip_select_ident(dev_net(dst->dev), iph,
			  skb_shinfo(skb)->gso_segs ?: 1);
}

static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb,
			     struct in6_addr *saddr, struct in6_addr *daddr,
			     __u8 proto, __u8 prio, __u8 ttl)
{
	struct ipv6hdr *ip6h;

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED
			    | IPSKB_REROUTED);
	skb_dst_set(skb, dst_clone(dst));

	__skb_push(skb, sizeof(*ip6h));
	skb_reset_network_header(skb);
	ip6h = ipv6_hdr(skb);
	ip6_flow_hdr(ip6h, prio, htonl(0));
	ip6h->nexthdr = proto;
	ip6h->hop_limit = ttl;
	ip6h->daddr = *daddr;
	ip6h->saddr = *saddr;
	ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
}
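/* resolve a route for the destination in the AV and push the outer
 * UDP and IPv4 headers onto the skb; prepare6() below is the IPv6
 * counterpart
 */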
static int prepare4(struct rxe_av *av, struct rxe_pkt_info *pkt,
		    struct sk_buff *skb)
{
	struct rxe_qp *qp = pkt->qp;
	struct dst_entry *dst;
	bool xnet = false;
	__be16 df = htons(IP_DF);
	struct in_addr *saddr = &av->sgid_addr._sockaddr_in.sin_addr;
	struct in_addr *daddr = &av->dgid_addr._sockaddr_in.sin_addr;

	dst = rxe_find_route(skb->dev, qp, av);
	if (!dst) {
		rxe_dbg_qp(qp, "Host not reachable\n");
		return -EHOSTUNREACH;
	}

	prepare_udp_hdr(skb, cpu_to_be16(qp->src_port),
			cpu_to_be16(ROCE_V2_UDP_DPORT));

	prepare_ipv4_hdr(dst, skb, saddr->s_addr, daddr->s_addr, IPPROTO_UDP,
			 av->grh.traffic_class, av->grh.hop_limit, df, xnet);

	dst_release(dst);
	return 0;
}

static int prepare6(struct rxe_av *av, struct rxe_pkt_info *pkt,
		    struct sk_buff *skb)
{
	struct rxe_qp *qp = pkt->qp;
	struct dst_entry *dst;
	struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr;
	struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr;

	dst = rxe_find_route(skb->dev, qp, av);
	if (!dst) {
		rxe_dbg_qp(qp, "Host not reachable\n");
		return -EHOSTUNREACH;
	}

	prepare_udp_hdr(skb, cpu_to_be16(qp->src_port),
			cpu_to_be16(ROCE_V2_UDP_DPORT));

	prepare_ipv6_hdr(dst, skb, saddr, daddr, IPPROTO_UDP,
			 av->grh.traffic_class,
			 av->grh.hop_limit);

	dst_release(dst);
	return 0;
}

int rxe_prepare(struct rxe_av *av, struct rxe_pkt_info *pkt,
		struct sk_buff *skb)
{
	int err = 0;

	if (skb->protocol == htons(ETH_P_IP))
		err = prepare4(av, pkt, skb);
	else if (skb->protocol == htons(ETH_P_IPV6))
		err = prepare6(av, pkt, skb);

	if (ether_addr_equal(skb->dev->dev_addr, av->dmac))
		pkt->mask |= RXE_LOOPBACK_MASK;

	return err;
}

static void rxe_skb_tx_dtor(struct sk_buff *skb)
{
	struct net_device *ndev = skb->dev;
	struct rxe_dev *rxe;
	unsigned int qp_index;
	struct rxe_qp *qp;
	int skb_out;

	rxe = rxe_get_dev_from_net(ndev);
	if (!rxe && is_vlan_dev(ndev))
		rxe = rxe_get_dev_from_net(vlan_dev_real_dev(ndev));
	if (WARN_ON(!rxe))
		return;

	qp_index = (int)(uintptr_t)skb->sk->sk_user_data;
	if (!qp_index)
		goto put_dev;

	qp = rxe_pool_get_index(&rxe->qp_pool, qp_index);
	if (!qp)
		goto put_dev;

	skb_out = atomic_dec_return(&qp->skb_out);
	if (qp->need_req_skb && skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW)
		rxe_sched_task(&qp->send_task);

	rxe_put(qp);
put_dev:
	ib_device_put(&rxe->ib_dev);
	sock_put(skb->sk);
}

static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt)
{
	int err;
	struct sock *sk = pkt->qp->sk->sk;

	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = rxe_skb_tx_dtor;
	atomic_inc(&pkt->qp->skb_out);

	if (skb->protocol == htons(ETH_P_IP))
		err = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	else
		err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);

	return err;
}

/* fix up a send packet to match the packets
 * received from UDP before looping them back
 */
static int rxe_loopback(struct sk_buff *skb, struct rxe_pkt_info *pkt)
{
	struct sock *sk = pkt->qp->sk->sk;

	memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));

	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = rxe_skb_tx_dtor;
	atomic_inc(&pkt->qp->skb_out);

	if (skb->protocol == htons(ETH_P_IP))
		skb_pull(skb, sizeof(struct iphdr));
	else
		skb_pull(skb, sizeof(struct ipv6hdr));
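	/* loopback packets never pass through rxe_udp_encap_recv(), so
	 * take the ib_dev reference here that the receive path expects
	 * to hold until the skb is freed
	 */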
	if (WARN_ON(!ib_device_try_get(&pkt->rxe->ib_dev))) {
		kfree_skb(skb);
		return -EIO;
	}

	/* remove udp header */
	skb_pull(skb, sizeof(struct udphdr));

	rxe_rcv(skb);

	return 0;
}

int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
		    struct sk_buff *skb)
{
	int err;
	int is_request = pkt->mask & RXE_REQ_MASK;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	unsigned long flags;

	spin_lock_irqsave(&qp->state_lock, flags);
	if ((is_request && (qp_state(qp) < IB_QPS_RTS)) ||
	    (!is_request && (qp_state(qp) < IB_QPS_RTR))) {
		spin_unlock_irqrestore(&qp->state_lock, flags);
		rxe_dbg_qp(qp, "Packet dropped. QP is not in ready state\n");
		goto drop;
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);

	rxe_icrc_generate(skb, pkt);

	if (pkt->mask & RXE_LOOPBACK_MASK)
		err = rxe_loopback(skb, pkt);
	else
		err = rxe_send(skb, pkt);
	if (err) {
		rxe_counter_inc(rxe, RXE_CNT_SEND_ERR);
		return err;
	}

	rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
	goto done;

drop:
	kfree_skb(skb);
	err = 0;
done:
	return err;
}
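/* allocate and initialize a fresh skb with enough headroom for the
 * link, network and UDP headers plus paylen bytes of payload
 */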
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
				int paylen, struct rxe_pkt_info *pkt)
{
	unsigned int hdr_len;
	struct sk_buff *skb = NULL;
	struct net_device *ndev;
	const struct ib_gid_attr *attr;
	const int port_num = 1;

	attr = rdma_get_gid_attr(&rxe->ib_dev, port_num, av->grh.sgid_index);
	if (IS_ERR(attr))
		return NULL;

	if (av->network_type == RXE_NETWORK_TYPE_IPV4)
		hdr_len = ETH_HLEN + sizeof(struct udphdr) +
			sizeof(struct iphdr);
	else
		hdr_len = ETH_HLEN + sizeof(struct udphdr) +
			sizeof(struct ipv6hdr);

	rcu_read_lock();
	ndev = rdma_read_gid_attr_ndev_rcu(attr);
	if (IS_ERR(ndev)) {
		rcu_read_unlock();
		goto out;
	}
	skb = alloc_skb(paylen + hdr_len + LL_RESERVED_SPACE(ndev),
			GFP_ATOMIC);

	if (unlikely(!skb)) {
		rcu_read_unlock();
		goto out;
	}

	skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(ndev));

	/* FIXME: hold reference to this netdev until life of this skb. */
	skb->dev = ndev;
	rcu_read_unlock();

	if (av->network_type == RXE_NETWORK_TYPE_IPV4)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	pkt->rxe = rxe;
	pkt->port_num = port_num;
	pkt->hdr = skb_put(skb, paylen);
	pkt->mask |= RXE_GRH_MASK;

out:
	rdma_put_gid_attr(attr);
	return skb;
}

/*
 * this is required by rxe_cfg to match rxe devices in
 * /sys/class/infiniband up with their underlying ethernet devices
 */
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num)
{
	return rxe->ndev->name;
}

int rxe_net_add(const char *ibdev_name, struct net_device *ndev)
{
	int err;
	struct rxe_dev *rxe = NULL;

	rxe = ib_alloc_device(rxe_dev, ib_dev);
	if (!rxe)
		return -ENOMEM;

	rxe->ndev = ndev;
	ib_mark_name_assigned_by_user(&rxe->ib_dev);

	err = rxe_add(rxe, ndev->mtu, ibdev_name);
	if (err) {
		ib_dealloc_device(&rxe->ib_dev);
		return err;
	}

	return 0;
}

static void rxe_port_event(struct rxe_dev *rxe,
			   enum ib_event_type event)
{
	struct ib_event ev;

	ev.device = &rxe->ib_dev;
	ev.element.port_num = 1;
	ev.event = event;

	ib_dispatch_event(&ev);
}

/* Caller must hold net_info_lock */
void rxe_port_up(struct rxe_dev *rxe)
{
	struct rxe_port *port;

	port = &rxe->port;
	port->attr.state = IB_PORT_ACTIVE;

	rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE);
	dev_info(&rxe->ib_dev.dev, "set active\n");
}

/* Caller must hold net_info_lock */
void rxe_port_down(struct rxe_dev *rxe)
{
	struct rxe_port *port;

	port = &rxe->port;
	port->attr.state = IB_PORT_DOWN;

	rxe_port_event(rxe, IB_EVENT_PORT_ERR);
	rxe_counter_inc(rxe, RXE_CNT_LINK_DOWNED);
	dev_info(&rxe->ib_dev.dev, "set down\n");
}

void rxe_set_port_state(struct rxe_dev *rxe)
{
	if (netif_running(rxe->ndev) && netif_carrier_ok(rxe->ndev))
		rxe_port_up(rxe);
	else
		rxe_port_down(rxe);
}

static int rxe_notify(struct notifier_block *not_blk,
		      unsigned long event,
		      void *arg)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(arg);
	struct rxe_dev *rxe = rxe_get_dev_from_net(ndev);

	if (!rxe)
		return NOTIFY_OK;

	switch (event) {
	case NETDEV_UNREGISTER:
		ib_unregister_device_queued(&rxe->ib_dev);
		break;
	case NETDEV_UP:
		rxe_port_up(rxe);
		break;
	case NETDEV_DOWN:
		rxe_port_down(rxe);
		break;
	case NETDEV_CHANGEMTU:
		rxe_dbg_dev(rxe, "%s changed mtu to %d\n", ndev->name, ndev->mtu);
		rxe_set_mtu(rxe, ndev->mtu);
		break;
	case NETDEV_CHANGE:
		rxe_set_port_state(rxe);
		break;
	case NETDEV_REBOOT:
	case NETDEV_GOING_DOWN:
	case NETDEV_CHANGEADDR:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	default:
		rxe_dbg_dev(rxe, "ignoring netdev event = %ld for %s\n",
			    event, ndev->name);
		break;
	}

	ib_device_put(&rxe->ib_dev);
	return NOTIFY_OK;
}

static struct notifier_block rxe_net_notifier = {
	.notifier_call = rxe_notify,
};

static int rxe_net_ipv4_init(void)
{
	recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net,
						htons(ROCE_V2_UDP_DPORT), false);
	if (IS_ERR(recv_sockets.sk4)) {
		recv_sockets.sk4 = NULL;
		pr_err("Failed to create IPv4 UDP tunnel\n");
		return -1;
	}

	return 0;
}
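/* the UDPv6 tunnel socket is optional: udp_sock_create() fails with
 * -EAFNOSUPPORT when IPv6 support is compiled in but disabled at
 * runtime, and that is not treated as a fatal error
 */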
static int rxe_net_ipv6_init(void)
{
#if IS_ENABLED(CONFIG_IPV6)

	recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
						htons(ROCE_V2_UDP_DPORT), true);
	if (PTR_ERR(recv_sockets.sk6) == -EAFNOSUPPORT) {
		recv_sockets.sk6 = NULL;
		pr_warn("IPv6 is not supported, can not create a UDPv6 socket\n");
		return 0;
	}

	if (IS_ERR(recv_sockets.sk6)) {
		recv_sockets.sk6 = NULL;
		pr_err("Failed to create IPv6 UDP tunnel\n");
		return -1;
	}
#endif
	return 0;
}

void rxe_net_exit(void)
{
	rxe_release_udp_tunnel(recv_sockets.sk6);
	rxe_release_udp_tunnel(recv_sockets.sk4);
	unregister_netdevice_notifier(&rxe_net_notifier);
}

int rxe_net_init(void)
{
	int err;

	recv_sockets.sk6 = NULL;

	err = rxe_net_ipv4_init();
	if (err)
		return err;
	err = rxe_net_ipv6_init();
	if (err)
		goto err_out;
	err = register_netdevice_notifier(&rxe_net_notifier);
	if (err) {
		pr_err("Failed to register netdev notifier\n");
		goto err_out;
	}
	return 0;
err_out:
	rxe_net_exit();
	return err;
}