// SPDX-License-Identifier: GPL-2.0
/* Bareudp: UDP tunnel encapsulation for different Payload types like
 * MPLS, NSH, IP, etc.
 * Copyright (c) 2019 Nokia, Inc.
 * Authors: Martin Varghese, <martin.varghese@nokia.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/hash.h>
#include <net/dst_metadata.h>
#include <net/gro_cells.h>
#include <net/rtnetlink.h>
#include <net/protocol.h>
#include <net/ip6_tunnel.h>
#include <net/ip_tunnels.h>
#include <net/udp_tunnel.h>
#include <net/bareudp.h>

#define BAREUDP_BASE_HLEN sizeof(struct udphdr)
#define BAREUDP_IPV4_HLEN (sizeof(struct iphdr) + \
			   sizeof(struct udphdr))
#define BAREUDP_IPV6_HLEN (sizeof(struct ipv6hdr) + \
			   sizeof(struct udphdr))

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

/* per-network namespace private data for this module */

static unsigned int bareudp_net_id;

struct bareudp_net {
	struct list_head bareudp_list;
};

struct bareudp_conf {
	__be16 ethertype;
	__be16 port;
	u16 sport_min;
	bool multi_proto_mode;
};

/* Pseudo network device */
struct bareudp_dev {
	struct net *net;	/* netns for packet i/o */
	struct net_device *dev;	/* netdev for bareudp tunnel */
	__be16 ethertype;
	__be16 port;
	u16 sport_min;
	bool multi_proto_mode;
	struct socket __rcu *sock;
	struct list_head next;	/* bareudp node on namespace list */
	struct gro_cells gro_cells;
};

static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct metadata_dst *tun_dst = NULL;
	IP_TUNNEL_DECLARE_FLAGS(key) = { };
	struct bareudp_dev *bareudp;
	unsigned short family;
	unsigned int len;
	__be16 proto;
	void *oiph;
	int err;

	bareudp = rcu_dereference_sk_user_data(sk);
	if (!bareudp)
		goto drop;

	if (skb->protocol == htons(ETH_P_IP))
		family = AF_INET;
	else
		family = AF_INET6;

	if (bareudp->ethertype == htons(ETH_P_IP)) {
		__u8 ipversion;

		if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion,
				  sizeof(ipversion))) {
			bareudp->dev->stats.rx_dropped++;
			goto drop;
		}
		ipversion >>= 4;

		if (ipversion == 4) {
			proto = htons(ETH_P_IP);
		} else if (ipversion == 6 && bareudp->multi_proto_mode) {
			proto = htons(ETH_P_IPV6);
		} else {
			bareudp->dev->stats.rx_dropped++;
			goto drop;
		}
	} else if (bareudp->ethertype == htons(ETH_P_MPLS_UC)) {
		struct iphdr *tunnel_hdr;

		tunnel_hdr = (struct iphdr *)skb_network_header(skb);
		if (tunnel_hdr->version == 4) {
			if (!ipv4_is_multicast(tunnel_hdr->daddr)) {
				proto = bareudp->ethertype;
			} else if (bareudp->multi_proto_mode &&
				   ipv4_is_multicast(tunnel_hdr->daddr)) {
				proto = htons(ETH_P_MPLS_MC);
			} else {
				bareudp->dev->stats.rx_dropped++;
				goto drop;
			}
		} else {
			int addr_type;
			struct ipv6hdr *tunnel_hdr_v6;

			tunnel_hdr_v6 = (struct ipv6hdr *)skb_network_header(skb);
			addr_type =
			ipv6_addr_type((struct in6_addr *)&tunnel_hdr_v6->daddr);
			if (!(addr_type & IPV6_ADDR_MULTICAST)) {
				proto = bareudp->ethertype;
			} else if (bareudp->multi_proto_mode &&
				   (addr_type & IPV6_ADDR_MULTICAST)) {
				proto = htons(ETH_P_MPLS_MC);
			} else {
				bareudp->dev->stats.rx_dropped++;
				goto drop;
			}
		}
	} else {
		proto = bareudp->ethertype;
	}

	if (iptunnel_pull_header(skb, BAREUDP_BASE_HLEN,
				 proto,
				 !net_eq(bareudp->net,
					 dev_net(bareudp->dev)))) {
		bareudp->dev->stats.rx_dropped++;
		goto drop;
	}

	__set_bit(IP_TUNNEL_KEY_BIT, key);

	tun_dst = udp_tun_rx_dst(skb, family, key, 0, 0);
	if (!tun_dst) {
		bareudp->dev->stats.rx_dropped++;
		goto drop;
	}
	skb_dst_set(skb, &tun_dst->dst);
	skb->dev = bareudp->dev;
	oiph = skb_network_header(skb);
	skb_reset_network_header(skb);
	skb_reset_mac_header(skb);

	if (!ipv6_mod_enabled() || family == AF_INET)
		err = IP_ECN_decapsulate(oiph, skb);
	else
		err = IP6_ECN_decapsulate(oiph, skb);

	if (unlikely(err)) {
		if (log_ecn_error) {
			if (!ipv6_mod_enabled() || family == AF_INET)
				net_info_ratelimited("non-ECT from %pI4 "
						     "with TOS=%#x\n",
						     &((struct iphdr *)oiph)->saddr,
						     ((struct iphdr *)oiph)->tos);
			else
				net_info_ratelimited("non-ECT from %pI6\n",
						     &((struct ipv6hdr *)oiph)->saddr);
		}
		if (err > 1) {
			++bareudp->dev->stats.rx_frame_errors;
			++bareudp->dev->stats.rx_errors;
			goto drop;
		}
	}

	len = skb->len;
	err = gro_cells_receive(&bareudp->gro_cells, skb);
	if (likely(err == NET_RX_SUCCESS))
		dev_sw_netstats_rx_add(bareudp->dev, len);

	return 0;
drop:
	/* Consume bad packet */
	kfree_skb(skb);

	return 0;
}

static int bareudp_err_lookup(struct sock *sk, struct sk_buff *skb)
{
	return 0;
}

static int bareudp_init(struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);
	int err;

	err = gro_cells_init(&bareudp->gro_cells, dev);
	if (err)
		return err;

	return 0;
}

static void bareudp_uninit(struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);

	gro_cells_destroy(&bareudp->gro_cells);
}

static struct socket *bareudp_create_sock(struct net *net, __be16 port)
{
	struct udp_port_cfg udp_conf;
	struct socket *sock;
	int err;

	memset(&udp_conf, 0, sizeof(udp_conf));

	if (ipv6_mod_enabled())
		udp_conf.family = AF_INET6;
	else
		udp_conf.family = AF_INET;

	udp_conf.local_udp_port = port;
	/* Open UDP socket */
	err = udp_sock_create(net, &udp_conf, &sock);
	if (err < 0)
		return ERR_PTR(err);

	udp_allow_gso(sock->sk);
	return sock;
}

/* Create new listen socket if needed */
static int bareudp_socket_create(struct bareudp_dev *bareudp, __be16 port)
{
	struct udp_tunnel_sock_cfg tunnel_cfg;
	struct socket *sock;

	sock = bareudp_create_sock(bareudp->net, port);
	if (IS_ERR(sock))
		return PTR_ERR(sock);

	/* Mark socket as an encapsulation socket */
	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
	tunnel_cfg.sk_user_data = bareudp;
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.encap_rcv = bareudp_udp_encap_recv;
	tunnel_cfg.encap_err_lookup = bareudp_err_lookup;
	tunnel_cfg.encap_destroy = NULL;
	setup_udp_tunnel_sock(bareudp->net, sock, &tunnel_cfg);

	rcu_assign_pointer(bareudp->sock, sock);
	return 0;
}

static int bareudp_open(struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);
	int ret = 0;

	ret = bareudp_socket_create(bareudp, bareudp->port);
	return ret;
}

static void bareudp_sock_release(struct bareudp_dev *bareudp)
{
	struct socket *sock;

	sock = bareudp->sock;
	rcu_assign_pointer(bareudp->sock, NULL);
	synchronize_net();
	udp_tunnel_sock_release(sock);
}

static int bareudp_stop(struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);

	bareudp_sock_release(bareudp);
	return 0;
}

static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
			    struct bareudp_dev *bareudp,
			    const struct ip_tunnel_info *info)
{
	bool udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
	bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct socket *sock = rcu_dereference(bareudp->sock);
	const struct ip_tunnel_key *key = &info->key;
	struct rtable *rt;
	__be16 sport, df;
	int min_headroom;
	__u8 tos, ttl;
	__be32 saddr;
	int err;

	if (!sock)
		return -ESHUTDOWN;

	sport = udp_flow_src_port(bareudp->net, skb,
				  bareudp->sport_min, USHRT_MAX,
				  true);
	rt = udp_tunnel_dst_lookup(skb, dev, bareudp->net, 0, &saddr, &info->key,
				   sport, bareudp->port, key->tos,
				   use_cache ?
				   (struct dst_cache *)&info->dst_cache : NULL);

	if (IS_ERR(rt))
		return PTR_ERR(rt);

	skb_tunnel_check_pmtu(skb, &rt->dst,
			      BAREUDP_IPV4_HLEN + info->options_len, false);

	tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
	ttl = key->ttl;
	df = test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, key->tun_flags) ?
	     htons(IP_DF) : 0;
	skb_scrub_packet(skb, xnet);

	err = -ENOSPC;
	if (!skb_pull(skb, skb_network_offset(skb)))
		goto free_dst;

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len +
		BAREUDP_BASE_HLEN + info->options_len + sizeof(struct iphdr);

	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		goto free_dst;

	err = udp_tunnel_handle_offloads(skb, udp_sum);
	if (err)
		goto free_dst;

	skb_set_inner_protocol(skb, bareudp->ethertype);
	udp_tunnel_xmit_skb(rt, sock->sk, skb, saddr, info->key.u.ipv4.dst,
			    tos, ttl, df, sport, bareudp->port,
			    !net_eq(bareudp->net, dev_net(bareudp->dev)),
			    !test_bit(IP_TUNNEL_CSUM_BIT,
				      info->key.tun_flags));
	return 0;

free_dst:
	dst_release(&rt->dst);
	return err;
}

static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
			     struct bareudp_dev *bareudp,
			     const struct ip_tunnel_info *info)
{
	bool udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
	bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct socket *sock = rcu_dereference(bareudp->sock);
	const struct ip_tunnel_key *key = &info->key;
	struct dst_entry *dst = NULL;
	struct in6_addr saddr, daddr;
	int min_headroom;
	__u8 prio, ttl;
	__be16 sport;
	int err;

	if (!sock)
		return -ESHUTDOWN;

	sport = udp_flow_src_port(bareudp->net, skb,
				  bareudp->sport_min, USHRT_MAX,
				  true);
	dst = udp_tunnel6_dst_lookup(skb, dev, bareudp->net, sock, 0, &saddr,
				     key, sport, bareudp->port, key->tos,
				     use_cache ?
				     (struct dst_cache *)&info->dst_cache : NULL);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	skb_tunnel_check_pmtu(skb, dst, BAREUDP_IPV6_HLEN + info->options_len,
			      false);

	prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
	ttl = key->ttl;

	skb_scrub_packet(skb, xnet);

	err = -ENOSPC;
	if (!skb_pull(skb, skb_network_offset(skb)))
		goto free_dst;

	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len +
		BAREUDP_BASE_HLEN + info->options_len + sizeof(struct ipv6hdr);

	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		goto free_dst;

	err = udp_tunnel_handle_offloads(skb, udp_sum);
	if (err)
		goto free_dst;

	daddr = info->key.u.ipv6.dst;
	udp_tunnel6_xmit_skb(dst, sock->sk, skb, dev,
			     &saddr, &daddr, prio, ttl,
			     info->key.label, sport, bareudp->port,
			     !test_bit(IP_TUNNEL_CSUM_BIT,
				       info->key.tun_flags));
	return 0;

free_dst:
	dst_release(dst);
	return err;
}

static bool bareudp_proto_valid(struct bareudp_dev *bareudp, __be16 proto)
{
	if (bareudp->ethertype == proto)
		return true;

	if (!bareudp->multi_proto_mode)
		return false;

	if (bareudp->ethertype == htons(ETH_P_MPLS_UC) &&
	    proto == htons(ETH_P_MPLS_MC))
		return true;

	if (bareudp->ethertype == htons(ETH_P_IP) &&
	    proto == htons(ETH_P_IPV6))
		return true;

	return false;
}

static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);
	struct ip_tunnel_info *info = NULL;
	int err;

	if (!bareudp_proto_valid(bareudp, skb->protocol)) {
		err = -EINVAL;
		goto tx_error;
	}

	info = skb_tunnel_info(skb);
	if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
		err = -EINVAL;
		goto tx_error;
	}

	rcu_read_lock();
	if (ipv6_mod_enabled() && info->mode & IP_TUNNEL_INFO_IPV6)
		err = bareudp6_xmit_skb(skb, dev, bareudp, info);
	else
		err = bareudp_xmit_skb(skb, dev, bareudp, info);

	rcu_read_unlock();

	if (likely(!err))
		return NETDEV_TX_OK;
tx_error:
	dev_kfree_skb(skb);

	if (err == -ELOOP)
		dev->stats.collisions++;
	else if (err == -ENETUNREACH)
		dev->stats.tx_carrier_errors++;

	dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}

static int bareudp_fill_metadata_dst(struct net_device *dev,
				     struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	struct bareudp_dev *bareudp = netdev_priv(dev);
	bool use_cache;
	__be16 sport;

	use_cache = ip_tunnel_dst_cache_usable(skb, info);
	sport = udp_flow_src_port(bareudp->net, skb,
				  bareudp->sport_min, USHRT_MAX,
				  true);

	if (!ipv6_mod_enabled() || ip_tunnel_info_af(info) == AF_INET) {
		struct rtable *rt;
		__be32 saddr;

		rt = udp_tunnel_dst_lookup(skb, dev, bareudp->net, 0, &saddr,
					   &info->key, sport, bareudp->port,
					   info->key.tos,
					   use_cache ? &info->dst_cache : NULL);
		if (IS_ERR(rt))
			return PTR_ERR(rt);

		ip_rt_put(rt);
		info->key.u.ipv4.src = saddr;
	} else if (ip_tunnel_info_af(info) == AF_INET6) {
		struct dst_entry *dst;
		struct in6_addr saddr;
		struct socket *sock = rcu_dereference(bareudp->sock);

		dst = udp_tunnel6_dst_lookup(skb, dev, bareudp->net, sock,
					     0, &saddr, &info->key,
					     sport, bareudp->port, info->key.tos,
					     use_cache ? &info->dst_cache : NULL);
		if (IS_ERR(dst))
			return PTR_ERR(dst);

		dst_release(dst);
		info->key.u.ipv6.src = saddr;
	} else {
		return -EINVAL;
	}

	info->key.tp_src = sport;
	info->key.tp_dst = bareudp->port;
	return 0;
}

static const struct net_device_ops bareudp_netdev_ops = {
	.ndo_init		= bareudp_init,
	.ndo_uninit		= bareudp_uninit,
	.ndo_open		= bareudp_open,
	.ndo_stop		= bareudp_stop,
	.ndo_start_xmit		= bareudp_xmit,
	.ndo_fill_metadata_dst	= bareudp_fill_metadata_dst,
};

static const struct nla_policy bareudp_policy[IFLA_BAREUDP_MAX + 1] = {
	[IFLA_BAREUDP_PORT]		= { .type = NLA_U16 },
	[IFLA_BAREUDP_ETHERTYPE]	= { .type = NLA_U16 },
	[IFLA_BAREUDP_SRCPORT_MIN]	= { .type = NLA_U16 },
	[IFLA_BAREUDP_MULTIPROTO_MODE]	= { .type = NLA_FLAG },
};

/* Info for udev, that this is a virtual tunnel endpoint */
static const struct device_type bareudp_type = {
	.name = "bareudp",
};

/* Initialize the device structure. */
static void bareudp_setup(struct net_device *dev)
{
	dev->netdev_ops = &bareudp_netdev_ops;
	dev->needs_free_netdev = true;
	SET_NETDEV_DEVTYPE(dev, &bareudp_type);
	dev->features    |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
	dev->features    |= NETIF_F_RXCSUM;
	dev->features    |= NETIF_F_LLTX;
	dev->features    |= NETIF_F_GSO_SOFTWARE;
	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
	dev->hw_features |= NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->mtu = ETH_DATA_LEN;
	dev->min_mtu = IPV4_MIN_MTU;
	dev->max_mtu = IP_MAX_MTU - BAREUDP_BASE_HLEN;
	dev->type = ARPHRD_NONE;
	netif_keep_dst(dev);
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
}

static int bareudp_validate(struct nlattr *tb[], struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	if (!data) {
		NL_SET_ERR_MSG(extack,
			       "Not enough attributes provided to perform the operation");
		return -EINVAL;
	}
	return 0;
}

static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf,
			struct netlink_ext_ack *extack)
{
	memset(conf, 0, sizeof(*conf));

	if (!data[IFLA_BAREUDP_PORT]) {
		NL_SET_ERR_MSG(extack, "port not specified");
		return -EINVAL;
	}
	if (!data[IFLA_BAREUDP_ETHERTYPE]) {
		NL_SET_ERR_MSG(extack, "ethertype not specified");
		return -EINVAL;
	}

	conf->port = nla_get_u16(data[IFLA_BAREUDP_PORT]);
	conf->ethertype = nla_get_u16(data[IFLA_BAREUDP_ETHERTYPE]);

	if (data[IFLA_BAREUDP_SRCPORT_MIN])
		conf->sport_min = nla_get_u16(data[IFLA_BAREUDP_SRCPORT_MIN]);

	if (data[IFLA_BAREUDP_MULTIPROTO_MODE])
		conf->multi_proto_mode = true;

	return 0;
}

static struct bareudp_dev *bareudp_find_dev(struct bareudp_net *bn,
					    const struct bareudp_conf *conf)
{
	struct bareudp_dev *bareudp, *t = NULL;

	list_for_each_entry(bareudp, &bn->bareudp_list, next) {
		if (conf->port == bareudp->port)
			t = bareudp;
	}
	return t;
}

static int bareudp_configure(struct net *net, struct net_device *dev,
			     struct bareudp_conf *conf,
			     struct netlink_ext_ack *extack)
{
	struct bareudp_net *bn = net_generic(net, bareudp_net_id);
	struct bareudp_dev *t, *bareudp = netdev_priv(dev);
	int err;

	bareudp->net = net;
	bareudp->dev = dev;
	t = bareudp_find_dev(bn, conf);
	if (t) {
		NL_SET_ERR_MSG(extack, "Another bareudp device using the same port already exists");
		return -EBUSY;
	}

	if (conf->multi_proto_mode &&
	    (conf->ethertype != htons(ETH_P_MPLS_UC) &&
	     conf->ethertype != htons(ETH_P_IP))) {
		NL_SET_ERR_MSG(extack, "Cannot set multiproto mode for this ethertype (only IPv4 and unicast MPLS are supported)");
		return -EINVAL;
	}

	bareudp->port = conf->port;
	bareudp->ethertype = conf->ethertype;
	bareudp->sport_min = conf->sport_min;
	bareudp->multi_proto_mode = conf->multi_proto_mode;

	err = register_netdevice(dev);
	if (err)
		return err;

	list_add(&bareudp->next, &bn->bareudp_list);
	return 0;
}

static int bareudp_link_config(struct net_device *dev,
			       struct nlattr *tb[])
{
	int err;

	if (tb[IFLA_MTU]) {
		err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
		if (err)
			return err;
	}
	return 0;
}

static void bareudp_dellink(struct net_device *dev, struct list_head *head)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);

	list_del(&bareudp->next);
	unregister_netdevice_queue(dev, head);
}

static int bareudp_newlink(struct net *net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	struct bareudp_conf conf;
	int err;

	err = bareudp2info(data, &conf, extack);
	if (err)
		return err;

	err = bareudp_configure(net, dev, &conf, extack);
	if (err)
		return err;

	err = bareudp_link_config(dev, tb);
	if (err)
		goto err_unconfig;

	return 0;

err_unconfig:
	bareudp_dellink(dev, NULL);
	return err;
}

static size_t bareudp_get_size(const struct net_device *dev)
{
	return  nla_total_size(sizeof(__be16)) +  /* IFLA_BAREUDP_PORT */
		nla_total_size(sizeof(__be16)) +  /* IFLA_BAREUDP_ETHERTYPE */
		nla_total_size(sizeof(__u16))  +  /* IFLA_BAREUDP_SRCPORT_MIN */
		nla_total_size(0)              +  /* IFLA_BAREUDP_MULTIPROTO_MODE */
		0;
}

static int bareudp_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);

	if (nla_put_be16(skb, IFLA_BAREUDP_PORT, bareudp->port))
		goto nla_put_failure;
	if (nla_put_be16(skb, IFLA_BAREUDP_ETHERTYPE, bareudp->ethertype))
		goto nla_put_failure;
	if (nla_put_u16(skb, IFLA_BAREUDP_SRCPORT_MIN, bareudp->sport_min))
		goto nla_put_failure;
	if (bareudp->multi_proto_mode &&
	    nla_put_flag(skb, IFLA_BAREUDP_MULTIPROTO_MODE))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops bareudp_link_ops __read_mostly = {
	.kind		= "bareudp",
	.maxtype	= IFLA_BAREUDP_MAX,
	.policy		= bareudp_policy,
	.priv_size	= sizeof(struct bareudp_dev),
	.setup		= bareudp_setup,
	.validate	= bareudp_validate,
	.newlink	= bareudp_newlink,
	.dellink	= bareudp_dellink,
	.get_size	= bareudp_get_size,
	.fill_info	= bareudp_fill_info,
};

static __net_init int bareudp_init_net(struct net *net)
{
	struct bareudp_net *bn = net_generic(net, bareudp_net_id);

	INIT_LIST_HEAD(&bn->bareudp_list);
	return 0;
}

static void bareudp_destroy_tunnels(struct net *net, struct list_head *head)
{
	struct bareudp_net *bn = net_generic(net, bareudp_net_id);
	struct bareudp_dev *bareudp, *next;

	list_for_each_entry_safe(bareudp, next, &bn->bareudp_list, next)
		unregister_netdevice_queue(bareudp->dev, head);
}

static void __net_exit bareudp_exit_batch_rtnl(struct list_head *net_list,
					       struct list_head *dev_kill_list)
{
	struct net *net;

	list_for_each_entry(net, net_list, exit_list)
		bareudp_destroy_tunnels(net, dev_kill_list);
}

static struct pernet_operations bareudp_net_ops = {
	.init = bareudp_init_net,
	.exit_batch_rtnl = bareudp_exit_batch_rtnl,
	.id   = &bareudp_net_id,
	.size = sizeof(struct bareudp_net),
};

static int __init bareudp_init_module(void)
{
	int rc;

	rc = register_pernet_subsys(&bareudp_net_ops);
	if (rc)
		goto out1;

	rc = rtnl_link_register(&bareudp_link_ops);
	if (rc)
		goto out2;

	return 0;
out2:
	unregister_pernet_subsys(&bareudp_net_ops);
out1:
	return rc;
}
late_initcall(bareudp_init_module);

static void __exit bareudp_cleanup_module(void)
{
	rtnl_link_unregister(&bareudp_link_ops);
	unregister_pernet_subsys(&bareudp_net_ops);
}
module_exit(bareudp_cleanup_module);

MODULE_ALIAS_RTNL_LINK("bareudp");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Varghese <martin.varghese@nokia.com>");
MODULE_DESCRIPTION("Interface driver for UDP encapsulated traffic");
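
/*
 * Usage sketch (illustrative, not part of the driver): bareudp devices are
 * created from user space over rtnetlink, for example with iproute2. The
 * commands below assume an iproute2 build with bareudp support; exact syntax
 * may vary between versions.
 *
 *   # MPLS-over-UDP tunnel bound to destination UDP port 6635
 *   ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls_uc
 *
 *   # Same, but also accepting the secondary protocol
 *   # (multicast MPLS, via IFLA_BAREUDP_MULTIPROTO_MODE)
 *   ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls_uc multiproto
 */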