/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/seqlock.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/dst_metadata.h>
#include <net/ip_tunnels.h>
#include <net/vxlan.h>
#include <net/erspan.h>
#include <net/geneve.h>

/*
 * Private data of the "tunnel" expression (the rule-side getter that loads
 * tunnel metadata of the current packet into a register).
 */
struct nft_tunnel {
	enum nft_tunnel_keys	key:8;	/* which datum to fetch (PATH/ID) */
	u8			dreg;	/* destination register index */
	enum nft_tunnel_mode	mode:8;	/* NONE, or restrict to RX/TX metadata */
	u8			len;	/* number of bytes stored in dreg */
};

/*
 * Evaluate the tunnel getter for one packet.
 *
 * NFT_TUNNEL_PATH stores a boolean: true iff tunnel metadata is attached
 * to the skb and matches the configured mode (RX = no IP_TUNNEL_INFO_TX,
 * TX = IP_TUNNEL_INFO_TX set, NONE = either direction).
 *
 * NFT_TUNNEL_ID stores the 32-bit tunnel id (host byte order); if there is
 * no (mode-matching) tunnel metadata, rule evaluation breaks (NFT_BREAK).
 */
static void nft_tunnel_get_eval(const struct nft_expr *expr,
				struct nft_regs *regs,
				const struct nft_pktinfo *pkt)
{
	const struct nft_tunnel *priv = nft_expr_priv(expr);
	u32 *dest = &regs->data[priv->dreg];
	struct ip_tunnel_info *tun_info;

	tun_info = skb_tunnel_info(pkt->skb);

	switch (priv->key) {
	case NFT_TUNNEL_PATH:
		if (!tun_info) {
			/* no metadata at all: "tunnel path" is false */
			nft_reg_store8(dest, false);
			return;
		}
		if (priv->mode == NFT_TUNNEL_MODE_NONE ||
		    (priv->mode == NFT_TUNNEL_MODE_RX &&
		     !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
		    (priv->mode == NFT_TUNNEL_MODE_TX &&
		     (tun_info->mode & IP_TUNNEL_INFO_TX)))
			nft_reg_store8(dest, true);
		else
			nft_reg_store8(dest, false);
		break;
	case NFT_TUNNEL_ID:
		if (!tun_info) {
			regs->verdict.code = NFT_BREAK;
			return;
		}
		if (priv->mode == NFT_TUNNEL_MODE_NONE ||
		    (priv->mode == NFT_TUNNEL_MODE_RX &&
		     !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
		    (priv->mode == NFT_TUNNEL_MODE_TX &&
		     (tun_info->mode & IP_TUNNEL_INFO_TX)))
			*dest = ntohl(tunnel_id_to_key32(tun_info->key.tun_id));
		else
			regs->verdict.code = NFT_BREAK;
		break;
	default:
		WARN_ON(1);
		regs->verdict.code = NFT_BREAK;
	}
}

static const struct nla_policy nft_tunnel_policy[NFTA_TUNNEL_MAX + 1] = {
	/* KEY/MODE are big-endian u32 on the wire but must fit the 8-bit
	 * bitfields in struct nft_tunnel, hence the 255 cap.
	 */
	[NFTA_TUNNEL_KEY]	= NLA_POLICY_MAX(NLA_BE32, 255),
	[NFTA_TUNNEL_DREG]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_MODE]	= NLA_POLICY_MAX(NLA_BE32, 255),
};

/*
 * Parse and validate the netlink attributes of a tunnel getter expression.
 * KEY and DREG are mandatory; MODE defaults to NFT_TUNNEL_MODE_NONE.
 * The register store length depends on the key: 1 byte for PATH (boolean),
 * 4 bytes for ID.
 */
static int nft_tunnel_get_init(const struct nft_ctx *ctx,
			       const struct nft_expr *expr,
			       const struct nlattr * const tb[])
{
	struct nft_tunnel *priv = nft_expr_priv(expr);
	u32 len;

	if (!tb[NFTA_TUNNEL_KEY] ||
	    !tb[NFTA_TUNNEL_DREG])
		return -EINVAL;

	priv->key = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY]));
	switch (priv->key) {
	case NFT_TUNNEL_PATH:
		len = sizeof(u8);
		break;
	case NFT_TUNNEL_ID:
		len = sizeof(u32);
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (tb[NFTA_TUNNEL_MODE]) {
		priv->mode = ntohl(nla_get_be32(tb[NFTA_TUNNEL_MODE]));
		if (priv->mode > NFT_TUNNEL_MODE_MAX)
			return -EOPNOTSUPP;
	} else {
		priv->mode = NFT_TUNNEL_MODE_NONE;
	}

	priv->len = len;
	/* validates dreg and records that it will hold 'len' bytes */
	return nft_parse_register_store(ctx, tb[NFTA_TUNNEL_DREG], &priv->dreg,
					NULL, NFT_DATA_VALUE, len);
}

/* Dump the expression configuration back to userspace via netlink. */
static int nft_tunnel_get_dump(struct sk_buff *skb,
			       const struct nft_expr *expr, bool reset)
{
	const struct nft_tunnel *priv = nft_expr_priv(expr);

	if (nla_put_be32(skb, NFTA_TUNNEL_KEY, htonl(priv->key)))
		goto nla_put_failure;
	if (nft_dump_register(skb, NFTA_TUNNEL_DREG, priv->dreg))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_TUNNEL_MODE, htonl(priv->mode)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

/*
 * Register-tracking hook: returns true when a previous, identically
 * configured tunnel expression already loaded the same value into this
 * register (and no bitwise op clobbered it since), so this expression
 * can be elided; otherwise records this expression as the new selector.
 */
static bool nft_tunnel_get_reduce(struct nft_regs_track *track,
				  const struct nft_expr *expr)
{
	const struct nft_tunnel *priv = nft_expr_priv(expr);
	const struct nft_tunnel *tunnel;

	if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
		nft_reg_track_update(track, expr, priv->dreg, priv->len);
		return false;
	}

	tunnel = nft_expr_priv(track->regs[priv->dreg].selector);
	if (priv->key != tunnel->key ||
	    priv->dreg != tunnel->dreg ||
	    priv->mode != tunnel->mode) {
		nft_reg_track_update(track, expr, priv->dreg, priv->len);
		return false;
	}

	if (!track->regs[priv->dreg].bitwise)
		return true;

	return false;
}

static struct nft_expr_type nft_tunnel_type;
static const struct nft_expr_ops nft_tunnel_get_ops = {
	.type		= &nft_tunnel_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_tunnel)),
	.eval		= nft_tunnel_get_eval,
	.init		= nft_tunnel_get_init,
	.dump		= nft_tunnel_get_dump,
	.reduce		= nft_tunnel_get_reduce,
};

/* The expression is only available for the netdev family (ingress/egress). */
static struct nft_expr_type nft_tunnel_type __read_mostly = {
	.name		= "tunnel",
	.family		= NFPROTO_NETDEV,
	.ops		= &nft_tunnel_get_ops,
	.policy		= nft_tunnel_policy,
	.maxattr	= NFTA_TUNNEL_MAX,
	.owner		= THIS_MODULE,
};

/*
 * Encapsulation-specific tunnel options carried by a tunnel *object*
 * (the stateful object that attaches TX tunnel metadata to packets).
 * Exactly one of vxlan/erspan applies, or a sequence of geneve options
 * packed into 'data'; 'flags' records which IP_TUNNEL_*_OPT_BIT is set.
 */
struct nft_tunnel_opts {
	union {
		struct vxlan_metadata	vxlan;
		struct erspan_metadata	erspan;
		u8	data[IP_TUNNEL_OPTS_MAX];
	} u;
	IP_TUNNEL_DECLARE_FLAGS(flags);
	u32	len;		/* bytes used in u */
};

struct nft_tunnel_obj {
	struct metadata_dst	*md;	/* preallocated dst holding tun_info */
	struct nft_tunnel_opts	opts;
};

static const struct nla_policy nft_tunnel_ip_policy[NFTA_TUNNEL_KEY_IP_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP_SRC]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_IP_DST]	= { .type = NLA_U32 },
};

/* Parse IPv4 endpoints; destination is mandatory, source optional. */
static int nft_tunnel_obj_ip_init(const struct nft_ctx *ctx,
				  const struct nlattr *attr,
				  struct ip_tunnel_info *info)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_IP_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP_MAX, attr,
					  nft_tunnel_ip_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_IP_DST])
		return -EINVAL;

	if (tb[NFTA_TUNNEL_KEY_IP_SRC])
		info->key.u.ipv4.src = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_SRC]);
	if (tb[NFTA_TUNNEL_KEY_IP_DST])
		info->key.u.ipv4.dst = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_DST]);

	return 0;
}

static const struct nla_policy nft_tunnel_ip6_policy[NFTA_TUNNEL_KEY_IP6_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP6_SRC]	= { .len = sizeof(struct in6_addr), },
	[NFTA_TUNNEL_KEY_IP6_DST]	= { .len = sizeof(struct in6_addr), },
	[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]	= { .type = NLA_U32, }
};

/*
 * Parse IPv6 endpoints (+ optional flow label); destination is mandatory.
 * Marks the tunnel info as IPv6 via IP_TUNNEL_INFO_IPV6.
 */
static int nft_tunnel_obj_ip6_init(const struct nft_ctx *ctx,
				   const struct nlattr *attr,
				   struct ip_tunnel_info *info)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_IP6_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP6_MAX, attr,
					  nft_tunnel_ip6_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_IP6_DST])
		return -EINVAL;

	if (tb[NFTA_TUNNEL_KEY_IP6_SRC]) {
		memcpy(&info->key.u.ipv6.src,
		       nla_data(tb[NFTA_TUNNEL_KEY_IP6_SRC]),
		       sizeof(struct in6_addr));
	}
	if (tb[NFTA_TUNNEL_KEY_IP6_DST]) {
		memcpy(&info->key.u.ipv6.dst,
		       nla_data(tb[NFTA_TUNNEL_KEY_IP6_DST]),
		       sizeof(struct in6_addr));
	}
	if (tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL])
		info->key.label = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]);

	info->mode |= IP_TUNNEL_INFO_IPV6;

	return 0;
}

static const struct nla_policy nft_tunnel_opts_vxlan_policy[NFTA_TUNNEL_KEY_VXLAN_MAX + 1] = {
	[NFTA_TUNNEL_KEY_VXLAN_GBP]	= { .type = NLA_U32 },
};

/* Parse VXLAN group-based-policy option; GBP attribute is mandatory. */
static int nft_tunnel_obj_vxlan_init(const struct nlattr *attr,
				     struct nft_tunnel_opts *opts)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_VXLAN_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_VXLAN_MAX, attr,
					  nft_tunnel_opts_vxlan_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_VXLAN_GBP])
		return -EINVAL;

	opts->u.vxlan.gbp = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_VXLAN_GBP]));

	opts->len	= sizeof(struct vxlan_metadata);
	ip_tunnel_flags_zero(opts->flags);
	__set_bit(IP_TUNNEL_VXLAN_OPT_BIT, opts->flags);

	return 0;
}

static const struct nla_policy nft_tunnel_opts_erspan_policy[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1] = {
	[NFTA_TUNNEL_KEY_ERSPAN_VERSION]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]	= { .type = NLA_U8 },
	[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]	= { .type = NLA_U8 },
};

/*
 * Parse ERSPAN options. Version is mandatory; v1 requires an index,
 * v2 requires direction and hardware id. Other versions are rejected.
 */
static int nft_tunnel_obj_erspan_init(const struct nlattr *attr,
				      struct nft_tunnel_opts *opts)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1];
	uint8_t hwid, dir;
	int err, version;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_ERSPAN_MAX,
					  attr, nft_tunnel_opts_erspan_policy,
					  NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION])
		return -EINVAL;

	version = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION]));
	switch (version) {
	case ERSPAN_VERSION:
		if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX])
			return -EINVAL;

		/* index stays big-endian, as erspan_metadata expects */
		opts->u.erspan.u.index =
			nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]);
		break;
	case ERSPAN_VERSION2:
		if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] ||
		    !tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID])
			return -EINVAL;

		hwid = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]);
		dir = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]);

		set_hwid(&opts->u.erspan.u.md2, hwid);
		opts->u.erspan.u.md2.dir = dir;
		break;
	default:
		return -EOPNOTSUPP;
	}
	opts->u.erspan.version = version;

	opts->len	= sizeof(struct erspan_metadata);
	ip_tunnel_flags_zero(opts->flags);
	__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, opts->flags);

	return 0;
}

static const struct nla_policy nft_tunnel_opts_geneve_policy[NFTA_TUNNEL_KEY_GENEVE_MAX + 1] = {
	[NFTA_TUNNEL_KEY_GENEVE_CLASS]	= { .type = NLA_U16 },
	[NFTA_TUNNEL_KEY_GENEVE_TYPE]	= { .type = NLA_U8 },
	/* raw option payload; must be a multiple of 4 (checked in the init
	 * helper below). NOTE(review): cap of 127 here vs. 128 used for the
	 * same attribute elsewhere in the tree — confirm intended; it rejects
	 * no valid input since data_len % 4 must be 0 anyway.
	 */
	[NFTA_TUNNEL_KEY_GENEVE_DATA]	= { .type = NLA_BINARY, .len = 127 },
};

/*
 * Parse one GENEVE option and append it (header + payload) at the current
 * opts->len offset, so multiple GENEVE options can be chained by repeated
 * calls. Class, type and data are all mandatory; the combined length must
 * not exceed IP_TUNNEL_OPTS_MAX.
 */
static int nft_tunnel_obj_geneve_init(const struct nlattr *attr,
				      struct nft_tunnel_opts *opts)
{
	struct geneve_opt *opt = (struct geneve_opt *)(opts->u.data + opts->len);
	struct nlattr *tb[NFTA_TUNNEL_KEY_GENEVE_MAX + 1];
	int err, data_len;

	err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_GENEVE_MAX, attr,
			       nft_tunnel_opts_geneve_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_GENEVE_CLASS] ||
	    !tb[NFTA_TUNNEL_KEY_GENEVE_TYPE] ||
	    !tb[NFTA_TUNNEL_KEY_GENEVE_DATA])
		return -EINVAL;

	attr = tb[NFTA_TUNNEL_KEY_GENEVE_DATA];
	data_len = nla_len(attr);
	if (data_len % 4)
		return -EINVAL;

	/* bounds check before the memcpy below */
	opts->len += sizeof(*opt) + data_len;
	if (opts->len > IP_TUNNEL_OPTS_MAX)
		return -EINVAL;

	memcpy(opt->opt_data, nla_data(attr), data_len);
	opt->length = data_len / 4;	/* geneve encodes length in 4-byte words */
	opt->opt_class = nla_get_be16(tb[NFTA_TUNNEL_KEY_GENEVE_CLASS]);
	opt->type = nla_get_u8(tb[NFTA_TUNNEL_KEY_GENEVE_TYPE]);
	ip_tunnel_flags_zero(opts->flags);
	__set_bit(IP_TUNNEL_GENEVE_OPT_BIT, opts->flags);

	return 0;
}

static const struct nla_policy nft_tunnel_opts_policy[NFTA_TUNNEL_KEY_OPTS_MAX + 1] = {
	[NFTA_TUNNEL_KEY_OPTS_UNSPEC]	= {
		.strict_start_type = NFTA_TUNNEL_KEY_OPTS_GENEVE },
	[NFTA_TUNNEL_KEY_OPTS_VXLAN]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_OPTS_ERSPAN]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_OPTS_GENEVE]	= { .type = NLA_NESTED, },
};

/*
 * Parse the encapsulation options container. Option types are mutually
 * exclusive ('type' guards against mixing), with the one exception that
 * several GENEVE options may follow each other.
 */
static int nft_tunnel_obj_opts_init(const struct nft_ctx *ctx,
				    const struct nlattr *attr,
				    struct ip_tunnel_info *info,
				    struct nft_tunnel_opts *opts)
{
	struct nlattr *nla;
	int err, rem;
	u32 type = 0;

	err = nla_validate_nested_deprecated(attr, NFTA_TUNNEL_KEY_OPTS_MAX,
					     nft_tunnel_opts_policy, NULL);
	if (err < 0)
		return err;

	nla_for_each_attr(nla, nla_data(attr), nla_len(attr), rem) {
		switch (nla_type(nla)) {
		case NFTA_TUNNEL_KEY_OPTS_VXLAN:
			if (type)
				return -EINVAL;
			err = nft_tunnel_obj_vxlan_init(nla, opts);
			if (err)
				return err;
			type = IP_TUNNEL_VXLAN_OPT_BIT;
			break;
		case NFTA_TUNNEL_KEY_OPTS_ERSPAN:
			if (type)
				return -EINVAL;
			err = nft_tunnel_obj_erspan_init(nla, opts);
			if (err)
				return err;
			type = IP_TUNNEL_ERSPAN_OPT_BIT;
			break;
		case NFTA_TUNNEL_KEY_OPTS_GENEVE:
			/* multiple GENEVE options are allowed, nothing else */
			if (type && type != IP_TUNNEL_GENEVE_OPT_BIT)
				return -EINVAL;
			err = nft_tunnel_obj_geneve_init(nla, opts);
			if (err)
				return err;
			type = IP_TUNNEL_GENEVE_OPT_BIT;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	return err;
}

static const struct nla_policy nft_tunnel_key_policy[NFTA_TUNNEL_KEY_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_IP6]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_ID]	= { .type = NLA_U32, },
	[NFTA_TUNNEL_KEY_FLAGS]	= { .type = NLA_U32, },
	[NFTA_TUNNEL_KEY_TOS]	= { .type = NLA_U8, },
	[NFTA_TUNNEL_KEY_TTL]	= { .type = NLA_U8, },
	[NFTA_TUNNEL_KEY_SPORT]	= { .type = NLA_U16, },
	[NFTA_TUNNEL_KEY_DPORT]	= { .type = NLA_U16, },
	[NFTA_TUNNEL_KEY_OPTS]	= { .type = NLA_NESTED, },
};

/*
 * Create a tunnel object: build an ip_tunnel_info from the netlink
 * attributes (id is mandatory, exactly one of IP/IP6 is required) and
 * preallocate the metadata_dst that nft_tunnel_obj_eval() will attach
 * to packets. CSUM and NOCACHE are enabled by default; FLAGS can clear
 * checksumming and set DF/sequence-number bits.
 */
static int nft_tunnel_obj_init(const struct nft_ctx *ctx,
			       const struct nlattr * const tb[],
			       struct nft_object *obj)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct ip_tunnel_info info;
	struct metadata_dst *md;
	int err;

	if (!tb[NFTA_TUNNEL_KEY_ID])
		return -EINVAL;

	memset(&info, 0, sizeof(info));
	info.mode		= IP_TUNNEL_INFO_TX;
	info.key.tun_id		= key32_to_tunnel_id(nla_get_be32(tb[NFTA_TUNNEL_KEY_ID]));
	__set_bit(IP_TUNNEL_KEY_BIT, info.key.tun_flags);
	__set_bit(IP_TUNNEL_CSUM_BIT, info.key.tun_flags);
	__set_bit(IP_TUNNEL_NOCACHE_BIT, info.key.tun_flags);

	if (tb[NFTA_TUNNEL_KEY_IP]) {
		err = nft_tunnel_obj_ip_init(ctx, tb[NFTA_TUNNEL_KEY_IP], &info);
		if (err < 0)
			return err;
	} else if (tb[NFTA_TUNNEL_KEY_IP6]) {
		err = nft_tunnel_obj_ip6_init(ctx, tb[NFTA_TUNNEL_KEY_IP6], &info);
		if (err < 0)
			return err;
	} else {
		return -EINVAL;
	}

	if (tb[NFTA_TUNNEL_KEY_SPORT]) {
		info.key.tp_src = nla_get_be16(tb[NFTA_TUNNEL_KEY_SPORT]);
	}
	if (tb[NFTA_TUNNEL_KEY_DPORT]) {
		info.key.tp_dst = nla_get_be16(tb[NFTA_TUNNEL_KEY_DPORT]);
	}

	if (tb[NFTA_TUNNEL_KEY_FLAGS]) {
		u32 tun_flags;

		tun_flags = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_FLAGS]));
		if (tun_flags & ~NFT_TUNNEL_F_MASK)
			return -EOPNOTSUPP;

		if (tun_flags & NFT_TUNNEL_F_ZERO_CSUM_TX)
			__clear_bit(IP_TUNNEL_CSUM_BIT, info.key.tun_flags);
		if (tun_flags & NFT_TUNNEL_F_DONT_FRAGMENT)
			__set_bit(IP_TUNNEL_DONT_FRAGMENT_BIT,
				  info.key.tun_flags);
		if (tun_flags & NFT_TUNNEL_F_SEQ_NUMBER)
			__set_bit(IP_TUNNEL_SEQ_BIT, info.key.tun_flags);
	}
	if (tb[NFTA_TUNNEL_KEY_TOS])
		info.key.tos = nla_get_u8(tb[NFTA_TUNNEL_KEY_TOS]);
	/* TTL defaults to 255 when not given */
	info.key.ttl = nla_get_u8_default(tb[NFTA_TUNNEL_KEY_TTL], U8_MAX);

	if (tb[NFTA_TUNNEL_KEY_OPTS]) {
		err = nft_tunnel_obj_opts_init(ctx, tb[NFTA_TUNNEL_KEY_OPTS],
					       &info, &priv->opts);
		if (err < 0)
			return err;
	}

	md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL,
				GFP_KERNEL_ACCOUNT);
	if (!md)
		return -ENOMEM;

	memcpy(&md->u.tun_info, &info, sizeof(info));
#ifdef CONFIG_DST_CACHE
	err = dst_cache_init(&md->u.tun_info.dst_cache, GFP_KERNEL_ACCOUNT);
	if (err < 0) {
		metadata_dst_free(md);
		return err;
	}
#endif
	ip_tunnel_info_opts_set(&md->u.tun_info, &priv->opts.u, priv->opts.len,
				priv->opts.flags);
	priv->md = md;

	return 0;
}

/*
 * Per-packet action: replace the skb's dst with (a reference to) the
 * object's shared metadata_dst, so a later tunnel device picks up the
 * TX tunnel parameters.
 */
static inline void nft_tunnel_obj_eval(struct nft_object *obj,
				       struct nft_regs *regs,
				       const struct nft_pktinfo *pkt)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct sk_buff *skb = pkt->skb;

	skb_dst_drop(skb);
	dst_hold((struct dst_entry *) priv->md);
	skb_dst_set(skb, (struct dst_entry *) priv->md);
}

/* Dump the IPv4 or IPv6 endpoint nest, chosen by IP_TUNNEL_INFO_IPV6. */
static int nft_tunnel_ip_dump(struct sk_buff *skb, struct ip_tunnel_info *info)
{
	struct nlattr *nest;

	if (info->mode & IP_TUNNEL_INFO_IPV6) {
		nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP6);
		if (!nest)
			return -1;

		if (nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_SRC,
				     &info->key.u.ipv6.src) < 0 ||
		    nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_DST,
				     &info->key.u.ipv6.dst) < 0 ||
		    nla_put_be32(skb, NFTA_TUNNEL_KEY_IP6_FLOWLABEL,
				 info->key.label)) {
			nla_nest_cancel(skb, nest);
			return -1;
		}

		nla_nest_end(skb, nest);
	} else {
		nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP);
		if (!nest)
			return -1;

		if (nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_SRC,
				    info->key.u.ipv4.src) < 0 ||
		    nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_DST,
				    info->key.u.ipv4.dst) < 0) {
			nla_nest_cancel(skb, nest);
			return -1;
		}

		nla_nest_end(skb, nest);
	}

	return 0;
}

/*
 * Dump the encapsulation options nest. GENEVE options are emitted one
 * nested attribute per option, walking the packed buffer by each option's
 * encoded length.
 */
static int nft_tunnel_opts_dump(struct sk_buff *skb,
				struct nft_tunnel_obj *priv)
{
	struct nft_tunnel_opts *opts = &priv->opts;
	struct nlattr *nest, *inner;

	nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS);
	if (!nest)
		return -1;

	if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, opts->flags)) {
		inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_VXLAN);
		if (!inner)
			goto failure;
		if (nla_put_be32(skb, NFTA_TUNNEL_KEY_VXLAN_GBP,
				 htonl(opts->u.vxlan.gbp)))
			goto inner_failure;
		nla_nest_end(skb, inner);
	} else if (test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, opts->flags)) {
		inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_ERSPAN);
		if (!inner)
			goto failure;
		if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_VERSION,
				 htonl(opts->u.erspan.version)))
			goto inner_failure;
		switch (opts->u.erspan.version) {
		case ERSPAN_VERSION:
			if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX,
					 opts->u.erspan.u.index))
				goto inner_failure;
			break;
		case ERSPAN_VERSION2:
			if (nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_HWID,
				       get_hwid(&opts->u.erspan.u.md2)) ||
			    nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_DIR,
				       opts->u.erspan.u.md2.dir))
				goto inner_failure;
			break;
		}
		nla_nest_end(skb, inner);
	} else if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, opts->flags)) {
		struct geneve_opt *opt;
		int offset = 0;

		while (opts->len > offset) {
			inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_GENEVE);
			if (!inner)
				goto failure;
			opt = (struct geneve_opt *)(opts->u.data + offset);
			if (nla_put_be16(skb, NFTA_TUNNEL_KEY_GENEVE_CLASS,
					 opt->opt_class) ||
			    nla_put_u8(skb, NFTA_TUNNEL_KEY_GENEVE_TYPE,
				       opt->type) ||
			    nla_put(skb, NFTA_TUNNEL_KEY_GENEVE_DATA,
				    opt->length * 4, opt->opt_data))
				goto inner_failure;
			offset += sizeof(*opt) + opt->length * 4;
			nla_nest_end(skb, inner);
		}
	}
	nla_nest_end(skb, nest);
	return 0;

inner_failure:
	nla_nest_cancel(skb, inner);
failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

/* Dump source/destination transport ports (network byte order). */
static int nft_tunnel_ports_dump(struct sk_buff *skb,
				 struct ip_tunnel_info *info)
{
	if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, info->key.tp_src) < 0 ||
	    nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, info->key.tp_dst) < 0)
		return -1;

	return 0;
}

/* Translate tun_flags bits back into the NFT_TUNNEL_F_* userspace flags. */
static int nft_tunnel_flags_dump(struct sk_buff *skb,
				 struct ip_tunnel_info *info)
{
	u32 flags = 0;

	if (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, info->key.tun_flags))
		flags |= NFT_TUNNEL_F_DONT_FRAGMENT;
	/* CSUM is on by default, so its absence means ZERO_CSUM_TX was set */
	if (!test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags))
		flags |= NFT_TUNNEL_F_ZERO_CSUM_TX;
	if (test_bit(IP_TUNNEL_SEQ_BIT, info->key.tun_flags))
		flags |= NFT_TUNNEL_F_SEQ_NUMBER;

	if (nla_put_be32(skb, NFTA_TUNNEL_KEY_FLAGS, htonl(flags)) < 0)
		return -1;

	return 0;
}

/* Dump the whole tunnel object configuration. */
static int nft_tunnel_obj_dump(struct sk_buff *skb,
			       struct nft_object *obj, bool reset)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct ip_tunnel_info *info = &priv->md->u.tun_info;

	if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ID,
			 tunnel_id_to_key32(info->key.tun_id)) ||
	    nft_tunnel_ip_dump(skb, info) < 0 ||
	    nft_tunnel_ports_dump(skb, info) < 0 ||
	    nft_tunnel_flags_dump(skb, info) < 0 ||
	    nla_put_u8(skb, NFTA_TUNNEL_KEY_TOS, info->key.tos) ||
	    nla_put_u8(skb, NFTA_TUNNEL_KEY_TTL, info->key.ttl) ||
	    nft_tunnel_opts_dump(skb, priv) < 0)
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -1;
}

/* Release the metadata_dst allocated in nft_tunnel_obj_init(). */
static void nft_tunnel_obj_destroy(const struct nft_ctx *ctx,
				   struct nft_object *obj)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);

	metadata_dst_free(priv->md);
}

static struct nft_object_type nft_tunnel_obj_type;
static const struct nft_object_ops nft_tunnel_obj_ops = {
	.type		= &nft_tunnel_obj_type,
	.size		= sizeof(struct nft_tunnel_obj),
	.eval		= nft_tunnel_obj_eval,
	.init		= nft_tunnel_obj_init,
	.destroy	= nft_tunnel_obj_destroy,
	.dump		= nft_tunnel_obj_dump,
};

static struct nft_object_type nft_tunnel_obj_type __read_mostly = {
	.type		= NFT_OBJECT_TUNNEL,
	.family		= NFPROTO_NETDEV,
	.ops		= &nft_tunnel_obj_ops,
	.maxattr	= NFTA_TUNNEL_KEY_MAX,
	.policy		= nft_tunnel_key_policy,
	.owner		= THIS_MODULE,
};

/* Register the expression first, then the object; unwind on failure. */
static int __init nft_tunnel_module_init(void)
{
	int err;

	err = nft_register_expr(&nft_tunnel_type);
	if (err < 0)
		return err;

	err = nft_register_obj(&nft_tunnel_obj_type);
	if (err < 0)
		nft_unregister_expr(&nft_tunnel_type);

	return err;
}

static void __exit nft_tunnel_module_exit(void)
{
	nft_unregister_obj(&nft_tunnel_obj_type);
	nft_unregister_expr(&nft_tunnel_type);
}

module_init(nft_tunnel_module_init);
module_exit(nft_tunnel_module_exit);

/* Module metadata: userspace autoloads this via the expr/obj aliases. */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_EXPR("tunnel");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_TUNNEL);
MODULE_DESCRIPTION("nftables tunnel expression support");